]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blob - test/grsecurity-2.9-3.3.0-201204010912.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.9-3.3.0-201204010912.patch
1 diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2 index 0c083c5..9c2512a 100644
3 --- a/Documentation/dontdiff
4 +++ b/Documentation/dontdiff
5 @@ -5,6 +5,7 @@
6 *.cis
7 *.cpio
8 *.csp
9 +*.dbg
10 *.dsp
11 *.dvi
12 *.elf
13 @@ -14,6 +15,7 @@
14 *.gcov
15 *.gen.S
16 *.gif
17 +*.gmo
18 *.grep
19 *.grp
20 *.gz
21 @@ -48,9 +50,11 @@
22 *.tab.h
23 *.tex
24 *.ver
25 +*.vim
26 *.xml
27 *.xz
28 *_MODULES
29 +*_reg_safe.h
30 *_vga16.c
31 *~
32 \#*#
33 @@ -69,6 +73,7 @@ Image
34 Module.markers
35 Module.symvers
36 PENDING
37 +PERF*
38 SCCS
39 System.map*
40 TAGS
41 @@ -92,19 +97,24 @@ bounds.h
42 bsetup
43 btfixupprep
44 build
45 +builtin-policy.h
46 bvmlinux
47 bzImage*
48 capability_names.h
49 capflags.c
50 classlist.h*
51 +clut_vga16.c
52 +common-cmds.h
53 comp*.log
54 compile.h*
55 conf
56 config
57 config-*
58 config_data.h*
59 +config.c
60 config.mak
61 config.mak.autogen
62 +config.tmp
63 conmakehash
64 consolemap_deftbl.c*
65 cpustr.h
66 @@ -115,9 +125,11 @@ devlist.h*
67 dnotify_test
68 docproc
69 dslm
70 +dtc-lexer.lex.c
71 elf2ecoff
72 elfconfig.h*
73 evergreen_reg_safe.h
74 +exception_policy.conf
75 fixdep
76 flask.h
77 fore200e_mkfirm
78 @@ -125,12 +137,15 @@ fore200e_pca_fw.c*
79 gconf
80 gconf.glade.h
81 gen-devlist
82 +gen-kdb_cmds.c
83 gen_crc32table
84 gen_init_cpio
85 generated
86 genheaders
87 genksyms
88 *_gray256.c
89 +hash
90 +hid-example
91 hpet_example
92 hugepage-mmap
93 hugepage-shm
94 @@ -145,7 +160,7 @@ int32.c
95 int4.c
96 int8.c
97 kallsyms
98 -kconfig
99 +kern_constants.h
100 keywords.c
101 ksym.c*
102 ksym.h*
103 @@ -153,7 +168,7 @@ kxgettext
104 lkc_defs.h
105 lex.c
106 lex.*.c
107 -linux
108 +lib1funcs.S
109 logo_*.c
110 logo_*_clut224.c
111 logo_*_mono.c
112 @@ -165,14 +180,15 @@ machtypes.h
113 map
114 map_hugetlb
115 maui_boot.h
116 -media
117 mconf
118 +mdp
119 miboot*
120 mk_elfconfig
121 mkboot
122 mkbugboot
123 mkcpustr
124 mkdep
125 +mkpiggy
126 mkprep
127 mkregtable
128 mktables
129 @@ -208,6 +224,7 @@ r300_reg_safe.h
130 r420_reg_safe.h
131 r600_reg_safe.h
132 recordmcount
133 +regdb.c
134 relocs
135 rlim_names.h
136 rn50_reg_safe.h
137 @@ -218,6 +235,7 @@ setup
138 setup.bin
139 setup.elf
140 sImage
141 +slabinfo
142 sm_tbl*
143 split-include
144 syscalltab.h
145 @@ -228,6 +246,7 @@ tftpboot.img
146 timeconst.h
147 times.h*
148 trix_boot.h
149 +user_constants.h
150 utsrelease.h*
151 vdso-syms.lds
152 vdso.lds
153 @@ -245,7 +264,9 @@ vmlinux
154 vmlinux-*
155 vmlinux.aout
156 vmlinux.bin.all
157 +vmlinux.bin.bz2
158 vmlinux.lds
159 +vmlinux.relocs
160 vmlinuz
161 voffset.h
162 vsyscall.lds
163 @@ -253,9 +274,11 @@ vsyscall_32.lds
164 wanxlfw.inc
165 uImage
166 unifdef
167 +utsrelease.h
168 wakeup.bin
169 wakeup.elf
170 wakeup.lds
171 zImage*
172 zconf.hash.c
173 +zconf.lex.c
174 zoffset.h
175 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
176 index d99fd9c..8689fef 100644
177 --- a/Documentation/kernel-parameters.txt
178 +++ b/Documentation/kernel-parameters.txt
179 @@ -1977,6 +1977,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
180 the specified number of seconds. This is to be used if
181 your oopses keep scrolling off the screen.
182
183 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
184 + virtualization environments that don't cope well with the
185 + expand down segment used by UDEREF on X86-32 or the frequent
186 + page table updates on X86-64.
187 +
188 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
189 +
190 pcbit= [HW,ISDN]
191
192 pcd. [PARIDE]
193 diff --git a/Makefile b/Makefile
194 index 1932984..0204e68 100644
195 --- a/Makefile
196 +++ b/Makefile
197 @@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
198
199 HOSTCC = gcc
200 HOSTCXX = g++
201 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
202 -HOSTCXXFLAGS = -O2
203 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
204 +HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
205 +HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
206
207 # Decide whether to build built-in, modular, or both.
208 # Normally, just do built-in.
209 @@ -407,8 +408,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
210 # Rules shared between *config targets and build targets
211
212 # Basic helpers built in scripts/
213 -PHONY += scripts_basic
214 -scripts_basic:
215 +PHONY += scripts_basic gcc-plugins
216 +scripts_basic: gcc-plugins
217 $(Q)$(MAKE) $(build)=scripts/basic
218 $(Q)rm -f .tmp_quiet_recordmcount
219
220 @@ -564,6 +565,50 @@ else
221 KBUILD_CFLAGS += -O2
222 endif
223
224 +ifndef DISABLE_PAX_PLUGINS
225 +ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
226 +ifndef DISABLE_PAX_CONSTIFY_PLUGIN
227 +CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
228 +endif
229 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
230 +STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
231 +STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
232 +endif
233 +ifdef CONFIG_KALLOCSTAT_PLUGIN
234 +KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
235 +endif
236 +ifdef CONFIG_PAX_KERNEXEC_PLUGIN
237 +KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
238 +KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
239 +KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
240 +endif
241 +ifdef CONFIG_CHECKER_PLUGIN
242 +ifeq ($(call cc-ifversion, -ge, 0406, y), y)
243 +CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
244 +endif
245 +endif
246 +COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
247 +GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
248 +GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
249 +GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
250 +export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN
251 +ifeq ($(KBUILD_EXTMOD),)
252 +gcc-plugins:
253 + $(Q)$(MAKE) $(build)=tools/gcc
254 +else
255 +gcc-plugins: ;
256 +endif
257 +else
258 +gcc-plugins:
259 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
260 + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.)
261 +else
262 + $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
263 +endif
264 + $(Q)echo "PAX_MEMORY_STACKLEAK and other features will be less secure"
265 +endif
266 +endif
267 +
268 include $(srctree)/arch/$(SRCARCH)/Makefile
269
270 ifneq ($(CONFIG_FRAME_WARN),0)
271 @@ -708,7 +753,7 @@ export mod_strip_cmd
272
273
274 ifeq ($(KBUILD_EXTMOD),)
275 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
276 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
277
278 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
279 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
280 @@ -932,6 +977,8 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
281
282 # The actual objects are generated when descending,
283 # make sure no implicit rule kicks in
284 +$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
285 +$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
286 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
287
288 # Handle descending into subdirectories listed in $(vmlinux-dirs)
289 @@ -941,7 +988,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
290 # Error messages still appears in the original language
291
292 PHONY += $(vmlinux-dirs)
293 -$(vmlinux-dirs): prepare scripts
294 +$(vmlinux-dirs): gcc-plugins prepare scripts
295 $(Q)$(MAKE) $(build)=$@
296
297 # Store (new) KERNELRELASE string in include/config/kernel.release
298 @@ -985,6 +1032,7 @@ prepare0: archprepare FORCE
299 $(Q)$(MAKE) $(build)=.
300
301 # All the preparing..
302 +prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
303 prepare: prepare0
304
305 # Generate some files
306 @@ -1089,6 +1137,8 @@ all: modules
307 # using awk while concatenating to the final file.
308
309 PHONY += modules
310 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
311 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
312 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
313 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
314 @$(kecho) ' Building modules, stage 2.';
315 @@ -1104,7 +1154,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
316
317 # Target to prepare building external modules
318 PHONY += modules_prepare
319 -modules_prepare: prepare scripts
320 +modules_prepare: gcc-plugins prepare scripts
321
322 # Target to install modules
323 PHONY += modules_install
324 @@ -1201,6 +1251,7 @@ distclean: mrproper
325 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
326 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
327 -o -name '.*.rej' \
328 + -o -name '*.so' \
329 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
330 -type f -print | xargs rm -f
331
332 @@ -1361,6 +1412,8 @@ PHONY += $(module-dirs) modules
333 $(module-dirs): crmodverdir $(objtree)/Module.symvers
334 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
335
336 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
337 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
338 modules: $(module-dirs)
339 @$(kecho) ' Building modules, stage 2.';
340 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
341 @@ -1487,17 +1540,21 @@ else
342 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
343 endif
344
345 -%.s: %.c prepare scripts FORCE
346 +%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
347 +%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
348 +%.s: %.c gcc-plugins prepare scripts FORCE
349 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
350 %.i: %.c prepare scripts FORCE
351 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
352 -%.o: %.c prepare scripts FORCE
353 +%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
354 +%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
355 +%.o: %.c gcc-plugins prepare scripts FORCE
356 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
357 %.lst: %.c prepare scripts FORCE
358 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
359 -%.s: %.S prepare scripts FORCE
360 +%.s: %.S gcc-plugins prepare scripts FORCE
361 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
362 -%.o: %.S prepare scripts FORCE
363 +%.o: %.S gcc-plugins prepare scripts FORCE
364 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
365 %.symtypes: %.c prepare scripts FORCE
366 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
367 @@ -1507,11 +1564,15 @@ endif
368 $(cmd_crmodverdir)
369 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
370 $(build)=$(build-dir)
371 -%/: prepare scripts FORCE
372 +%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
373 +%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
374 +%/: gcc-plugins prepare scripts FORCE
375 $(cmd_crmodverdir)
376 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
377 $(build)=$(build-dir)
378 -%.ko: prepare scripts FORCE
379 +%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
380 +%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
381 +%.ko: gcc-plugins prepare scripts FORCE
382 $(cmd_crmodverdir)
383 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
384 $(build)=$(build-dir) $(@:.ko=.o)
385 diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
386 index 640f909..48b6597 100644
387 --- a/arch/alpha/include/asm/atomic.h
388 +++ b/arch/alpha/include/asm/atomic.h
389 @@ -250,6 +250,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
390 #define atomic_dec(v) atomic_sub(1,(v))
391 #define atomic64_dec(v) atomic64_sub(1,(v))
392
393 +#define atomic64_read_unchecked(v) atomic64_read(v)
394 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
395 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
396 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
397 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
398 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
399 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
400 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
401 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
402 +
403 #define smp_mb__before_atomic_dec() smp_mb()
404 #define smp_mb__after_atomic_dec() smp_mb()
405 #define smp_mb__before_atomic_inc() smp_mb()
406 diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
407 index ad368a9..fbe0f25 100644
408 --- a/arch/alpha/include/asm/cache.h
409 +++ b/arch/alpha/include/asm/cache.h
410 @@ -4,19 +4,19 @@
411 #ifndef __ARCH_ALPHA_CACHE_H
412 #define __ARCH_ALPHA_CACHE_H
413
414 +#include <linux/const.h>
415
416 /* Bytes per L1 (data) cache line. */
417 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
418 -# define L1_CACHE_BYTES 64
419 # define L1_CACHE_SHIFT 6
420 #else
421 /* Both EV4 and EV5 are write-through, read-allocate,
422 direct-mapped, physical.
423 */
424 -# define L1_CACHE_BYTES 32
425 # define L1_CACHE_SHIFT 5
426 #endif
427
428 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
429 #define SMP_CACHE_BYTES L1_CACHE_BYTES
430
431 #endif
432 diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
433 index da5449e..7418343 100644
434 --- a/arch/alpha/include/asm/elf.h
435 +++ b/arch/alpha/include/asm/elf.h
436 @@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
437
438 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
439
440 +#ifdef CONFIG_PAX_ASLR
441 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
442 +
443 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
444 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
445 +#endif
446 +
447 /* $0 is set by ld.so to a pointer to a function which might be
448 registered using atexit. This provides a mean for the dynamic
449 linker to call DT_FINI functions for shared libraries that have
450 diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
451 index de98a73..bd4f1f8 100644
452 --- a/arch/alpha/include/asm/pgtable.h
453 +++ b/arch/alpha/include/asm/pgtable.h
454 @@ -101,6 +101,17 @@ struct vm_area_struct;
455 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
456 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
457 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
458 +
459 +#ifdef CONFIG_PAX_PAGEEXEC
460 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
461 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
462 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
463 +#else
464 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
465 +# define PAGE_COPY_NOEXEC PAGE_COPY
466 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
467 +#endif
468 +
469 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
470
471 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
472 diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
473 index 2fd00b7..cfd5069 100644
474 --- a/arch/alpha/kernel/module.c
475 +++ b/arch/alpha/kernel/module.c
476 @@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
477
478 /* The small sections were sorted to the end of the segment.
479 The following should definitely cover them. */
480 - gp = (u64)me->module_core + me->core_size - 0x8000;
481 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
482 got = sechdrs[me->arch.gotsecindex].sh_addr;
483
484 for (i = 0; i < n; i++) {
485 diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
486 index 01e8715..be0e80f 100644
487 --- a/arch/alpha/kernel/osf_sys.c
488 +++ b/arch/alpha/kernel/osf_sys.c
489 @@ -1147,7 +1147,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
490 /* At this point: (!vma || addr < vma->vm_end). */
491 if (limit - len < addr)
492 return -ENOMEM;
493 - if (!vma || addr + len <= vma->vm_start)
494 + if (check_heap_stack_gap(vma, addr, len))
495 return addr;
496 addr = vma->vm_end;
497 vma = vma->vm_next;
498 @@ -1183,6 +1183,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
499 merely specific addresses, but regions of memory -- perhaps
500 this feature should be incorporated into all ports? */
501
502 +#ifdef CONFIG_PAX_RANDMMAP
503 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
504 +#endif
505 +
506 if (addr) {
507 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
508 if (addr != (unsigned long) -ENOMEM)
509 @@ -1190,8 +1194,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
510 }
511
512 /* Next, try allocating at TASK_UNMAPPED_BASE. */
513 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
514 - len, limit);
515 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
516 +
517 if (addr != (unsigned long) -ENOMEM)
518 return addr;
519
520 diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
521 index fadd5f8..904e73a 100644
522 --- a/arch/alpha/mm/fault.c
523 +++ b/arch/alpha/mm/fault.c
524 @@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
525 __reload_thread(pcb);
526 }
527
528 +#ifdef CONFIG_PAX_PAGEEXEC
529 +/*
530 + * PaX: decide what to do with offenders (regs->pc = fault address)
531 + *
532 + * returns 1 when task should be killed
533 + * 2 when patched PLT trampoline was detected
534 + * 3 when unpatched PLT trampoline was detected
535 + */
536 +static int pax_handle_fetch_fault(struct pt_regs *regs)
537 +{
538 +
539 +#ifdef CONFIG_PAX_EMUPLT
540 + int err;
541 +
542 + do { /* PaX: patched PLT emulation #1 */
543 + unsigned int ldah, ldq, jmp;
544 +
545 + err = get_user(ldah, (unsigned int *)regs->pc);
546 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
547 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
548 +
549 + if (err)
550 + break;
551 +
552 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
553 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
554 + jmp == 0x6BFB0000U)
555 + {
556 + unsigned long r27, addr;
557 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
558 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
559 +
560 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
561 + err = get_user(r27, (unsigned long *)addr);
562 + if (err)
563 + break;
564 +
565 + regs->r27 = r27;
566 + regs->pc = r27;
567 + return 2;
568 + }
569 + } while (0);
570 +
571 + do { /* PaX: patched PLT emulation #2 */
572 + unsigned int ldah, lda, br;
573 +
574 + err = get_user(ldah, (unsigned int *)regs->pc);
575 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
576 + err |= get_user(br, (unsigned int *)(regs->pc+8));
577 +
578 + if (err)
579 + break;
580 +
581 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
582 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
583 + (br & 0xFFE00000U) == 0xC3E00000U)
584 + {
585 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
586 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
587 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
588 +
589 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
590 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
591 + return 2;
592 + }
593 + } while (0);
594 +
595 + do { /* PaX: unpatched PLT emulation */
596 + unsigned int br;
597 +
598 + err = get_user(br, (unsigned int *)regs->pc);
599 +
600 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
601 + unsigned int br2, ldq, nop, jmp;
602 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
603 +
604 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
605 + err = get_user(br2, (unsigned int *)addr);
606 + err |= get_user(ldq, (unsigned int *)(addr+4));
607 + err |= get_user(nop, (unsigned int *)(addr+8));
608 + err |= get_user(jmp, (unsigned int *)(addr+12));
609 + err |= get_user(resolver, (unsigned long *)(addr+16));
610 +
611 + if (err)
612 + break;
613 +
614 + if (br2 == 0xC3600000U &&
615 + ldq == 0xA77B000CU &&
616 + nop == 0x47FF041FU &&
617 + jmp == 0x6B7B0000U)
618 + {
619 + regs->r28 = regs->pc+4;
620 + regs->r27 = addr+16;
621 + regs->pc = resolver;
622 + return 3;
623 + }
624 + }
625 + } while (0);
626 +#endif
627 +
628 + return 1;
629 +}
630 +
631 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
632 +{
633 + unsigned long i;
634 +
635 + printk(KERN_ERR "PAX: bytes at PC: ");
636 + for (i = 0; i < 5; i++) {
637 + unsigned int c;
638 + if (get_user(c, (unsigned int *)pc+i))
639 + printk(KERN_CONT "???????? ");
640 + else
641 + printk(KERN_CONT "%08x ", c);
642 + }
643 + printk("\n");
644 +}
645 +#endif
646
647 /*
648 * This routine handles page faults. It determines the address,
649 @@ -131,8 +249,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
650 good_area:
651 si_code = SEGV_ACCERR;
652 if (cause < 0) {
653 - if (!(vma->vm_flags & VM_EXEC))
654 + if (!(vma->vm_flags & VM_EXEC)) {
655 +
656 +#ifdef CONFIG_PAX_PAGEEXEC
657 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
658 + goto bad_area;
659 +
660 + up_read(&mm->mmap_sem);
661 + switch (pax_handle_fetch_fault(regs)) {
662 +
663 +#ifdef CONFIG_PAX_EMUPLT
664 + case 2:
665 + case 3:
666 + return;
667 +#endif
668 +
669 + }
670 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
671 + do_group_exit(SIGKILL);
672 +#else
673 goto bad_area;
674 +#endif
675 +
676 + }
677 } else if (!cause) {
678 /* Allow reads even for write-only mappings */
679 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
680 diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
681 index 86976d0..8e07f84 100644
682 --- a/arch/arm/include/asm/atomic.h
683 +++ b/arch/arm/include/asm/atomic.h
684 @@ -15,6 +15,10 @@
685 #include <linux/types.h>
686 #include <asm/system.h>
687
688 +#ifdef CONFIG_GENERIC_ATOMIC64
689 +#include <asm-generic/atomic64.h>
690 +#endif
691 +
692 #define ATOMIC_INIT(i) { (i) }
693
694 #ifdef __KERNEL__
695 @@ -25,7 +29,15 @@
696 * atomic_set() is the clrex or dummy strex done on every exception return.
697 */
698 #define atomic_read(v) (*(volatile int *)&(v)->counter)
699 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
700 +{
701 + return v->counter;
702 +}
703 #define atomic_set(v,i) (((v)->counter) = (i))
704 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
705 +{
706 + v->counter = i;
707 +}
708
709 #if __LINUX_ARM_ARCH__ >= 6
710
711 @@ -40,6 +52,35 @@ static inline void atomic_add(int i, atomic_t *v)
712 int result;
713
714 __asm__ __volatile__("@ atomic_add\n"
715 +"1: ldrex %1, [%3]\n"
716 +" adds %0, %1, %4\n"
717 +
718 +#ifdef CONFIG_PAX_REFCOUNT
719 +" bvc 3f\n"
720 +"2: bkpt 0xf103\n"
721 +"3:\n"
722 +#endif
723 +
724 +" strex %1, %0, [%3]\n"
725 +" teq %1, #0\n"
726 +" bne 1b"
727 +
728 +#ifdef CONFIG_PAX_REFCOUNT
729 +"\n4:\n"
730 + _ASM_EXTABLE(2b, 4b)
731 +#endif
732 +
733 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
734 + : "r" (&v->counter), "Ir" (i)
735 + : "cc");
736 +}
737 +
738 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
739 +{
740 + unsigned long tmp;
741 + int result;
742 +
743 + __asm__ __volatile__("@ atomic_add_unchecked\n"
744 "1: ldrex %0, [%3]\n"
745 " add %0, %0, %4\n"
746 " strex %1, %0, [%3]\n"
747 @@ -58,6 +99,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
748 smp_mb();
749
750 __asm__ __volatile__("@ atomic_add_return\n"
751 +"1: ldrex %1, [%3]\n"
752 +" adds %0, %1, %4\n"
753 +
754 +#ifdef CONFIG_PAX_REFCOUNT
755 +" bvc 3f\n"
756 +" mov %0, %1\n"
757 +"2: bkpt 0xf103\n"
758 +"3:\n"
759 +#endif
760 +
761 +" strex %1, %0, [%3]\n"
762 +" teq %1, #0\n"
763 +" bne 1b"
764 +
765 +#ifdef CONFIG_PAX_REFCOUNT
766 +"\n4:\n"
767 + _ASM_EXTABLE(2b, 4b)
768 +#endif
769 +
770 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
771 + : "r" (&v->counter), "Ir" (i)
772 + : "cc");
773 +
774 + smp_mb();
775 +
776 + return result;
777 +}
778 +
779 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
780 +{
781 + unsigned long tmp;
782 + int result;
783 +
784 + smp_mb();
785 +
786 + __asm__ __volatile__("@ atomic_add_return_unchecked\n"
787 "1: ldrex %0, [%3]\n"
788 " add %0, %0, %4\n"
789 " strex %1, %0, [%3]\n"
790 @@ -78,6 +155,35 @@ static inline void atomic_sub(int i, atomic_t *v)
791 int result;
792
793 __asm__ __volatile__("@ atomic_sub\n"
794 +"1: ldrex %1, [%3]\n"
795 +" subs %0, %1, %4\n"
796 +
797 +#ifdef CONFIG_PAX_REFCOUNT
798 +" bvc 3f\n"
799 +"2: bkpt 0xf103\n"
800 +"3:\n"
801 +#endif
802 +
803 +" strex %1, %0, [%3]\n"
804 +" teq %1, #0\n"
805 +" bne 1b"
806 +
807 +#ifdef CONFIG_PAX_REFCOUNT
808 +"\n4:\n"
809 + _ASM_EXTABLE(2b, 4b)
810 +#endif
811 +
812 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
813 + : "r" (&v->counter), "Ir" (i)
814 + : "cc");
815 +}
816 +
817 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
818 +{
819 + unsigned long tmp;
820 + int result;
821 +
822 + __asm__ __volatile__("@ atomic_sub_unchecked\n"
823 "1: ldrex %0, [%3]\n"
824 " sub %0, %0, %4\n"
825 " strex %1, %0, [%3]\n"
826 @@ -96,11 +202,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
827 smp_mb();
828
829 __asm__ __volatile__("@ atomic_sub_return\n"
830 -"1: ldrex %0, [%3]\n"
831 -" sub %0, %0, %4\n"
832 +"1: ldrex %1, [%3]\n"
833 +" sub %0, %1, %4\n"
834 +
835 +#ifdef CONFIG_PAX_REFCOUNT
836 +" bvc 3f\n"
837 +" mov %0, %1\n"
838 +"2: bkpt 0xf103\n"
839 +"3:\n"
840 +#endif
841 +
842 " strex %1, %0, [%3]\n"
843 " teq %1, #0\n"
844 " bne 1b"
845 +
846 +#ifdef CONFIG_PAX_REFCOUNT
847 +"\n4:\n"
848 + _ASM_EXTABLE(2b, 4b)
849 +#endif
850 +
851 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
852 : "r" (&v->counter), "Ir" (i)
853 : "cc");
854 @@ -132,6 +252,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
855 return oldval;
856 }
857
858 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
859 +{
860 + unsigned long oldval, res;
861 +
862 + smp_mb();
863 +
864 + do {
865 + __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
866 + "ldrex %1, [%3]\n"
867 + "mov %0, #0\n"
868 + "teq %1, %4\n"
869 + "strexeq %0, %5, [%3]\n"
870 + : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
871 + : "r" (&ptr->counter), "Ir" (old), "r" (new)
872 + : "cc");
873 + } while (res);
874 +
875 + smp_mb();
876 +
877 + return oldval;
878 +}
879 +
880 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
881 {
882 unsigned long tmp, tmp2;
883 @@ -165,7 +307,9 @@ static inline int atomic_add_return(int i, atomic_t *v)
884
885 return val;
886 }
887 +#define atomic_add_return_unchecked(i, v) atomic_add_return(i, v)
888 #define atomic_add(i, v) (void) atomic_add_return(i, v)
889 +#define atomic_add_unchecked(i, v) (void) atomic_add_return_unchecked(i, v)
890
891 static inline int atomic_sub_return(int i, atomic_t *v)
892 {
893 @@ -179,7 +323,9 @@ static inline int atomic_sub_return(int i, atomic_t *v)
894
895 return val;
896 }
897 +#define atomic_sub_return_unchecked(i, v) atomic_sub_return(i, v)
898 #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
899 +#define atomic_sub_unchecked(i, v) (void) atomic_sub_return_unchecked(i, v)
900
901 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
902 {
903 @@ -194,6 +340,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
904
905 return ret;
906 }
907 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg(v, o, n)
908
909 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
910 {
911 @@ -207,6 +354,10 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
912 #endif /* __LINUX_ARM_ARCH__ */
913
914 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
915 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
916 +{
917 + return xchg(&v->counter, new);
918 +}
919
920 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
921 {
922 @@ -219,11 +370,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
923 }
924
925 #define atomic_inc(v) atomic_add(1, v)
926 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
927 +{
928 + atomic_add_unchecked(1, v);
929 +}
930 #define atomic_dec(v) atomic_sub(1, v)
931 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
932 +{
933 + atomic_sub_unchecked(1, v);
934 +}
935
936 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
937 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
938 +{
939 + return atomic_add_return_unchecked(1, v) == 0;
940 +}
941 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
942 #define atomic_inc_return(v) (atomic_add_return(1, v))
943 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
944 +{
945 + return atomic_add_return_unchecked(1, v);
946 +}
947 #define atomic_dec_return(v) (atomic_sub_return(1, v))
948 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
949
950 @@ -239,6 +406,14 @@ typedef struct {
951 u64 __aligned(8) counter;
952 } atomic64_t;
953
954 +#ifdef CONFIG_PAX_REFCOUNT
955 +typedef struct {
956 + u64 __aligned(8) counter;
957 +} atomic64_unchecked_t;
958 +#else
959 +typedef atomic64_t atomic64_unchecked_t;
960 +#endif
961 +
962 #define ATOMIC64_INIT(i) { (i) }
963
964 static inline u64 atomic64_read(atomic64_t *v)
965 @@ -254,6 +429,19 @@ static inline u64 atomic64_read(atomic64_t *v)
966 return result;
967 }
968
969 +static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *v)
970 +{
971 + u64 result;
972 +
973 + __asm__ __volatile__("@ atomic64_read_unchecked\n"
974 +" ldrexd %0, %H0, [%1]"
975 + : "=&r" (result)
976 + : "r" (&v->counter), "Qo" (v->counter)
977 + );
978 +
979 + return result;
980 +}
981 +
982 static inline void atomic64_set(atomic64_t *v, u64 i)
983 {
984 u64 tmp;
985 @@ -268,6 +456,20 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
986 : "cc");
987 }
988
989 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
990 +{
991 + u64 tmp;
992 +
993 + __asm__ __volatile__("@ atomic64_set_unchecked\n"
994 +"1: ldrexd %0, %H0, [%2]\n"
995 +" strexd %0, %3, %H3, [%2]\n"
996 +" teq %0, #0\n"
997 +" bne 1b"
998 + : "=&r" (tmp), "=Qo" (v->counter)
999 + : "r" (&v->counter), "r" (i)
1000 + : "cc");
1001 +}
1002 +
1003 static inline void atomic64_add(u64 i, atomic64_t *v)
1004 {
1005 u64 result;
1006 @@ -276,6 +478,36 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1007 __asm__ __volatile__("@ atomic64_add\n"
1008 "1: ldrexd %0, %H0, [%3]\n"
1009 " adds %0, %0, %4\n"
1010 +" adcs %H0, %H0, %H4\n"
1011 +
1012 +#ifdef CONFIG_PAX_REFCOUNT
1013 +" bvc 3f\n"
1014 +"2: bkpt 0xf103\n"
1015 +"3:\n"
1016 +#endif
1017 +
1018 +" strexd %1, %0, %H0, [%3]\n"
1019 +" teq %1, #0\n"
1020 +" bne 1b"
1021 +
1022 +#ifdef CONFIG_PAX_REFCOUNT
1023 +"\n4:\n"
1024 + _ASM_EXTABLE(2b, 4b)
1025 +#endif
1026 +
1027 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1028 + : "r" (&v->counter), "r" (i)
1029 + : "cc");
1030 +}
1031 +
1032 +static inline void atomic64_add_unchecked(u64 i, atomic64_unchecked_t *v)
1033 +{
1034 + u64 result;
1035 + unsigned long tmp;
1036 +
1037 + __asm__ __volatile__("@ atomic64_add_unchecked\n"
1038 +"1: ldrexd %0, %H0, [%3]\n"
1039 +" adds %0, %0, %4\n"
1040 " adc %H0, %H0, %H4\n"
1041 " strexd %1, %0, %H0, [%3]\n"
1042 " teq %1, #0\n"
1043 @@ -287,12 +519,49 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1044
1045 static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
1046 {
1047 - u64 result;
1048 - unsigned long tmp;
1049 + u64 result, tmp;
1050
1051 smp_mb();
1052
1053 __asm__ __volatile__("@ atomic64_add_return\n"
1054 +"1: ldrexd %1, %H1, [%3]\n"
1055 +" adds %0, %1, %4\n"
1056 +" adcs %H0, %H1, %H4\n"
1057 +
1058 +#ifdef CONFIG_PAX_REFCOUNT
1059 +" bvc 3f\n"
1060 +" mov %0, %1\n"
1061 +" mov %H0, %H1\n"
1062 +"2: bkpt 0xf103\n"
1063 +"3:\n"
1064 +#endif
1065 +
1066 +" strexd %1, %0, %H0, [%3]\n"
1067 +" teq %1, #0\n"
1068 +" bne 1b"
1069 +
1070 +#ifdef CONFIG_PAX_REFCOUNT
1071 +"\n4:\n"
1072 + _ASM_EXTABLE(2b, 4b)
1073 +#endif
1074 +
1075 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1076 + : "r" (&v->counter), "r" (i)
1077 + : "cc");
1078 +
1079 + smp_mb();
1080 +
1081 + return result;
1082 +}
1083 +
1084 +static inline u64 atomic64_add_return_unchecked(u64 i, atomic64_unchecked_t *v)
1085 +{
1086 + u64 result;
1087 + unsigned long tmp;
1088 +
1089 + smp_mb();
1090 +
1091 + __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
1092 "1: ldrexd %0, %H0, [%3]\n"
1093 " adds %0, %0, %4\n"
1094 " adc %H0, %H0, %H4\n"
1095 @@ -316,6 +585,36 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1096 __asm__ __volatile__("@ atomic64_sub\n"
1097 "1: ldrexd %0, %H0, [%3]\n"
1098 " subs %0, %0, %4\n"
1099 +" sbcs %H0, %H0, %H4\n"
1100 +
1101 +#ifdef CONFIG_PAX_REFCOUNT
1102 +" bvc 3f\n"
1103 +"2: bkpt 0xf103\n"
1104 +"3:\n"
1105 +#endif
1106 +
1107 +" strexd %1, %0, %H0, [%3]\n"
1108 +" teq %1, #0\n"
1109 +" bne 1b"
1110 +
1111 +#ifdef CONFIG_PAX_REFCOUNT
1112 +"\n4:\n"
1113 + _ASM_EXTABLE(2b, 4b)
1114 +#endif
1115 +
1116 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1117 + : "r" (&v->counter), "r" (i)
1118 + : "cc");
1119 +}
1120 +
1121 +static inline void atomic64_sub_unchecked(u64 i, atomic64_unchecked_t *v)
1122 +{
1123 + u64 result;
1124 + unsigned long tmp;
1125 +
1126 + __asm__ __volatile__("@ atomic64_sub_unchecked\n"
1127 +"1: ldrexd %0, %H0, [%3]\n"
1128 +" subs %0, %0, %4\n"
1129 " sbc %H0, %H0, %H4\n"
1130 " strexd %1, %0, %H0, [%3]\n"
1131 " teq %1, #0\n"
1132 @@ -327,18 +626,32 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1133
1134 static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
1135 {
1136 - u64 result;
1137 - unsigned long tmp;
1138 + u64 result, tmp;
1139
1140 smp_mb();
1141
1142 __asm__ __volatile__("@ atomic64_sub_return\n"
1143 -"1: ldrexd %0, %H0, [%3]\n"
1144 -" subs %0, %0, %4\n"
1145 -" sbc %H0, %H0, %H4\n"
1146 +"1: ldrexd %1, %H1, [%3]\n"
1147 +" subs %0, %1, %4\n"
1148 +" sbc %H0, %H1, %H4\n"
1149 +
1150 +#ifdef CONFIG_PAX_REFCOUNT
1151 +" bvc 3f\n"
1152 +" mov %0, %1\n"
1153 +" mov %H0, %H1\n"
1154 +"2: bkpt 0xf103\n"
1155 +"3:\n"
1156 +#endif
1157 +
1158 " strexd %1, %0, %H0, [%3]\n"
1159 " teq %1, #0\n"
1160 " bne 1b"
1161 +
1162 +#ifdef CONFIG_PAX_REFCOUNT
1163 +"\n4:\n"
1164 + _ASM_EXTABLE(2b, 4b)
1165 +#endif
1166 +
1167 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1168 : "r" (&v->counter), "r" (i)
1169 : "cc");
1170 @@ -372,6 +685,30 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
1171 return oldval;
1172 }
1173
1174 +static inline u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old, u64 new)
1175 +{
1176 + u64 oldval;
1177 + unsigned long res;
1178 +
1179 + smp_mb();
1180 +
1181 + do {
1182 + __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1183 + "ldrexd %1, %H1, [%3]\n"
1184 + "mov %0, #0\n"
1185 + "teq %1, %4\n"
1186 + "teqeq %H1, %H4\n"
1187 + "strexdeq %0, %5, %H5, [%3]"
1188 + : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1189 + : "r" (&ptr->counter), "r" (old), "r" (new)
1190 + : "cc");
1191 + } while (res);
1192 +
1193 + smp_mb();
1194 +
1195 + return oldval;
1196 +}
1197 +
1198 static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1199 {
1200 u64 result;
1201 @@ -395,21 +732,34 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1202
1203 static inline u64 atomic64_dec_if_positive(atomic64_t *v)
1204 {
1205 - u64 result;
1206 - unsigned long tmp;
1207 + u64 result, tmp;
1208
1209 smp_mb();
1210
1211 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1212 -"1: ldrexd %0, %H0, [%3]\n"
1213 -" subs %0, %0, #1\n"
1214 -" sbc %H0, %H0, #0\n"
1215 +"1: ldrexd %1, %H1, [%3]\n"
1216 +" subs %0, %1, #1\n"
1217 +" sbc %H0, %H1, #0\n"
1218 +
1219 +#ifdef CONFIG_PAX_REFCOUNT
1220 +" bvc 3f\n"
1221 +" mov %0, %1\n"
1222 +" mov %H0, %H1\n"
1223 +"2: bkpt 0xf103\n"
1224 +"3:\n"
1225 +#endif
1226 +
1227 " teq %H0, #0\n"
1228 -" bmi 2f\n"
1229 +" bmi 4f\n"
1230 " strexd %1, %0, %H0, [%3]\n"
1231 " teq %1, #0\n"
1232 " bne 1b\n"
1233 -"2:"
1234 +"4:\n"
1235 +
1236 +#ifdef CONFIG_PAX_REFCOUNT
1237 + _ASM_EXTABLE(2b, 4b)
1238 +#endif
1239 +
1240 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1241 : "r" (&v->counter)
1242 : "cc");
1243 @@ -432,13 +782,25 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1244 " teq %0, %5\n"
1245 " teqeq %H0, %H5\n"
1246 " moveq %1, #0\n"
1247 -" beq 2f\n"
1248 +" beq 4f\n"
1249 " adds %0, %0, %6\n"
1250 " adc %H0, %H0, %H6\n"
1251 +
1252 +#ifdef CONFIG_PAX_REFCOUNT
1253 +" bvc 3f\n"
1254 +"2: bkpt 0xf103\n"
1255 +"3:\n"
1256 +#endif
1257 +
1258 " strexd %2, %0, %H0, [%4]\n"
1259 " teq %2, #0\n"
1260 " bne 1b\n"
1261 -"2:"
1262 +"4:\n"
1263 +
1264 +#ifdef CONFIG_PAX_REFCOUNT
1265 + _ASM_EXTABLE(2b, 4b)
1266 +#endif
1267 +
1268 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1269 : "r" (&v->counter), "r" (u), "r" (a)
1270 : "cc");
1271 @@ -451,10 +813,13 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1272
1273 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1274 #define atomic64_inc(v) atomic64_add(1LL, (v))
1275 +#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1276 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1277 +#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1278 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1279 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1280 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1281 +#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1282 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1283 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1284 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
1285 diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1286 index 75fe66b..2255c86 100644
1287 --- a/arch/arm/include/asm/cache.h
1288 +++ b/arch/arm/include/asm/cache.h
1289 @@ -4,8 +4,10 @@
1290 #ifndef __ASMARM_CACHE_H
1291 #define __ASMARM_CACHE_H
1292
1293 +#include <linux/const.h>
1294 +
1295 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1296 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1297 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1298
1299 /*
1300 * Memory returned by kmalloc() may be used for DMA, so we must make
1301 diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1302 index d5d8d5c..ad92c96 100644
1303 --- a/arch/arm/include/asm/cacheflush.h
1304 +++ b/arch/arm/include/asm/cacheflush.h
1305 @@ -108,7 +108,7 @@ struct cpu_cache_fns {
1306 void (*dma_unmap_area)(const void *, size_t, int);
1307
1308 void (*dma_flush_range)(const void *, const void *);
1309 -};
1310 +} __no_const;
1311
1312 /*
1313 * Select the calling method
1314 diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1315 index 0e9ce8d..6ef1e03 100644
1316 --- a/arch/arm/include/asm/elf.h
1317 +++ b/arch/arm/include/asm/elf.h
1318 @@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1319 the loader. We need to make sure that it is out of the way of the program
1320 that it will "exec", and that there is sufficient room for the brk. */
1321
1322 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1323 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1324 +
1325 +#ifdef CONFIG_PAX_ASLR
1326 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1327 +
1328 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1329 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1330 +#endif
1331
1332 /* When the program starts, a1 contains a pointer to a function to be
1333 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1334 @@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1335 extern void elf_set_personality(const struct elf32_hdr *);
1336 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1337
1338 -struct mm_struct;
1339 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1340 -#define arch_randomize_brk arch_randomize_brk
1341 -
1342 extern int vectors_user_mapping(void);
1343 #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
1344 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
1345 diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1346 index e51b1e8..32a3113 100644
1347 --- a/arch/arm/include/asm/kmap_types.h
1348 +++ b/arch/arm/include/asm/kmap_types.h
1349 @@ -21,6 +21,7 @@ enum km_type {
1350 KM_L1_CACHE,
1351 KM_L2_CACHE,
1352 KM_KDB,
1353 + KM_CLEARPAGE,
1354 KM_TYPE_NR
1355 };
1356
1357 diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1358 index 53426c6..c7baff3 100644
1359 --- a/arch/arm/include/asm/outercache.h
1360 +++ b/arch/arm/include/asm/outercache.h
1361 @@ -35,7 +35,7 @@ struct outer_cache_fns {
1362 #endif
1363 void (*set_debug)(unsigned long);
1364 void (*resume)(void);
1365 -};
1366 +} __no_const;
1367
1368 #ifdef CONFIG_OUTER_CACHE
1369
1370 diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1371 index 97b440c..b7ff179 100644
1372 --- a/arch/arm/include/asm/page.h
1373 +++ b/arch/arm/include/asm/page.h
1374 @@ -123,7 +123,7 @@ struct cpu_user_fns {
1375 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1376 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1377 unsigned long vaddr, struct vm_area_struct *vma);
1378 -};
1379 +} __no_const;
1380
1381 #ifdef MULTI_USER
1382 extern struct cpu_user_fns cpu_user;
1383 diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
1384 index e4c96cc..1145653 100644
1385 --- a/arch/arm/include/asm/system.h
1386 +++ b/arch/arm/include/asm/system.h
1387 @@ -98,6 +98,8 @@ void hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int,
1388
1389 #define xchg(ptr,x) \
1390 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1391 +#define xchg_unchecked(ptr,x) \
1392 + ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1393
1394 extern asmlinkage void c_backtrace(unsigned long fp, int pmode);
1395
1396 @@ -534,6 +536,13 @@ static inline unsigned long long __cmpxchg64_mb(volatile void *ptr,
1397
1398 #endif /* __LINUX_ARM_ARCH__ >= 6 */
1399
1400 +#define _ASM_EXTABLE(from, to) \
1401 +" .pushsection __ex_table,\"a\"\n"\
1402 +" .align 3\n" \
1403 +" .long " #from ", " #to"\n" \
1404 +" .popsection"
1405 +
1406 +
1407 #endif /* __ASSEMBLY__ */
1408
1409 #define arch_align_stack(x) (x)
1410 diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
1411 index 2958976..12ccac4 100644
1412 --- a/arch/arm/include/asm/uaccess.h
1413 +++ b/arch/arm/include/asm/uaccess.h
1414 @@ -22,6 +22,8 @@
1415 #define VERIFY_READ 0
1416 #define VERIFY_WRITE 1
1417
1418 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
1419 +
1420 /*
1421 * The exception table consists of pairs of addresses: the first is the
1422 * address of an instruction that is allowed to fault, and the second is
1423 @@ -387,8 +389,23 @@ do { \
1424
1425
1426 #ifdef CONFIG_MMU
1427 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
1428 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
1429 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
1430 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
1431 +
1432 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
1433 +{
1434 + if (!__builtin_constant_p(n))
1435 + check_object_size(to, n, false);
1436 + return ___copy_from_user(to, from, n);
1437 +}
1438 +
1439 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
1440 +{
1441 + if (!__builtin_constant_p(n))
1442 + check_object_size(from, n, true);
1443 + return ___copy_to_user(to, from, n);
1444 +}
1445 +
1446 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
1447 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
1448 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
1449 @@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
1450
1451 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1452 {
1453 + if ((long)n < 0)
1454 + return n;
1455 +
1456 if (access_ok(VERIFY_READ, from, n))
1457 n = __copy_from_user(to, from, n);
1458 else /* security hole - plug it */
1459 @@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
1460
1461 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1462 {
1463 + if ((long)n < 0)
1464 + return n;
1465 +
1466 if (access_ok(VERIFY_WRITE, to, n))
1467 n = __copy_to_user(to, from, n);
1468 return n;
1469 diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
1470 index 5b0bce6..becd81c 100644
1471 --- a/arch/arm/kernel/armksyms.c
1472 +++ b/arch/arm/kernel/armksyms.c
1473 @@ -95,8 +95,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
1474 #ifdef CONFIG_MMU
1475 EXPORT_SYMBOL(copy_page);
1476
1477 -EXPORT_SYMBOL(__copy_from_user);
1478 -EXPORT_SYMBOL(__copy_to_user);
1479 +EXPORT_SYMBOL(___copy_from_user);
1480 +EXPORT_SYMBOL(___copy_to_user);
1481 EXPORT_SYMBOL(__clear_user);
1482
1483 EXPORT_SYMBOL(__get_user_1);
1484 diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
1485 index 971d65c..cc936fb 100644
1486 --- a/arch/arm/kernel/process.c
1487 +++ b/arch/arm/kernel/process.c
1488 @@ -28,7 +28,6 @@
1489 #include <linux/tick.h>
1490 #include <linux/utsname.h>
1491 #include <linux/uaccess.h>
1492 -#include <linux/random.h>
1493 #include <linux/hw_breakpoint.h>
1494 #include <linux/cpuidle.h>
1495
1496 @@ -273,9 +272,10 @@ void machine_power_off(void)
1497 machine_shutdown();
1498 if (pm_power_off)
1499 pm_power_off();
1500 + BUG();
1501 }
1502
1503 -void machine_restart(char *cmd)
1504 +__noreturn void machine_restart(char *cmd)
1505 {
1506 machine_shutdown();
1507
1508 @@ -517,12 +517,6 @@ unsigned long get_wchan(struct task_struct *p)
1509 return 0;
1510 }
1511
1512 -unsigned long arch_randomize_brk(struct mm_struct *mm)
1513 -{
1514 - unsigned long range_end = mm->brk + 0x02000000;
1515 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
1516 -}
1517 -
1518 #ifdef CONFIG_MMU
1519 /*
1520 * The vectors page is always readable from user space for the
1521 diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
1522 index a255c39..4a19b25 100644
1523 --- a/arch/arm/kernel/setup.c
1524 +++ b/arch/arm/kernel/setup.c
1525 @@ -109,13 +109,13 @@ struct processor processor __read_mostly;
1526 struct cpu_tlb_fns cpu_tlb __read_mostly;
1527 #endif
1528 #ifdef MULTI_USER
1529 -struct cpu_user_fns cpu_user __read_mostly;
1530 +struct cpu_user_fns cpu_user __read_only;
1531 #endif
1532 #ifdef MULTI_CACHE
1533 -struct cpu_cache_fns cpu_cache __read_mostly;
1534 +struct cpu_cache_fns cpu_cache __read_only;
1535 #endif
1536 #ifdef CONFIG_OUTER_CACHE
1537 -struct outer_cache_fns outer_cache __read_mostly;
1538 +struct outer_cache_fns outer_cache __read_only;
1539 EXPORT_SYMBOL(outer_cache);
1540 #endif
1541
1542 diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
1543 index f84dfe6..13e94f7 100644
1544 --- a/arch/arm/kernel/traps.c
1545 +++ b/arch/arm/kernel/traps.c
1546 @@ -259,6 +259,8 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
1547
1548 static DEFINE_RAW_SPINLOCK(die_lock);
1549
1550 +extern void gr_handle_kernel_exploit(void);
1551 +
1552 /*
1553 * This function is protected against re-entrancy.
1554 */
1555 @@ -291,6 +293,9 @@ void die(const char *str, struct pt_regs *regs, int err)
1556 panic("Fatal exception in interrupt");
1557 if (panic_on_oops)
1558 panic("Fatal exception");
1559 +
1560 + gr_handle_kernel_exploit();
1561 +
1562 if (ret != NOTIFY_STOP)
1563 do_exit(SIGSEGV);
1564 }
1565 diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
1566 index 66a477a..bee61d3 100644
1567 --- a/arch/arm/lib/copy_from_user.S
1568 +++ b/arch/arm/lib/copy_from_user.S
1569 @@ -16,7 +16,7 @@
1570 /*
1571 * Prototype:
1572 *
1573 - * size_t __copy_from_user(void *to, const void *from, size_t n)
1574 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
1575 *
1576 * Purpose:
1577 *
1578 @@ -84,11 +84,11 @@
1579
1580 .text
1581
1582 -ENTRY(__copy_from_user)
1583 +ENTRY(___copy_from_user)
1584
1585 #include "copy_template.S"
1586
1587 -ENDPROC(__copy_from_user)
1588 +ENDPROC(___copy_from_user)
1589
1590 .pushsection .fixup,"ax"
1591 .align 0
1592 diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
1593 index 6ee2f67..d1cce76 100644
1594 --- a/arch/arm/lib/copy_page.S
1595 +++ b/arch/arm/lib/copy_page.S
1596 @@ -10,6 +10,7 @@
1597 * ASM optimised string functions
1598 */
1599 #include <linux/linkage.h>
1600 +#include <linux/const.h>
1601 #include <asm/assembler.h>
1602 #include <asm/asm-offsets.h>
1603 #include <asm/cache.h>
1604 diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
1605 index d066df6..df28194 100644
1606 --- a/arch/arm/lib/copy_to_user.S
1607 +++ b/arch/arm/lib/copy_to_user.S
1608 @@ -16,7 +16,7 @@
1609 /*
1610 * Prototype:
1611 *
1612 - * size_t __copy_to_user(void *to, const void *from, size_t n)
1613 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
1614 *
1615 * Purpose:
1616 *
1617 @@ -88,11 +88,11 @@
1618 .text
1619
1620 ENTRY(__copy_to_user_std)
1621 -WEAK(__copy_to_user)
1622 +WEAK(___copy_to_user)
1623
1624 #include "copy_template.S"
1625
1626 -ENDPROC(__copy_to_user)
1627 +ENDPROC(___copy_to_user)
1628 ENDPROC(__copy_to_user_std)
1629
1630 .pushsection .fixup,"ax"
1631 diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
1632 index 5c908b1..e712687 100644
1633 --- a/arch/arm/lib/uaccess.S
1634 +++ b/arch/arm/lib/uaccess.S
1635 @@ -20,7 +20,7 @@
1636
1637 #define PAGE_SHIFT 12
1638
1639 -/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
1640 +/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
1641 * Purpose : copy a block to user memory from kernel memory
1642 * Params : to - user memory
1643 * : from - kernel memory
1644 @@ -40,7 +40,7 @@ USER( TUSER( strgtb) r3, [r0], #1) @ May fault
1645 sub r2, r2, ip
1646 b .Lc2u_dest_aligned
1647
1648 -ENTRY(__copy_to_user)
1649 +ENTRY(___copy_to_user)
1650 stmfd sp!, {r2, r4 - r7, lr}
1651 cmp r2, #4
1652 blt .Lc2u_not_enough
1653 @@ -278,14 +278,14 @@ USER( TUSER( strgeb) r3, [r0], #1) @ May fault
1654 ldrgtb r3, [r1], #0
1655 USER( TUSER( strgtb) r3, [r0], #1) @ May fault
1656 b .Lc2u_finished
1657 -ENDPROC(__copy_to_user)
1658 +ENDPROC(___copy_to_user)
1659
1660 .pushsection .fixup,"ax"
1661 .align 0
1662 9001: ldmfd sp!, {r0, r4 - r7, pc}
1663 .popsection
1664
1665 -/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
1666 +/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
1667 * Purpose : copy a block from user memory to kernel memory
1668 * Params : to - kernel memory
1669 * : from - user memory
1670 @@ -304,7 +304,7 @@ USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault
1671 sub r2, r2, ip
1672 b .Lcfu_dest_aligned
1673
1674 -ENTRY(__copy_from_user)
1675 +ENTRY(___copy_from_user)
1676 stmfd sp!, {r0, r2, r4 - r7, lr}
1677 cmp r2, #4
1678 blt .Lcfu_not_enough
1679 @@ -544,7 +544,7 @@ USER( TUSER( ldrgeb) r3, [r1], #1) @ May fault
1680 USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault
1681 strgtb r3, [r0], #1
1682 b .Lcfu_finished
1683 -ENDPROC(__copy_from_user)
1684 +ENDPROC(___copy_from_user)
1685
1686 .pushsection .fixup,"ax"
1687 .align 0
1688 diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
1689 index 025f742..8432b08 100644
1690 --- a/arch/arm/lib/uaccess_with_memcpy.c
1691 +++ b/arch/arm/lib/uaccess_with_memcpy.c
1692 @@ -104,7 +104,7 @@ out:
1693 }
1694
1695 unsigned long
1696 -__copy_to_user(void __user *to, const void *from, unsigned long n)
1697 +___copy_to_user(void __user *to, const void *from, unsigned long n)
1698 {
1699 /*
1700 * This test is stubbed out of the main function above to keep
1701 diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
1702 index 6722627..8f97548c 100644
1703 --- a/arch/arm/mach-omap2/board-n8x0.c
1704 +++ b/arch/arm/mach-omap2/board-n8x0.c
1705 @@ -597,7 +597,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
1706 }
1707 #endif
1708
1709 -static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
1710 +static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
1711 .late_init = n8x0_menelaus_late_init,
1712 };
1713
1714 diff --git a/arch/arm/mach-ux500/mbox-db5500.c b/arch/arm/mach-ux500/mbox-db5500.c
1715 index 2b2d51c..0127490 100644
1716 --- a/arch/arm/mach-ux500/mbox-db5500.c
1717 +++ b/arch/arm/mach-ux500/mbox-db5500.c
1718 @@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct device *dev,
1719 return sprintf(buf, "0x%X\n", mbox_value);
1720 }
1721
1722 -static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
1723 +static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
1724
1725 static int mbox_show(struct seq_file *s, void *data)
1726 {
1727 diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
1728 index bb7eac3..3bade16 100644
1729 --- a/arch/arm/mm/fault.c
1730 +++ b/arch/arm/mm/fault.c
1731 @@ -172,6 +172,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
1732 }
1733 #endif
1734
1735 +#ifdef CONFIG_PAX_PAGEEXEC
1736 + if (fsr & FSR_LNX_PF) {
1737 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
1738 + do_group_exit(SIGKILL);
1739 + }
1740 +#endif
1741 +
1742 tsk->thread.address = addr;
1743 tsk->thread.error_code = fsr;
1744 tsk->thread.trap_no = 14;
1745 @@ -393,6 +400,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
1746 }
1747 #endif /* CONFIG_MMU */
1748
1749 +#ifdef CONFIG_PAX_PAGEEXEC
1750 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1751 +{
1752 + long i;
1753 +
1754 + printk(KERN_ERR "PAX: bytes at PC: ");
1755 + for (i = 0; i < 20; i++) {
1756 + unsigned char c;
1757 + if (get_user(c, (__force unsigned char __user *)pc+i))
1758 + printk(KERN_CONT "?? ");
1759 + else
1760 + printk(KERN_CONT "%02x ", c);
1761 + }
1762 + printk("\n");
1763 +
1764 + printk(KERN_ERR "PAX: bytes at SP-4: ");
1765 + for (i = -1; i < 20; i++) {
1766 + unsigned long c;
1767 + if (get_user(c, (__force unsigned long __user *)sp+i))
1768 + printk(KERN_CONT "???????? ");
1769 + else
1770 + printk(KERN_CONT "%08lx ", c);
1771 + }
1772 + printk("\n");
1773 +}
1774 +#endif
1775 +
1776 /*
1777 * First Level Translation Fault Handler
1778 *
1779 @@ -573,6 +607,20 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
1780 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
1781 struct siginfo info;
1782
1783 +#ifdef CONFIG_PAX_REFCOUNT
1784 + if (fsr_fs(ifsr) == 2) {
1785 + unsigned int bkpt;
1786 +
1787 + if (!probe_kernel_address((unsigned int *)addr, bkpt) && bkpt == 0xe12f1073) {
1788 + current->thread.error_code = ifsr;
1789 + current->thread.trap_no = 0;
1790 + pax_report_refcount_overflow(regs);
1791 + fixup_exception(regs);
1792 + return;
1793 + }
1794 + }
1795 +#endif
1796 +
1797 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
1798 return;
1799
1800 diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
1801 index ce8cb19..3ec539d 100644
1802 --- a/arch/arm/mm/mmap.c
1803 +++ b/arch/arm/mm/mmap.c
1804 @@ -93,6 +93,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1805 if (len > TASK_SIZE)
1806 return -ENOMEM;
1807
1808 +#ifdef CONFIG_PAX_RANDMMAP
1809 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
1810 +#endif
1811 +
1812 if (addr) {
1813 if (do_align)
1814 addr = COLOUR_ALIGN(addr, pgoff);
1815 @@ -100,15 +104,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1816 addr = PAGE_ALIGN(addr);
1817
1818 vma = find_vma(mm, addr);
1819 - if (TASK_SIZE - len >= addr &&
1820 - (!vma || addr + len <= vma->vm_start))
1821 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1822 return addr;
1823 }
1824 if (len > mm->cached_hole_size) {
1825 - start_addr = addr = mm->free_area_cache;
1826 + start_addr = addr = mm->free_area_cache;
1827 } else {
1828 - start_addr = addr = mm->mmap_base;
1829 - mm->cached_hole_size = 0;
1830 + start_addr = addr = mm->mmap_base;
1831 + mm->cached_hole_size = 0;
1832 }
1833
1834 full_search:
1835 @@ -124,14 +127,14 @@ full_search:
1836 * Start a new search - just in case we missed
1837 * some holes.
1838 */
1839 - if (start_addr != TASK_UNMAPPED_BASE) {
1840 - start_addr = addr = TASK_UNMAPPED_BASE;
1841 + if (start_addr != mm->mmap_base) {
1842 + start_addr = addr = mm->mmap_base;
1843 mm->cached_hole_size = 0;
1844 goto full_search;
1845 }
1846 return -ENOMEM;
1847 }
1848 - if (!vma || addr + len <= vma->vm_start) {
1849 + if (check_heap_stack_gap(vma, addr, len)) {
1850 /*
1851 * Remember the place where we stopped the search:
1852 */
1853 @@ -266,10 +269,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
1854
1855 if (mmap_is_legacy()) {
1856 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
1857 +
1858 +#ifdef CONFIG_PAX_RANDMMAP
1859 + if (mm->pax_flags & MF_PAX_RANDMMAP)
1860 + mm->mmap_base += mm->delta_mmap;
1861 +#endif
1862 +
1863 mm->get_unmapped_area = arch_get_unmapped_area;
1864 mm->unmap_area = arch_unmap_area;
1865 } else {
1866 mm->mmap_base = mmap_base(random_factor);
1867 +
1868 +#ifdef CONFIG_PAX_RANDMMAP
1869 + if (mm->pax_flags & MF_PAX_RANDMMAP)
1870 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
1871 +#endif
1872 +
1873 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
1874 mm->unmap_area = arch_unmap_area_topdown;
1875 }
1876 diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
1877 index 71a6827..e7fbc23 100644
1878 --- a/arch/arm/plat-samsung/include/plat/dma-ops.h
1879 +++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
1880 @@ -43,7 +43,7 @@ struct samsung_dma_ops {
1881 int (*started)(unsigned ch);
1882 int (*flush)(unsigned ch);
1883 int (*stop)(unsigned ch);
1884 -};
1885 +} __no_const;
1886
1887 extern void *samsung_dmadev_get_ops(void);
1888 extern void *s3c_dma_get_ops(void);
1889 diff --git a/arch/arm/plat-samsung/include/plat/ehci.h b/arch/arm/plat-samsung/include/plat/ehci.h
1890 index 5f28cae..3d23723 100644
1891 --- a/arch/arm/plat-samsung/include/plat/ehci.h
1892 +++ b/arch/arm/plat-samsung/include/plat/ehci.h
1893 @@ -14,7 +14,7 @@
1894 struct s5p_ehci_platdata {
1895 int (*phy_init)(struct platform_device *pdev, int type);
1896 int (*phy_exit)(struct platform_device *pdev, int type);
1897 -};
1898 +} __no_const;
1899
1900 extern void s5p_ehci_set_platdata(struct s5p_ehci_platdata *pd);
1901
1902 diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
1903 index c3a58a1..78fbf54 100644
1904 --- a/arch/avr32/include/asm/cache.h
1905 +++ b/arch/avr32/include/asm/cache.h
1906 @@ -1,8 +1,10 @@
1907 #ifndef __ASM_AVR32_CACHE_H
1908 #define __ASM_AVR32_CACHE_H
1909
1910 +#include <linux/const.h>
1911 +
1912 #define L1_CACHE_SHIFT 5
1913 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1914 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1915
1916 /*
1917 * Memory returned by kmalloc() may be used for DMA, so we must make
1918 diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
1919 index 3b3159b..425ea94 100644
1920 --- a/arch/avr32/include/asm/elf.h
1921 +++ b/arch/avr32/include/asm/elf.h
1922 @@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
1923 the loader. We need to make sure that it is out of the way of the program
1924 that it will "exec", and that there is sufficient room for the brk. */
1925
1926 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1927 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1928
1929 +#ifdef CONFIG_PAX_ASLR
1930 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
1931 +
1932 +#define PAX_DELTA_MMAP_LEN 15
1933 +#define PAX_DELTA_STACK_LEN 15
1934 +#endif
1935
1936 /* This yields a mask that user programs can use to figure out what
1937 instruction set this CPU supports. This could be done in user space,
1938 diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
1939 index b7f5c68..556135c 100644
1940 --- a/arch/avr32/include/asm/kmap_types.h
1941 +++ b/arch/avr32/include/asm/kmap_types.h
1942 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
1943 D(11) KM_IRQ1,
1944 D(12) KM_SOFTIRQ0,
1945 D(13) KM_SOFTIRQ1,
1946 -D(14) KM_TYPE_NR
1947 +D(14) KM_CLEARPAGE,
1948 +D(15) KM_TYPE_NR
1949 };
1950
1951 #undef D
1952 diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
1953 index f7040a1..db9f300 100644
1954 --- a/arch/avr32/mm/fault.c
1955 +++ b/arch/avr32/mm/fault.c
1956 @@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
1957
1958 int exception_trace = 1;
1959
1960 +#ifdef CONFIG_PAX_PAGEEXEC
1961 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1962 +{
1963 + unsigned long i;
1964 +
1965 + printk(KERN_ERR "PAX: bytes at PC: ");
1966 + for (i = 0; i < 20; i++) {
1967 + unsigned char c;
1968 + if (get_user(c, (unsigned char *)pc+i))
1969 + printk(KERN_CONT "???????? ");
1970 + else
1971 + printk(KERN_CONT "%02x ", c);
1972 + }
1973 + printk("\n");
1974 +}
1975 +#endif
1976 +
1977 /*
1978 * This routine handles page faults. It determines the address and the
1979 * problem, and then passes it off to one of the appropriate routines.
1980 @@ -156,6 +173,16 @@ bad_area:
1981 up_read(&mm->mmap_sem);
1982
1983 if (user_mode(regs)) {
1984 +
1985 +#ifdef CONFIG_PAX_PAGEEXEC
1986 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
1987 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
1988 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
1989 + do_group_exit(SIGKILL);
1990 + }
1991 + }
1992 +#endif
1993 +
1994 if (exception_trace && printk_ratelimit())
1995 printk("%s%s[%d]: segfault at %08lx pc %08lx "
1996 "sp %08lx ecr %lu\n",
1997 diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
1998 index 568885a..f8008df 100644
1999 --- a/arch/blackfin/include/asm/cache.h
2000 +++ b/arch/blackfin/include/asm/cache.h
2001 @@ -7,6 +7,7 @@
2002 #ifndef __ARCH_BLACKFIN_CACHE_H
2003 #define __ARCH_BLACKFIN_CACHE_H
2004
2005 +#include <linux/const.h>
2006 #include <linux/linkage.h> /* for asmlinkage */
2007
2008 /*
2009 @@ -14,7 +15,7 @@
2010 * Blackfin loads 32 bytes for cache
2011 */
2012 #define L1_CACHE_SHIFT 5
2013 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2014 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2015 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2016
2017 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
2018 diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
2019 index aea2718..3639a60 100644
2020 --- a/arch/cris/include/arch-v10/arch/cache.h
2021 +++ b/arch/cris/include/arch-v10/arch/cache.h
2022 @@ -1,8 +1,9 @@
2023 #ifndef _ASM_ARCH_CACHE_H
2024 #define _ASM_ARCH_CACHE_H
2025
2026 +#include <linux/const.h>
2027 /* Etrax 100LX have 32-byte cache-lines. */
2028 -#define L1_CACHE_BYTES 32
2029 #define L1_CACHE_SHIFT 5
2030 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2031
2032 #endif /* _ASM_ARCH_CACHE_H */
2033 diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
2034 index 1de779f..336fad3 100644
2035 --- a/arch/cris/include/arch-v32/arch/cache.h
2036 +++ b/arch/cris/include/arch-v32/arch/cache.h
2037 @@ -1,11 +1,12 @@
2038 #ifndef _ASM_CRIS_ARCH_CACHE_H
2039 #define _ASM_CRIS_ARCH_CACHE_H
2040
2041 +#include <linux/const.h>
2042 #include <arch/hwregs/dma.h>
2043
2044 /* A cache-line is 32 bytes. */
2045 -#define L1_CACHE_BYTES 32
2046 #define L1_CACHE_SHIFT 5
2047 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2048
2049 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
2050
2051 diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
2052 index 0d8a7d6..d0c9ff5 100644
2053 --- a/arch/frv/include/asm/atomic.h
2054 +++ b/arch/frv/include/asm/atomic.h
2055 @@ -241,6 +241,16 @@ extern uint32_t __xchg_32(uint32_t i, volatile void *v);
2056 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
2057 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
2058
2059 +#define atomic64_read_unchecked(v) atomic64_read(v)
2060 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2061 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2062 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2063 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2064 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2065 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2066 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2067 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2068 +
2069 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
2070 {
2071 int c, old;
2072 diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
2073 index 2797163..c2a401d 100644
2074 --- a/arch/frv/include/asm/cache.h
2075 +++ b/arch/frv/include/asm/cache.h
2076 @@ -12,10 +12,11 @@
2077 #ifndef __ASM_CACHE_H
2078 #define __ASM_CACHE_H
2079
2080 +#include <linux/const.h>
2081
2082 /* bytes per L1 cache line */
2083 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
2084 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2085 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2086
2087 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
2088 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
2089 diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
2090 index f8e16b2..c73ff79 100644
2091 --- a/arch/frv/include/asm/kmap_types.h
2092 +++ b/arch/frv/include/asm/kmap_types.h
2093 @@ -23,6 +23,7 @@ enum km_type {
2094 KM_IRQ1,
2095 KM_SOFTIRQ0,
2096 KM_SOFTIRQ1,
2097 + KM_CLEARPAGE,
2098 KM_TYPE_NR
2099 };
2100
2101 diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
2102 index 385fd30..6c3d97e 100644
2103 --- a/arch/frv/mm/elf-fdpic.c
2104 +++ b/arch/frv/mm/elf-fdpic.c
2105 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2106 if (addr) {
2107 addr = PAGE_ALIGN(addr);
2108 vma = find_vma(current->mm, addr);
2109 - if (TASK_SIZE - len >= addr &&
2110 - (!vma || addr + len <= vma->vm_start))
2111 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2112 goto success;
2113 }
2114
2115 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2116 for (; vma; vma = vma->vm_next) {
2117 if (addr > limit)
2118 break;
2119 - if (addr + len <= vma->vm_start)
2120 + if (check_heap_stack_gap(vma, addr, len))
2121 goto success;
2122 addr = vma->vm_end;
2123 }
2124 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2125 for (; vma; vma = vma->vm_next) {
2126 if (addr > limit)
2127 break;
2128 - if (addr + len <= vma->vm_start)
2129 + if (check_heap_stack_gap(vma, addr, len))
2130 goto success;
2131 addr = vma->vm_end;
2132 }
2133 diff --git a/arch/h8300/include/asm/cache.h b/arch/h8300/include/asm/cache.h
2134 index c635028..6d9445a 100644
2135 --- a/arch/h8300/include/asm/cache.h
2136 +++ b/arch/h8300/include/asm/cache.h
2137 @@ -1,8 +1,10 @@
2138 #ifndef __ARCH_H8300_CACHE_H
2139 #define __ARCH_H8300_CACHE_H
2140
2141 +#include <linux/const.h>
2142 +
2143 /* bytes per L1 cache line */
2144 -#define L1_CACHE_BYTES 4
2145 +#define L1_CACHE_BYTES _AC(4,UL)
2146
2147 /* m68k-elf-gcc 2.95.2 doesn't like these */
2148
2149 diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
2150 index 0f01de2..d37d309 100644
2151 --- a/arch/hexagon/include/asm/cache.h
2152 +++ b/arch/hexagon/include/asm/cache.h
2153 @@ -21,9 +21,11 @@
2154 #ifndef __ASM_CACHE_H
2155 #define __ASM_CACHE_H
2156
2157 +#include <linux/const.h>
2158 +
2159 /* Bytes per L1 cache line */
2160 -#define L1_CACHE_SHIFT (5)
2161 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2162 +#define L1_CACHE_SHIFT 5
2163 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2164
2165 #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
2166 #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
2167 diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
2168 index 3fad89e..3047da5 100644
2169 --- a/arch/ia64/include/asm/atomic.h
2170 +++ b/arch/ia64/include/asm/atomic.h
2171 @@ -209,6 +209,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
2172 #define atomic64_inc(v) atomic64_add(1, (v))
2173 #define atomic64_dec(v) atomic64_sub(1, (v))
2174
2175 +#define atomic64_read_unchecked(v) atomic64_read(v)
2176 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2177 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2178 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2179 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2180 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2181 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2182 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2183 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2184 +
2185 /* Atomic operations are already serializing */
2186 #define smp_mb__before_atomic_dec() barrier()
2187 #define smp_mb__after_atomic_dec() barrier()
2188 diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
2189 index 988254a..e1ee885 100644
2190 --- a/arch/ia64/include/asm/cache.h
2191 +++ b/arch/ia64/include/asm/cache.h
2192 @@ -1,6 +1,7 @@
2193 #ifndef _ASM_IA64_CACHE_H
2194 #define _ASM_IA64_CACHE_H
2195
2196 +#include <linux/const.h>
2197
2198 /*
2199 * Copyright (C) 1998-2000 Hewlett-Packard Co
2200 @@ -9,7 +10,7 @@
2201
2202 /* Bytes per L1 (data) cache line. */
2203 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
2204 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2205 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2206
2207 #ifdef CONFIG_SMP
2208 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
2209 diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
2210 index b5298eb..67c6e62 100644
2211 --- a/arch/ia64/include/asm/elf.h
2212 +++ b/arch/ia64/include/asm/elf.h
2213 @@ -42,6 +42,13 @@
2214 */
2215 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
2216
2217 +#ifdef CONFIG_PAX_ASLR
2218 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
2219 +
2220 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2221 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2222 +#endif
2223 +
2224 #define PT_IA_64_UNWIND 0x70000001
2225
2226 /* IA-64 relocations: */
2227 diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
2228 index 1a97af3..7529d31 100644
2229 --- a/arch/ia64/include/asm/pgtable.h
2230 +++ b/arch/ia64/include/asm/pgtable.h
2231 @@ -12,7 +12,7 @@
2232 * David Mosberger-Tang <davidm@hpl.hp.com>
2233 */
2234
2235 -
2236 +#include <linux/const.h>
2237 #include <asm/mman.h>
2238 #include <asm/page.h>
2239 #include <asm/processor.h>
2240 @@ -143,6 +143,17 @@
2241 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2242 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2243 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
2244 +
2245 +#ifdef CONFIG_PAX_PAGEEXEC
2246 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
2247 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2248 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2249 +#else
2250 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
2251 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
2252 +# define PAGE_COPY_NOEXEC PAGE_COPY
2253 +#endif
2254 +
2255 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
2256 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
2257 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
2258 diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
2259 index b77768d..e0795eb 100644
2260 --- a/arch/ia64/include/asm/spinlock.h
2261 +++ b/arch/ia64/include/asm/spinlock.h
2262 @@ -72,7 +72,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
2263 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
2264
2265 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
2266 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
2267 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
2268 }
2269
2270 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
2271 diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
2272 index 449c8c0..432a3d2 100644
2273 --- a/arch/ia64/include/asm/uaccess.h
2274 +++ b/arch/ia64/include/asm/uaccess.h
2275 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
2276 const void *__cu_from = (from); \
2277 long __cu_len = (n); \
2278 \
2279 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
2280 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
2281 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
2282 __cu_len; \
2283 })
2284 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
2285 long __cu_len = (n); \
2286 \
2287 __chk_user_ptr(__cu_from); \
2288 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
2289 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
2290 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
2291 __cu_len; \
2292 })
2293 diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
2294 index 24603be..948052d 100644
2295 --- a/arch/ia64/kernel/module.c
2296 +++ b/arch/ia64/kernel/module.c
2297 @@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
2298 void
2299 module_free (struct module *mod, void *module_region)
2300 {
2301 - if (mod && mod->arch.init_unw_table &&
2302 - module_region == mod->module_init) {
2303 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
2304 unw_remove_unwind_table(mod->arch.init_unw_table);
2305 mod->arch.init_unw_table = NULL;
2306 }
2307 @@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
2308 }
2309
2310 static inline int
2311 +in_init_rx (const struct module *mod, uint64_t addr)
2312 +{
2313 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
2314 +}
2315 +
2316 +static inline int
2317 +in_init_rw (const struct module *mod, uint64_t addr)
2318 +{
2319 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
2320 +}
2321 +
2322 +static inline int
2323 in_init (const struct module *mod, uint64_t addr)
2324 {
2325 - return addr - (uint64_t) mod->module_init < mod->init_size;
2326 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
2327 +}
2328 +
2329 +static inline int
2330 +in_core_rx (const struct module *mod, uint64_t addr)
2331 +{
2332 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
2333 +}
2334 +
2335 +static inline int
2336 +in_core_rw (const struct module *mod, uint64_t addr)
2337 +{
2338 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
2339 }
2340
2341 static inline int
2342 in_core (const struct module *mod, uint64_t addr)
2343 {
2344 - return addr - (uint64_t) mod->module_core < mod->core_size;
2345 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
2346 }
2347
2348 static inline int
2349 @@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
2350 break;
2351
2352 case RV_BDREL:
2353 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
2354 + if (in_init_rx(mod, val))
2355 + val -= (uint64_t) mod->module_init_rx;
2356 + else if (in_init_rw(mod, val))
2357 + val -= (uint64_t) mod->module_init_rw;
2358 + else if (in_core_rx(mod, val))
2359 + val -= (uint64_t) mod->module_core_rx;
2360 + else if (in_core_rw(mod, val))
2361 + val -= (uint64_t) mod->module_core_rw;
2362 break;
2363
2364 case RV_LTV:
2365 @@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
2366 * addresses have been selected...
2367 */
2368 uint64_t gp;
2369 - if (mod->core_size > MAX_LTOFF)
2370 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
2371 /*
2372 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
2373 * at the end of the module.
2374 */
2375 - gp = mod->core_size - MAX_LTOFF / 2;
2376 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
2377 else
2378 - gp = mod->core_size / 2;
2379 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
2380 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
2381 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
2382 mod->arch.gp = gp;
2383 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
2384 }
2385 diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
2386 index 609d500..7dde2a8 100644
2387 --- a/arch/ia64/kernel/sys_ia64.c
2388 +++ b/arch/ia64/kernel/sys_ia64.c
2389 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
2390 if (REGION_NUMBER(addr) == RGN_HPAGE)
2391 addr = 0;
2392 #endif
2393 +
2394 +#ifdef CONFIG_PAX_RANDMMAP
2395 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2396 + addr = mm->free_area_cache;
2397 + else
2398 +#endif
2399 +
2400 if (!addr)
2401 addr = mm->free_area_cache;
2402
2403 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
2404 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
2405 /* At this point: (!vma || addr < vma->vm_end). */
2406 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
2407 - if (start_addr != TASK_UNMAPPED_BASE) {
2408 + if (start_addr != mm->mmap_base) {
2409 /* Start a new search --- just in case we missed some holes. */
2410 - addr = TASK_UNMAPPED_BASE;
2411 + addr = mm->mmap_base;
2412 goto full_search;
2413 }
2414 return -ENOMEM;
2415 }
2416 - if (!vma || addr + len <= vma->vm_start) {
2417 + if (check_heap_stack_gap(vma, addr, len)) {
2418 /* Remember the address where we stopped this search: */
2419 mm->free_area_cache = addr + len;
2420 return addr;
2421 diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
2422 index 53c0ba0..2accdde 100644
2423 --- a/arch/ia64/kernel/vmlinux.lds.S
2424 +++ b/arch/ia64/kernel/vmlinux.lds.S
2425 @@ -199,7 +199,7 @@ SECTIONS {
2426 /* Per-cpu data: */
2427 . = ALIGN(PERCPU_PAGE_SIZE);
2428 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
2429 - __phys_per_cpu_start = __per_cpu_load;
2430 + __phys_per_cpu_start = per_cpu_load;
2431 /*
2432 * ensure percpu data fits
2433 * into percpu page size
2434 diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
2435 index 20b3593..1ce77f0 100644
2436 --- a/arch/ia64/mm/fault.c
2437 +++ b/arch/ia64/mm/fault.c
2438 @@ -73,6 +73,23 @@ mapped_kernel_page_is_present (unsigned long address)
2439 return pte_present(pte);
2440 }
2441
2442 +#ifdef CONFIG_PAX_PAGEEXEC
2443 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2444 +{
2445 + unsigned long i;
2446 +
2447 + printk(KERN_ERR "PAX: bytes at PC: ");
2448 + for (i = 0; i < 8; i++) {
2449 + unsigned int c;
2450 + if (get_user(c, (unsigned int *)pc+i))
2451 + printk(KERN_CONT "???????? ");
2452 + else
2453 + printk(KERN_CONT "%08x ", c);
2454 + }
2455 + printk("\n");
2456 +}
2457 +#endif
2458 +
2459 void __kprobes
2460 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
2461 {
2462 @@ -146,9 +163,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
2463 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
2464 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
2465
2466 - if ((vma->vm_flags & mask) != mask)
2467 + if ((vma->vm_flags & mask) != mask) {
2468 +
2469 +#ifdef CONFIG_PAX_PAGEEXEC
2470 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
2471 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
2472 + goto bad_area;
2473 +
2474 + up_read(&mm->mmap_sem);
2475 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
2476 + do_group_exit(SIGKILL);
2477 + }
2478 +#endif
2479 +
2480 goto bad_area;
2481
2482 + }
2483 +
2484 /*
2485 * If for any reason at all we couldn't handle the fault, make
2486 * sure we exit gracefully rather than endlessly redo the
2487 diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
2488 index 5ca674b..e0e1b70 100644
2489 --- a/arch/ia64/mm/hugetlbpage.c
2490 +++ b/arch/ia64/mm/hugetlbpage.c
2491 @@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
2492 /* At this point: (!vmm || addr < vmm->vm_end). */
2493 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
2494 return -ENOMEM;
2495 - if (!vmm || (addr + len) <= vmm->vm_start)
2496 + if (check_heap_stack_gap(vmm, addr, len))
2497 return addr;
2498 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
2499 }
2500 diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
2501 index 13df239d..cb52116 100644
2502 --- a/arch/ia64/mm/init.c
2503 +++ b/arch/ia64/mm/init.c
2504 @@ -121,6 +121,19 @@ ia64_init_addr_space (void)
2505 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
2506 vma->vm_end = vma->vm_start + PAGE_SIZE;
2507 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
2508 +
2509 +#ifdef CONFIG_PAX_PAGEEXEC
2510 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
2511 + vma->vm_flags &= ~VM_EXEC;
2512 +
2513 +#ifdef CONFIG_PAX_MPROTECT
2514 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
2515 + vma->vm_flags &= ~VM_MAYEXEC;
2516 +#endif
2517 +
2518 + }
2519 +#endif
2520 +
2521 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
2522 down_write(&current->mm->mmap_sem);
2523 if (insert_vm_struct(current->mm, vma)) {
2524 diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
2525 index 40b3ee9..8c2c112 100644
2526 --- a/arch/m32r/include/asm/cache.h
2527 +++ b/arch/m32r/include/asm/cache.h
2528 @@ -1,8 +1,10 @@
2529 #ifndef _ASM_M32R_CACHE_H
2530 #define _ASM_M32R_CACHE_H
2531
2532 +#include <linux/const.h>
2533 +
2534 /* L1 cache line size */
2535 #define L1_CACHE_SHIFT 4
2536 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2537 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2538
2539 #endif /* _ASM_M32R_CACHE_H */
2540 diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
2541 index 82abd15..d95ae5d 100644
2542 --- a/arch/m32r/lib/usercopy.c
2543 +++ b/arch/m32r/lib/usercopy.c
2544 @@ -14,6 +14,9 @@
2545 unsigned long
2546 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
2547 {
2548 + if ((long)n < 0)
2549 + return n;
2550 +
2551 prefetch(from);
2552 if (access_ok(VERIFY_WRITE, to, n))
2553 __copy_user(to,from,n);
2554 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
2555 unsigned long
2556 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
2557 {
2558 + if ((long)n < 0)
2559 + return n;
2560 +
2561 prefetchw(to);
2562 if (access_ok(VERIFY_READ, from, n))
2563 __copy_user_zeroing(to,from,n);
2564 diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
2565 index 0395c51..5f26031 100644
2566 --- a/arch/m68k/include/asm/cache.h
2567 +++ b/arch/m68k/include/asm/cache.h
2568 @@ -4,9 +4,11 @@
2569 #ifndef __ARCH_M68K_CACHE_H
2570 #define __ARCH_M68K_CACHE_H
2571
2572 +#include <linux/const.h>
2573 +
2574 /* bytes per L1 cache line */
2575 #define L1_CACHE_SHIFT 4
2576 -#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
2577 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2578
2579 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
2580
2581 diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
2582 index 4efe96a..60e8699 100644
2583 --- a/arch/microblaze/include/asm/cache.h
2584 +++ b/arch/microblaze/include/asm/cache.h
2585 @@ -13,11 +13,12 @@
2586 #ifndef _ASM_MICROBLAZE_CACHE_H
2587 #define _ASM_MICROBLAZE_CACHE_H
2588
2589 +#include <linux/const.h>
2590 #include <asm/registers.h>
2591
2592 #define L1_CACHE_SHIFT 5
2593 /* word-granular cache in microblaze */
2594 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2595 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2596
2597 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2598
2599 diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
2600 index 1d93f81..67794d0 100644
2601 --- a/arch/mips/include/asm/atomic.h
2602 +++ b/arch/mips/include/asm/atomic.h
2603 @@ -21,6 +21,10 @@
2604 #include <asm/war.h>
2605 #include <asm/system.h>
2606
2607 +#ifdef CONFIG_GENERIC_ATOMIC64
2608 +#include <asm-generic/atomic64.h>
2609 +#endif
2610 +
2611 #define ATOMIC_INIT(i) { (i) }
2612
2613 /*
2614 @@ -765,6 +769,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
2615 */
2616 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
2617
2618 +#define atomic64_read_unchecked(v) atomic64_read(v)
2619 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2620 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2621 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2622 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2623 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2624 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2625 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2626 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2627 +
2628 #endif /* CONFIG_64BIT */
2629
2630 /*
2631 diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
2632 index b4db69f..8f3b093 100644
2633 --- a/arch/mips/include/asm/cache.h
2634 +++ b/arch/mips/include/asm/cache.h
2635 @@ -9,10 +9,11 @@
2636 #ifndef _ASM_CACHE_H
2637 #define _ASM_CACHE_H
2638
2639 +#include <linux/const.h>
2640 #include <kmalloc.h>
2641
2642 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
2643 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2644 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2645
2646 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
2647 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2648 diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
2649 index 455c0ac..ad65fbe 100644
2650 --- a/arch/mips/include/asm/elf.h
2651 +++ b/arch/mips/include/asm/elf.h
2652 @@ -372,13 +372,16 @@ extern const char *__elf_platform;
2653 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
2654 #endif
2655
2656 +#ifdef CONFIG_PAX_ASLR
2657 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2658 +
2659 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2660 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2661 +#endif
2662 +
2663 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2664 struct linux_binprm;
2665 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
2666 int uses_interp);
2667
2668 -struct mm_struct;
2669 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2670 -#define arch_randomize_brk arch_randomize_brk
2671 -
2672 #endif /* _ASM_ELF_H */
2673 diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
2674 index da9bd7d..91aa7ab 100644
2675 --- a/arch/mips/include/asm/page.h
2676 +++ b/arch/mips/include/asm/page.h
2677 @@ -98,7 +98,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
2678 #ifdef CONFIG_CPU_MIPS32
2679 typedef struct { unsigned long pte_low, pte_high; } pte_t;
2680 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
2681 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
2682 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
2683 #else
2684 typedef struct { unsigned long long pte; } pte_t;
2685 #define pte_val(x) ((x).pte)
2686 diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
2687 index 6018c80..7c37203 100644
2688 --- a/arch/mips/include/asm/system.h
2689 +++ b/arch/mips/include/asm/system.h
2690 @@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
2691 */
2692 #define __ARCH_WANT_UNLOCKED_CTXSW
2693
2694 -extern unsigned long arch_align_stack(unsigned long sp);
2695 +#define arch_align_stack(x) ((x) & ~0xfUL)
2696
2697 #endif /* _ASM_SYSTEM_H */
2698 diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
2699 index 9fdd8bc..4bd7f1a 100644
2700 --- a/arch/mips/kernel/binfmt_elfn32.c
2701 +++ b/arch/mips/kernel/binfmt_elfn32.c
2702 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2703 #undef ELF_ET_DYN_BASE
2704 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2705
2706 +#ifdef CONFIG_PAX_ASLR
2707 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2708 +
2709 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2710 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2711 +#endif
2712 +
2713 #include <asm/processor.h>
2714 #include <linux/module.h>
2715 #include <linux/elfcore.h>
2716 diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
2717 index ff44823..97f8906 100644
2718 --- a/arch/mips/kernel/binfmt_elfo32.c
2719 +++ b/arch/mips/kernel/binfmt_elfo32.c
2720 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2721 #undef ELF_ET_DYN_BASE
2722 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2723
2724 +#ifdef CONFIG_PAX_ASLR
2725 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2726 +
2727 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2728 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2729 +#endif
2730 +
2731 #include <asm/processor.h>
2732
2733 /*
2734 diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
2735 index 7955409..ceaea7c 100644
2736 --- a/arch/mips/kernel/process.c
2737 +++ b/arch/mips/kernel/process.c
2738 @@ -483,15 +483,3 @@ unsigned long get_wchan(struct task_struct *task)
2739 out:
2740 return pc;
2741 }
2742 -
2743 -/*
2744 - * Don't forget that the stack pointer must be aligned on a 8 bytes
2745 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
2746 - */
2747 -unsigned long arch_align_stack(unsigned long sp)
2748 -{
2749 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2750 - sp -= get_random_int() & ~PAGE_MASK;
2751 -
2752 - return sp & ALMASK;
2753 -}
2754 diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
2755 index 69ebd58..e4bff83 100644
2756 --- a/arch/mips/mm/fault.c
2757 +++ b/arch/mips/mm/fault.c
2758 @@ -28,6 +28,23 @@
2759 #include <asm/highmem.h> /* For VMALLOC_END */
2760 #include <linux/kdebug.h>
2761
2762 +#ifdef CONFIG_PAX_PAGEEXEC
2763 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2764 +{
2765 + unsigned long i;
2766 +
2767 + printk(KERN_ERR "PAX: bytes at PC: ");
2768 + for (i = 0; i < 5; i++) {
2769 + unsigned int c;
2770 + if (get_user(c, (unsigned int *)pc+i))
2771 + printk(KERN_CONT "???????? ");
2772 + else
2773 + printk(KERN_CONT "%08x ", c);
2774 + }
2775 + printk("\n");
2776 +}
2777 +#endif
2778 +
2779 /*
2780 * This routine handles page faults. It determines the address,
2781 * and the problem, and then passes it off to one of the appropriate
2782 diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
2783 index 302d779..7d35bf8 100644
2784 --- a/arch/mips/mm/mmap.c
2785 +++ b/arch/mips/mm/mmap.c
2786 @@ -95,6 +95,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
2787 do_color_align = 1;
2788
2789 /* requesting a specific address */
2790 +
2791 +#ifdef CONFIG_PAX_RANDMMAP
2792 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
2793 +#endif
2794 +
2795 if (addr) {
2796 if (do_color_align)
2797 addr = COLOUR_ALIGN(addr, pgoff);
2798 @@ -102,8 +107,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
2799 addr = PAGE_ALIGN(addr);
2800
2801 vma = find_vma(mm, addr);
2802 - if (TASK_SIZE - len >= addr &&
2803 - (!vma || addr + len <= vma->vm_start))
2804 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len))
2805 return addr;
2806 }
2807
2808 @@ -118,7 +122,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
2809 /* At this point: (!vma || addr < vma->vm_end). */
2810 if (TASK_SIZE - len < addr)
2811 return -ENOMEM;
2812 - if (!vma || addr + len <= vma->vm_start)
2813 + if (check_heap_stack_gap(vmm, addr, len))
2814 return addr;
2815 addr = vma->vm_end;
2816 if (do_color_align)
2817 @@ -145,7 +149,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
2818 /* make sure it can fit in the remaining address space */
2819 if (likely(addr > len)) {
2820 vma = find_vma(mm, addr - len);
2821 - if (!vma || addr <= vma->vm_start) {
2822 + if (check_heap_stack_gap(vmm, addr - len, len))
2823 /* cache the address as a hint for next time */
2824 return mm->free_area_cache = addr - len;
2825 }
2826 @@ -165,7 +169,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
2827 * return with success:
2828 */
2829 vma = find_vma(mm, addr);
2830 - if (likely(!vma || addr + len <= vma->vm_start)) {
2831 + if (check_heap_stack_gap(vmm, addr, len)) {
2832 /* cache the address as a hint for next time */
2833 return mm->free_area_cache = addr;
2834 }
2835 @@ -242,30 +246,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
2836 mm->unmap_area = arch_unmap_area_topdown;
2837 }
2838 }
2839 -
2840 -static inline unsigned long brk_rnd(void)
2841 -{
2842 - unsigned long rnd = get_random_int();
2843 -
2844 - rnd = rnd << PAGE_SHIFT;
2845 - /* 8MB for 32bit, 256MB for 64bit */
2846 - if (TASK_IS_32BIT_ADDR)
2847 - rnd = rnd & 0x7ffffful;
2848 - else
2849 - rnd = rnd & 0xffffffful;
2850 -
2851 - return rnd;
2852 -}
2853 -
2854 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2855 -{
2856 - unsigned long base = mm->brk;
2857 - unsigned long ret;
2858 -
2859 - ret = PAGE_ALIGN(base + brk_rnd());
2860 -
2861 - if (ret < mm->brk)
2862 - return mm->brk;
2863 -
2864 - return ret;
2865 -}
2866 diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
2867 index 967d144..db12197 100644
2868 --- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
2869 +++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
2870 @@ -11,12 +11,14 @@
2871 #ifndef _ASM_PROC_CACHE_H
2872 #define _ASM_PROC_CACHE_H
2873
2874 +#include <linux/const.h>
2875 +
2876 /* L1 cache */
2877
2878 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
2879 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
2880 -#define L1_CACHE_BYTES 16 /* bytes per entry */
2881 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
2882 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
2883 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
2884
2885 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
2886 diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
2887 index bcb5df2..84fabd2 100644
2888 --- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
2889 +++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
2890 @@ -16,13 +16,15 @@
2891 #ifndef _ASM_PROC_CACHE_H
2892 #define _ASM_PROC_CACHE_H
2893
2894 +#include <linux/const.h>
2895 +
2896 /*
2897 * L1 cache
2898 */
2899 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
2900 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
2901 -#define L1_CACHE_BYTES 32 /* bytes per entry */
2902 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
2903 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
2904 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
2905
2906 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
2907 diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
2908 index 4ce7a01..449202a 100644
2909 --- a/arch/openrisc/include/asm/cache.h
2910 +++ b/arch/openrisc/include/asm/cache.h
2911 @@ -19,11 +19,13 @@
2912 #ifndef __ASM_OPENRISC_CACHE_H
2913 #define __ASM_OPENRISC_CACHE_H
2914
2915 +#include <linux/const.h>
2916 +
2917 /* FIXME: How can we replace these with values from the CPU...
2918 * they shouldn't be hard-coded!
2919 */
2920
2921 -#define L1_CACHE_BYTES 16
2922 #define L1_CACHE_SHIFT 4
2923 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2924
2925 #endif /* __ASM_OPENRISC_CACHE_H */
2926 diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
2927 index 4054b31..a10c105 100644
2928 --- a/arch/parisc/include/asm/atomic.h
2929 +++ b/arch/parisc/include/asm/atomic.h
2930 @@ -335,6 +335,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
2931
2932 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
2933
2934 +#define atomic64_read_unchecked(v) atomic64_read(v)
2935 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2936 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2937 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2938 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2939 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2940 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2941 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2942 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2943 +
2944 #endif /* !CONFIG_64BIT */
2945
2946
2947 diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
2948 index 47f11c7..3420df2 100644
2949 --- a/arch/parisc/include/asm/cache.h
2950 +++ b/arch/parisc/include/asm/cache.h
2951 @@ -5,6 +5,7 @@
2952 #ifndef __ARCH_PARISC_CACHE_H
2953 #define __ARCH_PARISC_CACHE_H
2954
2955 +#include <linux/const.h>
2956
2957 /*
2958 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
2959 @@ -15,13 +16,13 @@
2960 * just ruin performance.
2961 */
2962 #ifdef CONFIG_PA20
2963 -#define L1_CACHE_BYTES 64
2964 #define L1_CACHE_SHIFT 6
2965 #else
2966 -#define L1_CACHE_BYTES 32
2967 #define L1_CACHE_SHIFT 5
2968 #endif
2969
2970 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2971 +
2972 #ifndef __ASSEMBLY__
2973
2974 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2975 diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
2976 index 19f6cb1..6c78cf2 100644
2977 --- a/arch/parisc/include/asm/elf.h
2978 +++ b/arch/parisc/include/asm/elf.h
2979 @@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
2980
2981 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
2982
2983 +#ifdef CONFIG_PAX_ASLR
2984 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
2985 +
2986 +#define PAX_DELTA_MMAP_LEN 16
2987 +#define PAX_DELTA_STACK_LEN 16
2988 +#endif
2989 +
2990 /* This yields a mask that user programs can use to figure out what
2991 instruction set this CPU supports. This could be done in user space,
2992 but it's not easy, and we've already done it here. */
2993 diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
2994 index 22dadeb..f6c2be4 100644
2995 --- a/arch/parisc/include/asm/pgtable.h
2996 +++ b/arch/parisc/include/asm/pgtable.h
2997 @@ -210,6 +210,17 @@ struct vm_area_struct;
2998 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
2999 #define PAGE_COPY PAGE_EXECREAD
3000 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
3001 +
3002 +#ifdef CONFIG_PAX_PAGEEXEC
3003 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
3004 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
3005 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
3006 +#else
3007 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
3008 +# define PAGE_COPY_NOEXEC PAGE_COPY
3009 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
3010 +#endif
3011 +
3012 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
3013 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
3014 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
3015 diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
3016 index 5e34ccf..672bc9c 100644
3017 --- a/arch/parisc/kernel/module.c
3018 +++ b/arch/parisc/kernel/module.c
3019 @@ -98,16 +98,38 @@
3020
3021 /* three functions to determine where in the module core
3022 * or init pieces the location is */
3023 +static inline int in_init_rx(struct module *me, void *loc)
3024 +{
3025 + return (loc >= me->module_init_rx &&
3026 + loc < (me->module_init_rx + me->init_size_rx));
3027 +}
3028 +
3029 +static inline int in_init_rw(struct module *me, void *loc)
3030 +{
3031 + return (loc >= me->module_init_rw &&
3032 + loc < (me->module_init_rw + me->init_size_rw));
3033 +}
3034 +
3035 static inline int in_init(struct module *me, void *loc)
3036 {
3037 - return (loc >= me->module_init &&
3038 - loc <= (me->module_init + me->init_size));
3039 + return in_init_rx(me, loc) || in_init_rw(me, loc);
3040 +}
3041 +
3042 +static inline int in_core_rx(struct module *me, void *loc)
3043 +{
3044 + return (loc >= me->module_core_rx &&
3045 + loc < (me->module_core_rx + me->core_size_rx));
3046 +}
3047 +
3048 +static inline int in_core_rw(struct module *me, void *loc)
3049 +{
3050 + return (loc >= me->module_core_rw &&
3051 + loc < (me->module_core_rw + me->core_size_rw));
3052 }
3053
3054 static inline int in_core(struct module *me, void *loc)
3055 {
3056 - return (loc >= me->module_core &&
3057 - loc <= (me->module_core + me->core_size));
3058 + return in_core_rx(me, loc) || in_core_rw(me, loc);
3059 }
3060
3061 static inline int in_local(struct module *me, void *loc)
3062 @@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
3063 }
3064
3065 /* align things a bit */
3066 - me->core_size = ALIGN(me->core_size, 16);
3067 - me->arch.got_offset = me->core_size;
3068 - me->core_size += gots * sizeof(struct got_entry);
3069 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
3070 + me->arch.got_offset = me->core_size_rw;
3071 + me->core_size_rw += gots * sizeof(struct got_entry);
3072
3073 - me->core_size = ALIGN(me->core_size, 16);
3074 - me->arch.fdesc_offset = me->core_size;
3075 - me->core_size += fdescs * sizeof(Elf_Fdesc);
3076 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
3077 + me->arch.fdesc_offset = me->core_size_rw;
3078 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
3079
3080 me->arch.got_max = gots;
3081 me->arch.fdesc_max = fdescs;
3082 @@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
3083
3084 BUG_ON(value == 0);
3085
3086 - got = me->module_core + me->arch.got_offset;
3087 + got = me->module_core_rw + me->arch.got_offset;
3088 for (i = 0; got[i].addr; i++)
3089 if (got[i].addr == value)
3090 goto out;
3091 @@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
3092 #ifdef CONFIG_64BIT
3093 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
3094 {
3095 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
3096 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
3097
3098 if (!value) {
3099 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
3100 @@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
3101
3102 /* Create new one */
3103 fdesc->addr = value;
3104 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
3105 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
3106 return (Elf_Addr)fdesc;
3107 }
3108 #endif /* CONFIG_64BIT */
3109 @@ -845,7 +867,7 @@ register_unwind_table(struct module *me,
3110
3111 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
3112 end = table + sechdrs[me->arch.unwind_section].sh_size;
3113 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
3114 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
3115
3116 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
3117 me->arch.unwind_section, table, end, gp);
3118 diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
3119 index c9b9322..02d8940 100644
3120 --- a/arch/parisc/kernel/sys_parisc.c
3121 +++ b/arch/parisc/kernel/sys_parisc.c
3122 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
3123 /* At this point: (!vma || addr < vma->vm_end). */
3124 if (TASK_SIZE - len < addr)
3125 return -ENOMEM;
3126 - if (!vma || addr + len <= vma->vm_start)
3127 + if (check_heap_stack_gap(vma, addr, len))
3128 return addr;
3129 addr = vma->vm_end;
3130 }
3131 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
3132 /* At this point: (!vma || addr < vma->vm_end). */
3133 if (TASK_SIZE - len < addr)
3134 return -ENOMEM;
3135 - if (!vma || addr + len <= vma->vm_start)
3136 + if (check_heap_stack_gap(vma, addr, len))
3137 return addr;
3138 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
3139 if (addr < vma->vm_end) /* handle wraparound */
3140 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
3141 if (flags & MAP_FIXED)
3142 return addr;
3143 if (!addr)
3144 - addr = TASK_UNMAPPED_BASE;
3145 + addr = current->mm->mmap_base;
3146
3147 if (filp) {
3148 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
3149 diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
3150 index f19e660..414fe24 100644
3151 --- a/arch/parisc/kernel/traps.c
3152 +++ b/arch/parisc/kernel/traps.c
3153 @@ -733,9 +733,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
3154
3155 down_read(&current->mm->mmap_sem);
3156 vma = find_vma(current->mm,regs->iaoq[0]);
3157 - if (vma && (regs->iaoq[0] >= vma->vm_start)
3158 - && (vma->vm_flags & VM_EXEC)) {
3159 -
3160 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
3161 fault_address = regs->iaoq[0];
3162 fault_space = regs->iasq[0];
3163
3164 diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
3165 index 18162ce..94de376 100644
3166 --- a/arch/parisc/mm/fault.c
3167 +++ b/arch/parisc/mm/fault.c
3168 @@ -15,6 +15,7 @@
3169 #include <linux/sched.h>
3170 #include <linux/interrupt.h>
3171 #include <linux/module.h>
3172 +#include <linux/unistd.h>
3173
3174 #include <asm/uaccess.h>
3175 #include <asm/traps.h>
3176 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
3177 static unsigned long
3178 parisc_acctyp(unsigned long code, unsigned int inst)
3179 {
3180 - if (code == 6 || code == 16)
3181 + if (code == 6 || code == 7 || code == 16)
3182 return VM_EXEC;
3183
3184 switch (inst & 0xf0000000) {
3185 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
3186 }
3187 #endif
3188
3189 +#ifdef CONFIG_PAX_PAGEEXEC
3190 +/*
3191 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
3192 + *
3193 + * returns 1 when task should be killed
3194 + * 2 when rt_sigreturn trampoline was detected
3195 + * 3 when unpatched PLT trampoline was detected
3196 + */
3197 +static int pax_handle_fetch_fault(struct pt_regs *regs)
3198 +{
3199 +
3200 +#ifdef CONFIG_PAX_EMUPLT
3201 + int err;
3202 +
3203 + do { /* PaX: unpatched PLT emulation */
3204 + unsigned int bl, depwi;
3205 +
3206 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
3207 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
3208 +
3209 + if (err)
3210 + break;
3211 +
3212 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
3213 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
3214 +
3215 + err = get_user(ldw, (unsigned int *)addr);
3216 + err |= get_user(bv, (unsigned int *)(addr+4));
3217 + err |= get_user(ldw2, (unsigned int *)(addr+8));
3218 +
3219 + if (err)
3220 + break;
3221 +
3222 + if (ldw == 0x0E801096U &&
3223 + bv == 0xEAC0C000U &&
3224 + ldw2 == 0x0E881095U)
3225 + {
3226 + unsigned int resolver, map;
3227 +
3228 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
3229 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
3230 + if (err)
3231 + break;
3232 +
3233 + regs->gr[20] = instruction_pointer(regs)+8;
3234 + regs->gr[21] = map;
3235 + regs->gr[22] = resolver;
3236 + regs->iaoq[0] = resolver | 3UL;
3237 + regs->iaoq[1] = regs->iaoq[0] + 4;
3238 + return 3;
3239 + }
3240 + }
3241 + } while (0);
3242 +#endif
3243 +
3244 +#ifdef CONFIG_PAX_EMUTRAMP
3245 +
3246 +#ifndef CONFIG_PAX_EMUSIGRT
3247 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
3248 + return 1;
3249 +#endif
3250 +
3251 + do { /* PaX: rt_sigreturn emulation */
3252 + unsigned int ldi1, ldi2, bel, nop;
3253 +
3254 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
3255 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
3256 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
3257 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
3258 +
3259 + if (err)
3260 + break;
3261 +
3262 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
3263 + ldi2 == 0x3414015AU &&
3264 + bel == 0xE4008200U &&
3265 + nop == 0x08000240U)
3266 + {
3267 + regs->gr[25] = (ldi1 & 2) >> 1;
3268 + regs->gr[20] = __NR_rt_sigreturn;
3269 + regs->gr[31] = regs->iaoq[1] + 16;
3270 + regs->sr[0] = regs->iasq[1];
3271 + regs->iaoq[0] = 0x100UL;
3272 + regs->iaoq[1] = regs->iaoq[0] + 4;
3273 + regs->iasq[0] = regs->sr[2];
3274 + regs->iasq[1] = regs->sr[2];
3275 + return 2;
3276 + }
3277 + } while (0);
3278 +#endif
3279 +
3280 + return 1;
3281 +}
3282 +
3283 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3284 +{
3285 + unsigned long i;
3286 +
3287 + printk(KERN_ERR "PAX: bytes at PC: ");
3288 + for (i = 0; i < 5; i++) {
3289 + unsigned int c;
3290 + if (get_user(c, (unsigned int *)pc+i))
3291 + printk(KERN_CONT "???????? ");
3292 + else
3293 + printk(KERN_CONT "%08x ", c);
3294 + }
3295 + printk("\n");
3296 +}
3297 +#endif
3298 +
3299 int fixup_exception(struct pt_regs *regs)
3300 {
3301 const struct exception_table_entry *fix;
3302 @@ -192,8 +303,33 @@ good_area:
3303
3304 acc_type = parisc_acctyp(code,regs->iir);
3305
3306 - if ((vma->vm_flags & acc_type) != acc_type)
3307 + if ((vma->vm_flags & acc_type) != acc_type) {
3308 +
3309 +#ifdef CONFIG_PAX_PAGEEXEC
3310 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
3311 + (address & ~3UL) == instruction_pointer(regs))
3312 + {
3313 + up_read(&mm->mmap_sem);
3314 + switch (pax_handle_fetch_fault(regs)) {
3315 +
3316 +#ifdef CONFIG_PAX_EMUPLT
3317 + case 3:
3318 + return;
3319 +#endif
3320 +
3321 +#ifdef CONFIG_PAX_EMUTRAMP
3322 + case 2:
3323 + return;
3324 +#endif
3325 +
3326 + }
3327 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
3328 + do_group_exit(SIGKILL);
3329 + }
3330 +#endif
3331 +
3332 goto bad_area;
3333 + }
3334
3335 /*
3336 * If for any reason at all we couldn't handle the fault, make
3337 diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
3338 index 02e41b5..ec6e26c 100644
3339 --- a/arch/powerpc/include/asm/atomic.h
3340 +++ b/arch/powerpc/include/asm/atomic.h
3341 @@ -469,6 +469,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
3342
3343 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3344
3345 +#define atomic64_read_unchecked(v) atomic64_read(v)
3346 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
3347 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
3348 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
3349 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
3350 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
3351 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
3352 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
3353 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
3354 +
3355 #endif /* __powerpc64__ */
3356
3357 #endif /* __KERNEL__ */
3358 diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
3359 index 4b50941..5605819 100644
3360 --- a/arch/powerpc/include/asm/cache.h
3361 +++ b/arch/powerpc/include/asm/cache.h
3362 @@ -3,6 +3,7 @@
3363
3364 #ifdef __KERNEL__
3365
3366 +#include <linux/const.h>
3367
3368 /* bytes per L1 cache line */
3369 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
3370 @@ -22,7 +23,7 @@
3371 #define L1_CACHE_SHIFT 7
3372 #endif
3373
3374 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
3375 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3376
3377 #define SMP_CACHE_BYTES L1_CACHE_BYTES
3378
3379 diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
3380 index 3bf9cca..e7457d0 100644
3381 --- a/arch/powerpc/include/asm/elf.h
3382 +++ b/arch/powerpc/include/asm/elf.h
3383 @@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
3384 the loader. We need to make sure that it is out of the way of the program
3385 that it will "exec", and that there is sufficient room for the brk. */
3386
3387 -extern unsigned long randomize_et_dyn(unsigned long base);
3388 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
3389 +#define ELF_ET_DYN_BASE (0x20000000)
3390 +
3391 +#ifdef CONFIG_PAX_ASLR
3392 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
3393 +
3394 +#ifdef __powerpc64__
3395 +#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
3396 +#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
3397 +#else
3398 +#define PAX_DELTA_MMAP_LEN 15
3399 +#define PAX_DELTA_STACK_LEN 15
3400 +#endif
3401 +#endif
3402
3403 /*
3404 * Our registers are always unsigned longs, whether we're a 32 bit
3405 @@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
3406 (0x7ff >> (PAGE_SHIFT - 12)) : \
3407 (0x3ffff >> (PAGE_SHIFT - 12)))
3408
3409 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
3410 -#define arch_randomize_brk arch_randomize_brk
3411 -
3412 #endif /* __KERNEL__ */
3413
3414 /*
3415 diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
3416 index bca8fdc..61e9580 100644
3417 --- a/arch/powerpc/include/asm/kmap_types.h
3418 +++ b/arch/powerpc/include/asm/kmap_types.h
3419 @@ -27,6 +27,7 @@ enum km_type {
3420 KM_PPC_SYNC_PAGE,
3421 KM_PPC_SYNC_ICACHE,
3422 KM_KDB,
3423 + KM_CLEARPAGE,
3424 KM_TYPE_NR
3425 };
3426
3427 diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
3428 index d4a7f64..451de1c 100644
3429 --- a/arch/powerpc/include/asm/mman.h
3430 +++ b/arch/powerpc/include/asm/mman.h
3431 @@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
3432 }
3433 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
3434
3435 -static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
3436 +static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
3437 {
3438 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
3439 }
3440 diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
3441 index f072e97..b436dee 100644
3442 --- a/arch/powerpc/include/asm/page.h
3443 +++ b/arch/powerpc/include/asm/page.h
3444 @@ -220,8 +220,9 @@ extern long long virt_phys_offset;
3445 * and needs to be executable. This means the whole heap ends
3446 * up being executable.
3447 */
3448 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
3449 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3450 +#define VM_DATA_DEFAULT_FLAGS32 \
3451 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
3452 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3453
3454 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
3455 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3456 @@ -249,6 +250,9 @@ extern long long virt_phys_offset;
3457 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
3458 #endif
3459
3460 +#define ktla_ktva(addr) (addr)
3461 +#define ktva_ktla(addr) (addr)
3462 +
3463 /*
3464 * Use the top bit of the higher-level page table entries to indicate whether
3465 * the entries we point to contain hugepages. This works because we know that
3466 diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
3467 index fed85e6..da5c71b 100644
3468 --- a/arch/powerpc/include/asm/page_64.h
3469 +++ b/arch/powerpc/include/asm/page_64.h
3470 @@ -146,15 +146,18 @@ do { \
3471 * stack by default, so in the absence of a PT_GNU_STACK program header
3472 * we turn execute permission off.
3473 */
3474 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
3475 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3476 +#define VM_STACK_DEFAULT_FLAGS32 \
3477 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
3478 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3479
3480 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
3481 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3482
3483 +#ifndef CONFIG_PAX_PAGEEXEC
3484 #define VM_STACK_DEFAULT_FLAGS \
3485 (is_32bit_task() ? \
3486 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
3487 +#endif
3488
3489 #include <asm-generic/getorder.h>
3490
3491 diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
3492 index 2e0e411..7899c68 100644
3493 --- a/arch/powerpc/include/asm/pgtable.h
3494 +++ b/arch/powerpc/include/asm/pgtable.h
3495 @@ -2,6 +2,7 @@
3496 #define _ASM_POWERPC_PGTABLE_H
3497 #ifdef __KERNEL__
3498
3499 +#include <linux/const.h>
3500 #ifndef __ASSEMBLY__
3501 #include <asm/processor.h> /* For TASK_SIZE */
3502 #include <asm/mmu.h>
3503 diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
3504 index 4aad413..85d86bf 100644
3505 --- a/arch/powerpc/include/asm/pte-hash32.h
3506 +++ b/arch/powerpc/include/asm/pte-hash32.h
3507 @@ -21,6 +21,7 @@
3508 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
3509 #define _PAGE_USER 0x004 /* usermode access allowed */
3510 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
3511 +#define _PAGE_EXEC _PAGE_GUARDED
3512 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
3513 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
3514 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
3515 diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
3516 index 7fdc2c0..e47a9b02d3 100644
3517 --- a/arch/powerpc/include/asm/reg.h
3518 +++ b/arch/powerpc/include/asm/reg.h
3519 @@ -212,6 +212,7 @@
3520 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
3521 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
3522 #define DSISR_NOHPTE 0x40000000 /* no translation found */
3523 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
3524 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
3525 #define DSISR_ISSTORE 0x02000000 /* access was a store */
3526 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
3527 diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
3528 index c377457..3c69fbc 100644
3529 --- a/arch/powerpc/include/asm/system.h
3530 +++ b/arch/powerpc/include/asm/system.h
3531 @@ -539,7 +539,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
3532 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
3533 #endif
3534
3535 -extern unsigned long arch_align_stack(unsigned long sp);
3536 +#define arch_align_stack(x) ((x) & ~0xfUL)
3537
3538 /* Used in very early kernel initialization. */
3539 extern unsigned long reloc_offset(void);
3540 diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
3541 index bd0fb84..a42a14b 100644
3542 --- a/arch/powerpc/include/asm/uaccess.h
3543 +++ b/arch/powerpc/include/asm/uaccess.h
3544 @@ -13,6 +13,8 @@
3545 #define VERIFY_READ 0
3546 #define VERIFY_WRITE 1
3547
3548 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
3549 +
3550 /*
3551 * The fs value determines whether argument validity checking should be
3552 * performed or not. If get_fs() == USER_DS, checking is performed, with
3553 @@ -327,52 +329,6 @@ do { \
3554 extern unsigned long __copy_tofrom_user(void __user *to,
3555 const void __user *from, unsigned long size);
3556
3557 -#ifndef __powerpc64__
3558 -
3559 -static inline unsigned long copy_from_user(void *to,
3560 - const void __user *from, unsigned long n)
3561 -{
3562 - unsigned long over;
3563 -
3564 - if (access_ok(VERIFY_READ, from, n))
3565 - return __copy_tofrom_user((__force void __user *)to, from, n);
3566 - if ((unsigned long)from < TASK_SIZE) {
3567 - over = (unsigned long)from + n - TASK_SIZE;
3568 - return __copy_tofrom_user((__force void __user *)to, from,
3569 - n - over) + over;
3570 - }
3571 - return n;
3572 -}
3573 -
3574 -static inline unsigned long copy_to_user(void __user *to,
3575 - const void *from, unsigned long n)
3576 -{
3577 - unsigned long over;
3578 -
3579 - if (access_ok(VERIFY_WRITE, to, n))
3580 - return __copy_tofrom_user(to, (__force void __user *)from, n);
3581 - if ((unsigned long)to < TASK_SIZE) {
3582 - over = (unsigned long)to + n - TASK_SIZE;
3583 - return __copy_tofrom_user(to, (__force void __user *)from,
3584 - n - over) + over;
3585 - }
3586 - return n;
3587 -}
3588 -
3589 -#else /* __powerpc64__ */
3590 -
3591 -#define __copy_in_user(to, from, size) \
3592 - __copy_tofrom_user((to), (from), (size))
3593 -
3594 -extern unsigned long copy_from_user(void *to, const void __user *from,
3595 - unsigned long n);
3596 -extern unsigned long copy_to_user(void __user *to, const void *from,
3597 - unsigned long n);
3598 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
3599 - unsigned long n);
3600 -
3601 -#endif /* __powerpc64__ */
3602 -
3603 static inline unsigned long __copy_from_user_inatomic(void *to,
3604 const void __user *from, unsigned long n)
3605 {
3606 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
3607 if (ret == 0)
3608 return 0;
3609 }
3610 +
3611 + if (!__builtin_constant_p(n))
3612 + check_object_size(to, n, false);
3613 +
3614 return __copy_tofrom_user((__force void __user *)to, from, n);
3615 }
3616
3617 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
3618 if (ret == 0)
3619 return 0;
3620 }
3621 +
3622 + if (!__builtin_constant_p(n))
3623 + check_object_size(from, n, true);
3624 +
3625 return __copy_tofrom_user(to, (__force const void __user *)from, n);
3626 }
3627
3628 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
3629 return __copy_to_user_inatomic(to, from, size);
3630 }
3631
3632 +#ifndef __powerpc64__
3633 +
3634 +static inline unsigned long __must_check copy_from_user(void *to,
3635 + const void __user *from, unsigned long n)
3636 +{
3637 + unsigned long over;
3638 +
3639 + if ((long)n < 0)
3640 + return n;
3641 +
3642 + if (access_ok(VERIFY_READ, from, n)) {
3643 + if (!__builtin_constant_p(n))
3644 + check_object_size(to, n, false);
3645 + return __copy_tofrom_user((__force void __user *)to, from, n);
3646 + }
3647 + if ((unsigned long)from < TASK_SIZE) {
3648 + over = (unsigned long)from + n - TASK_SIZE;
3649 + if (!__builtin_constant_p(n - over))
3650 + check_object_size(to, n - over, false);
3651 + return __copy_tofrom_user((__force void __user *)to, from,
3652 + n - over) + over;
3653 + }
3654 + return n;
3655 +}
3656 +
3657 +static inline unsigned long __must_check copy_to_user(void __user *to,
3658 + const void *from, unsigned long n)
3659 +{
3660 + unsigned long over;
3661 +
3662 + if ((long)n < 0)
3663 + return n;
3664 +
3665 + if (access_ok(VERIFY_WRITE, to, n)) {
3666 + if (!__builtin_constant_p(n))
3667 + check_object_size(from, n, true);
3668 + return __copy_tofrom_user(to, (__force void __user *)from, n);
3669 + }
3670 + if ((unsigned long)to < TASK_SIZE) {
3671 + over = (unsigned long)to + n - TASK_SIZE;
3672 + if (!__builtin_constant_p(n))
3673 + check_object_size(from, n - over, true);
3674 + return __copy_tofrom_user(to, (__force void __user *)from,
3675 + n - over) + over;
3676 + }
3677 + return n;
3678 +}
3679 +
3680 +#else /* __powerpc64__ */
3681 +
3682 +#define __copy_in_user(to, from, size) \
3683 + __copy_tofrom_user((to), (from), (size))
3684 +
3685 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
3686 +{
3687 + if ((long)n < 0 || n > INT_MAX)
3688 + return n;
3689 +
3690 + if (!__builtin_constant_p(n))
3691 + check_object_size(to, n, false);
3692 +
3693 + if (likely(access_ok(VERIFY_READ, from, n)))
3694 + n = __copy_from_user(to, from, n);
3695 + else
3696 + memset(to, 0, n);
3697 + return n;
3698 +}
3699 +
3700 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
3701 +{
3702 + if ((long)n < 0 || n > INT_MAX)
3703 + return n;
3704 +
3705 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
3706 + if (!__builtin_constant_p(n))
3707 + check_object_size(from, n, true);
3708 + n = __copy_to_user(to, from, n);
3709 + }
3710 + return n;
3711 +}
3712 +
3713 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
3714 + unsigned long n);
3715 +
3716 +#endif /* __powerpc64__ */
3717 +
3718 extern unsigned long __clear_user(void __user *addr, unsigned long size);
3719
3720 static inline unsigned long clear_user(void __user *addr, unsigned long size)
3721 diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
3722 index 429983c..7af363b 100644
3723 --- a/arch/powerpc/kernel/exceptions-64e.S
3724 +++ b/arch/powerpc/kernel/exceptions-64e.S
3725 @@ -587,6 +587,7 @@ storage_fault_common:
3726 std r14,_DAR(r1)
3727 std r15,_DSISR(r1)
3728 addi r3,r1,STACK_FRAME_OVERHEAD
3729 + bl .save_nvgprs
3730 mr r4,r14
3731 mr r5,r15
3732 ld r14,PACA_EXGEN+EX_R14(r13)
3733 @@ -596,8 +597,7 @@ storage_fault_common:
3734 cmpdi r3,0
3735 bne- 1f
3736 b .ret_from_except_lite
3737 -1: bl .save_nvgprs
3738 - mr r5,r3
3739 +1: mr r5,r3
3740 addi r3,r1,STACK_FRAME_OVERHEAD
3741 ld r4,_DAR(r1)
3742 bl .bad_page_fault
3743 diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
3744 index 15c5a4f..22a4000 100644
3745 --- a/arch/powerpc/kernel/exceptions-64s.S
3746 +++ b/arch/powerpc/kernel/exceptions-64s.S
3747 @@ -1004,10 +1004,10 @@ handle_page_fault:
3748 11: ld r4,_DAR(r1)
3749 ld r5,_DSISR(r1)
3750 addi r3,r1,STACK_FRAME_OVERHEAD
3751 + bl .save_nvgprs
3752 bl .do_page_fault
3753 cmpdi r3,0
3754 beq+ 13f
3755 - bl .save_nvgprs
3756 mr r5,r3
3757 addi r3,r1,STACK_FRAME_OVERHEAD
3758 lwz r4,_DAR(r1)
3759 diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
3760 index 01e2877..a1ba360 100644
3761 --- a/arch/powerpc/kernel/irq.c
3762 +++ b/arch/powerpc/kernel/irq.c
3763 @@ -560,9 +560,6 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
3764 host->ops = ops;
3765 host->of_node = of_node_get(of_node);
3766
3767 - if (host->ops->match == NULL)
3768 - host->ops->match = default_irq_host_match;
3769 -
3770 raw_spin_lock_irqsave(&irq_big_lock, flags);
3771
3772 /* If it's a legacy controller, check for duplicates and
3773 @@ -635,7 +632,12 @@ struct irq_host *irq_find_host(struct device_node *node)
3774 */
3775 raw_spin_lock_irqsave(&irq_big_lock, flags);
3776 list_for_each_entry(h, &irq_hosts, link)
3777 - if (h->ops->match(h, node)) {
3778 + if (h->ops->match) {
3779 + if (h->ops->match(h, node)) {
3780 + found = h;
3781 + break;
3782 + }
3783 + } else if (default_irq_host_match(h, node)) {
3784 found = h;
3785 break;
3786 }
3787 diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
3788 index 0b6d796..d760ddb 100644
3789 --- a/arch/powerpc/kernel/module_32.c
3790 +++ b/arch/powerpc/kernel/module_32.c
3791 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
3792 me->arch.core_plt_section = i;
3793 }
3794 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
3795 - printk("Module doesn't contain .plt or .init.plt sections.\n");
3796 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
3797 return -ENOEXEC;
3798 }
3799
3800 @@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
3801
3802 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
3803 /* Init, or core PLT? */
3804 - if (location >= mod->module_core
3805 - && location < mod->module_core + mod->core_size)
3806 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
3807 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
3808 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
3809 - else
3810 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
3811 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
3812 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
3813 + else {
3814 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
3815 + return ~0UL;
3816 + }
3817
3818 /* Find this entry, or if that fails, the next avail. entry */
3819 while (entry->jump[0]) {
3820 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
3821 index d817ab0..b23b18e 100644
3822 --- a/arch/powerpc/kernel/process.c
3823 +++ b/arch/powerpc/kernel/process.c
3824 @@ -676,8 +676,8 @@ void show_regs(struct pt_regs * regs)
3825 * Lookup NIP late so we have the best change of getting the
3826 * above info out without failing
3827 */
3828 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
3829 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
3830 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
3831 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
3832 #endif
3833 show_stack(current, (unsigned long *) regs->gpr[1]);
3834 if (!user_mode(regs))
3835 @@ -1181,10 +1181,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
3836 newsp = stack[0];
3837 ip = stack[STACK_FRAME_LR_SAVE];
3838 if (!firstframe || ip != lr) {
3839 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
3840 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
3841 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3842 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
3843 - printk(" (%pS)",
3844 + printk(" (%pA)",
3845 (void *)current->ret_stack[curr_frame].ret);
3846 curr_frame--;
3847 }
3848 @@ -1204,7 +1204,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
3849 struct pt_regs *regs = (struct pt_regs *)
3850 (sp + STACK_FRAME_OVERHEAD);
3851 lr = regs->link;
3852 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
3853 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
3854 regs->trap, (void *)regs->nip, (void *)lr);
3855 firstframe = 1;
3856 }
3857 @@ -1279,58 +1279,3 @@ void thread_info_cache_init(void)
3858 }
3859
3860 #endif /* THREAD_SHIFT < PAGE_SHIFT */
3861 -
3862 -unsigned long arch_align_stack(unsigned long sp)
3863 -{
3864 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
3865 - sp -= get_random_int() & ~PAGE_MASK;
3866 - return sp & ~0xf;
3867 -}
3868 -
3869 -static inline unsigned long brk_rnd(void)
3870 -{
3871 - unsigned long rnd = 0;
3872 -
3873 - /* 8MB for 32bit, 1GB for 64bit */
3874 - if (is_32bit_task())
3875 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
3876 - else
3877 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
3878 -
3879 - return rnd << PAGE_SHIFT;
3880 -}
3881 -
3882 -unsigned long arch_randomize_brk(struct mm_struct *mm)
3883 -{
3884 - unsigned long base = mm->brk;
3885 - unsigned long ret;
3886 -
3887 -#ifdef CONFIG_PPC_STD_MMU_64
3888 - /*
3889 - * If we are using 1TB segments and we are allowed to randomise
3890 - * the heap, we can put it above 1TB so it is backed by a 1TB
3891 - * segment. Otherwise the heap will be in the bottom 1TB
3892 - * which always uses 256MB segments and this may result in a
3893 - * performance penalty.
3894 - */
3895 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
3896 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
3897 -#endif
3898 -
3899 - ret = PAGE_ALIGN(base + brk_rnd());
3900 -
3901 - if (ret < mm->brk)
3902 - return mm->brk;
3903 -
3904 - return ret;
3905 -}
3906 -
3907 -unsigned long randomize_et_dyn(unsigned long base)
3908 -{
3909 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
3910 -
3911 - if (ret < base)
3912 - return base;
3913 -
3914 - return ret;
3915 -}
3916 diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
3917 index 836a5a1..27289a3 100644
3918 --- a/arch/powerpc/kernel/signal_32.c
3919 +++ b/arch/powerpc/kernel/signal_32.c
3920 @@ -859,7 +859,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
3921 /* Save user registers on the stack */
3922 frame = &rt_sf->uc.uc_mcontext;
3923 addr = frame;
3924 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
3925 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
3926 if (save_user_regs(regs, frame, 0, 1))
3927 goto badframe;
3928 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
3929 diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
3930 index a50b5ec..547078a 100644
3931 --- a/arch/powerpc/kernel/signal_64.c
3932 +++ b/arch/powerpc/kernel/signal_64.c
3933 @@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
3934 current->thread.fpscr.val = 0;
3935
3936 /* Set up to return from userspace. */
3937 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
3938 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
3939 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
3940 } else {
3941 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
3942 diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
3943 index c091527..5592625 100644
3944 --- a/arch/powerpc/kernel/traps.c
3945 +++ b/arch/powerpc/kernel/traps.c
3946 @@ -131,6 +131,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
3947 return flags;
3948 }
3949
3950 +extern void gr_handle_kernel_exploit(void);
3951 +
3952 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
3953 int signr)
3954 {
3955 @@ -178,6 +180,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
3956 panic("Fatal exception in interrupt");
3957 if (panic_on_oops)
3958 panic("Fatal exception");
3959 +
3960 + gr_handle_kernel_exploit();
3961 +
3962 do_exit(signr);
3963 }
3964
3965 diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
3966 index 7d14bb6..1305601 100644
3967 --- a/arch/powerpc/kernel/vdso.c
3968 +++ b/arch/powerpc/kernel/vdso.c
3969 @@ -35,6 +35,7 @@
3970 #include <asm/firmware.h>
3971 #include <asm/vdso.h>
3972 #include <asm/vdso_datapage.h>
3973 +#include <asm/mman.h>
3974
3975 #include "setup.h"
3976
3977 @@ -219,7 +220,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
3978 vdso_base = VDSO32_MBASE;
3979 #endif
3980
3981 - current->mm->context.vdso_base = 0;
3982 + current->mm->context.vdso_base = ~0UL;
3983
3984 /* vDSO has a problem and was disabled, just don't "enable" it for the
3985 * process
3986 @@ -239,7 +240,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
3987 vdso_base = get_unmapped_area(NULL, vdso_base,
3988 (vdso_pages << PAGE_SHIFT) +
3989 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
3990 - 0, 0);
3991 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
3992 if (IS_ERR_VALUE(vdso_base)) {
3993 rc = vdso_base;
3994 goto fail_mmapsem;
3995 diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
3996 index 5eea6f3..5d10396 100644
3997 --- a/arch/powerpc/lib/usercopy_64.c
3998 +++ b/arch/powerpc/lib/usercopy_64.c
3999 @@ -9,22 +9,6 @@
4000 #include <linux/module.h>
4001 #include <asm/uaccess.h>
4002
4003 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
4004 -{
4005 - if (likely(access_ok(VERIFY_READ, from, n)))
4006 - n = __copy_from_user(to, from, n);
4007 - else
4008 - memset(to, 0, n);
4009 - return n;
4010 -}
4011 -
4012 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
4013 -{
4014 - if (likely(access_ok(VERIFY_WRITE, to, n)))
4015 - n = __copy_to_user(to, from, n);
4016 - return n;
4017 -}
4018 -
4019 unsigned long copy_in_user(void __user *to, const void __user *from,
4020 unsigned long n)
4021 {
4022 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
4023 return n;
4024 }
4025
4026 -EXPORT_SYMBOL(copy_from_user);
4027 -EXPORT_SYMBOL(copy_to_user);
4028 EXPORT_SYMBOL(copy_in_user);
4029
4030 diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
4031 index 2f0d1b0..36fb5cc 100644
4032 --- a/arch/powerpc/mm/fault.c
4033 +++ b/arch/powerpc/mm/fault.c
4034 @@ -32,6 +32,10 @@
4035 #include <linux/perf_event.h>
4036 #include <linux/magic.h>
4037 #include <linux/ratelimit.h>
4038 +#include <linux/slab.h>
4039 +#include <linux/pagemap.h>
4040 +#include <linux/compiler.h>
4041 +#include <linux/unistd.h>
4042
4043 #include <asm/firmware.h>
4044 #include <asm/page.h>
4045 @@ -43,6 +47,7 @@
4046 #include <asm/tlbflush.h>
4047 #include <asm/siginfo.h>
4048 #include <mm/mmu_decl.h>
4049 +#include <asm/ptrace.h>
4050
4051 #include "icswx.h"
4052
4053 @@ -68,6 +73,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
4054 }
4055 #endif
4056
4057 +#ifdef CONFIG_PAX_PAGEEXEC
4058 +/*
4059 + * PaX: decide what to do with offenders (regs->nip = fault address)
4060 + *
4061 + * returns 1 when task should be killed
4062 + */
4063 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4064 +{
4065 + return 1;
4066 +}
4067 +
4068 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4069 +{
4070 + unsigned long i;
4071 +
4072 + printk(KERN_ERR "PAX: bytes at PC: ");
4073 + for (i = 0; i < 5; i++) {
4074 + unsigned int c;
4075 + if (get_user(c, (unsigned int __user *)pc+i))
4076 + printk(KERN_CONT "???????? ");
4077 + else
4078 + printk(KERN_CONT "%08x ", c);
4079 + }
4080 + printk("\n");
4081 +}
4082 +#endif
4083 +
4084 /*
4085 * Check whether the instruction at regs->nip is a store using
4086 * an update addressing form which will update r1.
4087 @@ -138,7 +170,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
4088 * indicate errors in DSISR but can validly be set in SRR1.
4089 */
4090 if (trap == 0x400)
4091 - error_code &= 0x48200000;
4092 + error_code &= 0x58200000;
4093 else
4094 is_write = error_code & DSISR_ISSTORE;
4095 #else
4096 @@ -276,7 +308,7 @@ good_area:
4097 * "undefined". Of those that can be set, this is the only
4098 * one which seems bad.
4099 */
4100 - if (error_code & 0x10000000)
4101 + if (error_code & DSISR_GUARDED)
4102 /* Guarded storage error. */
4103 goto bad_area;
4104 #endif /* CONFIG_8xx */
4105 @@ -291,7 +323,7 @@ good_area:
4106 * processors use the same I/D cache coherency mechanism
4107 * as embedded.
4108 */
4109 - if (error_code & DSISR_PROTFAULT)
4110 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
4111 goto bad_area;
4112 #endif /* CONFIG_PPC_STD_MMU */
4113
4114 @@ -360,6 +392,23 @@ bad_area:
4115 bad_area_nosemaphore:
4116 /* User mode accesses cause a SIGSEGV */
4117 if (user_mode(regs)) {
4118 +
4119 +#ifdef CONFIG_PAX_PAGEEXEC
4120 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4121 +#ifdef CONFIG_PPC_STD_MMU
4122 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
4123 +#else
4124 + if (is_exec && regs->nip == address) {
4125 +#endif
4126 + switch (pax_handle_fetch_fault(regs)) {
4127 + }
4128 +
4129 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
4130 + do_group_exit(SIGKILL);
4131 + }
4132 + }
4133 +#endif
4134 +
4135 _exception(SIGSEGV, regs, code, address);
4136 return 0;
4137 }
4138 diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
4139 index 67a42ed..1c7210c 100644
4140 --- a/arch/powerpc/mm/mmap_64.c
4141 +++ b/arch/powerpc/mm/mmap_64.c
4142 @@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4143 */
4144 if (mmap_is_legacy()) {
4145 mm->mmap_base = TASK_UNMAPPED_BASE;
4146 +
4147 +#ifdef CONFIG_PAX_RANDMMAP
4148 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4149 + mm->mmap_base += mm->delta_mmap;
4150 +#endif
4151 +
4152 mm->get_unmapped_area = arch_get_unmapped_area;
4153 mm->unmap_area = arch_unmap_area;
4154 } else {
4155 mm->mmap_base = mmap_base();
4156 +
4157 +#ifdef CONFIG_PAX_RANDMMAP
4158 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4159 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4160 +#endif
4161 +
4162 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4163 mm->unmap_area = arch_unmap_area_topdown;
4164 }
4165 diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
4166 index 73709f7..6b90313 100644
4167 --- a/arch/powerpc/mm/slice.c
4168 +++ b/arch/powerpc/mm/slice.c
4169 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
4170 if ((mm->task_size - len) < addr)
4171 return 0;
4172 vma = find_vma(mm, addr);
4173 - return (!vma || (addr + len) <= vma->vm_start);
4174 + return check_heap_stack_gap(vma, addr, len);
4175 }
4176
4177 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
4178 @@ -256,7 +256,7 @@ full_search:
4179 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
4180 continue;
4181 }
4182 - if (!vma || addr + len <= vma->vm_start) {
4183 + if (check_heap_stack_gap(vma, addr, len)) {
4184 /*
4185 * Remember the place where we stopped the search:
4186 */
4187 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4188 }
4189 }
4190
4191 - addr = mm->mmap_base;
4192 - while (addr > len) {
4193 + if (mm->mmap_base < len)
4194 + addr = -ENOMEM;
4195 + else
4196 + addr = mm->mmap_base - len;
4197 +
4198 + while (!IS_ERR_VALUE(addr)) {
4199 /* Go down by chunk size */
4200 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
4201 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
4202
4203 /* Check for hit with different page size */
4204 mask = slice_range_to_mask(addr, len);
4205 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4206 * return with success:
4207 */
4208 vma = find_vma(mm, addr);
4209 - if (!vma || (addr + len) <= vma->vm_start) {
4210 + if (check_heap_stack_gap(vma, addr, len)) {
4211 /* remember the address as a hint for next time */
4212 if (use_cache)
4213 mm->free_area_cache = addr;
4214 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4215 mm->cached_hole_size = vma->vm_start - addr;
4216
4217 /* try just below the current vma->vm_start */
4218 - addr = vma->vm_start;
4219 + addr = skip_heap_stack_gap(vma, len);
4220 }
4221
4222 /*
4223 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
4224 if (fixed && addr > (mm->task_size - len))
4225 return -EINVAL;
4226
4227 +#ifdef CONFIG_PAX_RANDMMAP
4228 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
4229 + addr = 0;
4230 +#endif
4231 +
4232 /* If hint, make sure it matches our alignment restrictions */
4233 if (!fixed && addr) {
4234 addr = _ALIGN_UP(addr, 1ul << pshift);
4235 diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
4236 index 8517d2a..d2738d4 100644
4237 --- a/arch/s390/include/asm/atomic.h
4238 +++ b/arch/s390/include/asm/atomic.h
4239 @@ -326,6 +326,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
4240 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
4241 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
4242
4243 +#define atomic64_read_unchecked(v) atomic64_read(v)
4244 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4245 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4246 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4247 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4248 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
4249 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4250 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
4251 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4252 +
4253 #define smp_mb__before_atomic_dec() smp_mb()
4254 #define smp_mb__after_atomic_dec() smp_mb()
4255 #define smp_mb__before_atomic_inc() smp_mb()
4256 diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
4257 index 2a30d5a..5e5586f 100644
4258 --- a/arch/s390/include/asm/cache.h
4259 +++ b/arch/s390/include/asm/cache.h
4260 @@ -11,8 +11,10 @@
4261 #ifndef __ARCH_S390_CACHE_H
4262 #define __ARCH_S390_CACHE_H
4263
4264 -#define L1_CACHE_BYTES 256
4265 +#include <linux/const.h>
4266 +
4267 #define L1_CACHE_SHIFT 8
4268 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4269 #define NET_SKB_PAD 32
4270
4271 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4272 diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
4273 index 547f1a6..0b22b53 100644
4274 --- a/arch/s390/include/asm/elf.h
4275 +++ b/arch/s390/include/asm/elf.h
4276 @@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
4277 the loader. We need to make sure that it is out of the way of the program
4278 that it will "exec", and that there is sufficient room for the brk. */
4279
4280 -extern unsigned long randomize_et_dyn(unsigned long base);
4281 -#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
4282 +#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
4283 +
4284 +#ifdef CONFIG_PAX_ASLR
4285 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
4286 +
4287 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4288 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4289 +#endif
4290
4291 /* This yields a mask that user programs can use to figure out what
4292 instruction set this CPU supports. */
4293 @@ -211,7 +217,4 @@ struct linux_binprm;
4294 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
4295 int arch_setup_additional_pages(struct linux_binprm *, int);
4296
4297 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
4298 -#define arch_randomize_brk arch_randomize_brk
4299 -
4300 #endif
4301 diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
4302 index d73cc6b..1a296ad 100644
4303 --- a/arch/s390/include/asm/system.h
4304 +++ b/arch/s390/include/asm/system.h
4305 @@ -260,7 +260,7 @@ extern void (*_machine_restart)(char *command);
4306 extern void (*_machine_halt)(void);
4307 extern void (*_machine_power_off)(void);
4308
4309 -extern unsigned long arch_align_stack(unsigned long sp);
4310 +#define arch_align_stack(x) ((x) & ~0xfUL)
4311
4312 static inline int tprot(unsigned long addr)
4313 {
4314 diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
4315 index 2b23885..e136e31 100644
4316 --- a/arch/s390/include/asm/uaccess.h
4317 +++ b/arch/s390/include/asm/uaccess.h
4318 @@ -235,6 +235,10 @@ static inline unsigned long __must_check
4319 copy_to_user(void __user *to, const void *from, unsigned long n)
4320 {
4321 might_fault();
4322 +
4323 + if ((long)n < 0)
4324 + return n;
4325 +
4326 if (access_ok(VERIFY_WRITE, to, n))
4327 n = __copy_to_user(to, from, n);
4328 return n;
4329 @@ -260,6 +264,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
4330 static inline unsigned long __must_check
4331 __copy_from_user(void *to, const void __user *from, unsigned long n)
4332 {
4333 + if ((long)n < 0)
4334 + return n;
4335 +
4336 if (__builtin_constant_p(n) && (n <= 256))
4337 return uaccess.copy_from_user_small(n, from, to);
4338 else
4339 @@ -294,6 +301,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
4340 unsigned int sz = __compiletime_object_size(to);
4341
4342 might_fault();
4343 +
4344 + if ((long)n < 0)
4345 + return n;
4346 +
4347 if (unlikely(sz != -1 && sz < n)) {
4348 copy_from_user_overflow();
4349 return n;
4350 diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
4351 index dfcb343..eda788a 100644
4352 --- a/arch/s390/kernel/module.c
4353 +++ b/arch/s390/kernel/module.c
4354 @@ -161,11 +161,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
4355
4356 /* Increase core size by size of got & plt and set start
4357 offsets for got and plt. */
4358 - me->core_size = ALIGN(me->core_size, 4);
4359 - me->arch.got_offset = me->core_size;
4360 - me->core_size += me->arch.got_size;
4361 - me->arch.plt_offset = me->core_size;
4362 - me->core_size += me->arch.plt_size;
4363 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
4364 + me->arch.got_offset = me->core_size_rw;
4365 + me->core_size_rw += me->arch.got_size;
4366 + me->arch.plt_offset = me->core_size_rx;
4367 + me->core_size_rx += me->arch.plt_size;
4368 return 0;
4369 }
4370
4371 @@ -242,7 +242,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4372 if (info->got_initialized == 0) {
4373 Elf_Addr *gotent;
4374
4375 - gotent = me->module_core + me->arch.got_offset +
4376 + gotent = me->module_core_rw + me->arch.got_offset +
4377 info->got_offset;
4378 *gotent = val;
4379 info->got_initialized = 1;
4380 @@ -266,7 +266,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4381 else if (r_type == R_390_GOTENT ||
4382 r_type == R_390_GOTPLTENT)
4383 *(unsigned int *) loc =
4384 - (val + (Elf_Addr) me->module_core - loc) >> 1;
4385 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
4386 else if (r_type == R_390_GOT64 ||
4387 r_type == R_390_GOTPLT64)
4388 *(unsigned long *) loc = val;
4389 @@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4390 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
4391 if (info->plt_initialized == 0) {
4392 unsigned int *ip;
4393 - ip = me->module_core + me->arch.plt_offset +
4394 + ip = me->module_core_rx + me->arch.plt_offset +
4395 info->plt_offset;
4396 #ifndef CONFIG_64BIT
4397 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
4398 @@ -305,7 +305,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4399 val - loc + 0xffffUL < 0x1ffffeUL) ||
4400 (r_type == R_390_PLT32DBL &&
4401 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
4402 - val = (Elf_Addr) me->module_core +
4403 + val = (Elf_Addr) me->module_core_rx +
4404 me->arch.plt_offset +
4405 info->plt_offset;
4406 val += rela->r_addend - loc;
4407 @@ -327,7 +327,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4408 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
4409 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
4410 val = val + rela->r_addend -
4411 - ((Elf_Addr) me->module_core + me->arch.got_offset);
4412 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
4413 if (r_type == R_390_GOTOFF16)
4414 *(unsigned short *) loc = val;
4415 else if (r_type == R_390_GOTOFF32)
4416 @@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4417 break;
4418 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
4419 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
4420 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
4421 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
4422 rela->r_addend - loc;
4423 if (r_type == R_390_GOTPC)
4424 *(unsigned int *) loc = val;
4425 diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
4426 index e795933..b32563c 100644
4427 --- a/arch/s390/kernel/process.c
4428 +++ b/arch/s390/kernel/process.c
4429 @@ -323,39 +323,3 @@ unsigned long get_wchan(struct task_struct *p)
4430 }
4431 return 0;
4432 }
4433 -
4434 -unsigned long arch_align_stack(unsigned long sp)
4435 -{
4436 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
4437 - sp -= get_random_int() & ~PAGE_MASK;
4438 - return sp & ~0xf;
4439 -}
4440 -
4441 -static inline unsigned long brk_rnd(void)
4442 -{
4443 - /* 8MB for 32bit, 1GB for 64bit */
4444 - if (is_32bit_task())
4445 - return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
4446 - else
4447 - return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
4448 -}
4449 -
4450 -unsigned long arch_randomize_brk(struct mm_struct *mm)
4451 -{
4452 - unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
4453 -
4454 - if (ret < mm->brk)
4455 - return mm->brk;
4456 - return ret;
4457 -}
4458 -
4459 -unsigned long randomize_et_dyn(unsigned long base)
4460 -{
4461 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
4462 -
4463 - if (!(current->flags & PF_RANDOMIZE))
4464 - return base;
4465 - if (ret < base)
4466 - return base;
4467 - return ret;
4468 -}
4469 diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
4470 index a0155c0..34cc491 100644
4471 --- a/arch/s390/mm/mmap.c
4472 +++ b/arch/s390/mm/mmap.c
4473 @@ -92,10 +92,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4474 */
4475 if (mmap_is_legacy()) {
4476 mm->mmap_base = TASK_UNMAPPED_BASE;
4477 +
4478 +#ifdef CONFIG_PAX_RANDMMAP
4479 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4480 + mm->mmap_base += mm->delta_mmap;
4481 +#endif
4482 +
4483 mm->get_unmapped_area = arch_get_unmapped_area;
4484 mm->unmap_area = arch_unmap_area;
4485 } else {
4486 mm->mmap_base = mmap_base();
4487 +
4488 +#ifdef CONFIG_PAX_RANDMMAP
4489 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4490 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4491 +#endif
4492 +
4493 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4494 mm->unmap_area = arch_unmap_area_topdown;
4495 }
4496 @@ -167,10 +179,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4497 */
4498 if (mmap_is_legacy()) {
4499 mm->mmap_base = TASK_UNMAPPED_BASE;
4500 +
4501 +#ifdef CONFIG_PAX_RANDMMAP
4502 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4503 + mm->mmap_base += mm->delta_mmap;
4504 +#endif
4505 +
4506 mm->get_unmapped_area = s390_get_unmapped_area;
4507 mm->unmap_area = arch_unmap_area;
4508 } else {
4509 mm->mmap_base = mmap_base();
4510 +
4511 +#ifdef CONFIG_PAX_RANDMMAP
4512 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4513 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4514 +#endif
4515 +
4516 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
4517 mm->unmap_area = arch_unmap_area_topdown;
4518 }
4519 diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
4520 index ae3d59f..f65f075 100644
4521 --- a/arch/score/include/asm/cache.h
4522 +++ b/arch/score/include/asm/cache.h
4523 @@ -1,7 +1,9 @@
4524 #ifndef _ASM_SCORE_CACHE_H
4525 #define _ASM_SCORE_CACHE_H
4526
4527 +#include <linux/const.h>
4528 +
4529 #define L1_CACHE_SHIFT 4
4530 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4531 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4532
4533 #endif /* _ASM_SCORE_CACHE_H */
4534 diff --git a/arch/score/include/asm/system.h b/arch/score/include/asm/system.h
4535 index 589d5c7..669e274 100644
4536 --- a/arch/score/include/asm/system.h
4537 +++ b/arch/score/include/asm/system.h
4538 @@ -17,7 +17,7 @@ do { \
4539 #define finish_arch_switch(prev) do {} while (0)
4540
4541 typedef void (*vi_handler_t)(void);
4542 -extern unsigned long arch_align_stack(unsigned long sp);
4543 +#define arch_align_stack(x) (x)
4544
4545 #define mb() barrier()
4546 #define rmb() barrier()
4547 diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
4548 index 25d0803..d6c8e36 100644
4549 --- a/arch/score/kernel/process.c
4550 +++ b/arch/score/kernel/process.c
4551 @@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task)
4552
4553 return task_pt_regs(task)->cp0_epc;
4554 }
4555 -
4556 -unsigned long arch_align_stack(unsigned long sp)
4557 -{
4558 - return sp;
4559 -}
4560 diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
4561 index ef9e555..331bd29 100644
4562 --- a/arch/sh/include/asm/cache.h
4563 +++ b/arch/sh/include/asm/cache.h
4564 @@ -9,10 +9,11 @@
4565 #define __ASM_SH_CACHE_H
4566 #ifdef __KERNEL__
4567
4568 +#include <linux/const.h>
4569 #include <linux/init.h>
4570 #include <cpu/cache.h>
4571
4572 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4573 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4574
4575 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4576
4577 diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
4578 index afeb710..d1d1289 100644
4579 --- a/arch/sh/mm/mmap.c
4580 +++ b/arch/sh/mm/mmap.c
4581 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
4582 addr = PAGE_ALIGN(addr);
4583
4584 vma = find_vma(mm, addr);
4585 - if (TASK_SIZE - len >= addr &&
4586 - (!vma || addr + len <= vma->vm_start))
4587 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4588 return addr;
4589 }
4590
4591 @@ -106,7 +105,7 @@ full_search:
4592 }
4593 return -ENOMEM;
4594 }
4595 - if (likely(!vma || addr + len <= vma->vm_start)) {
4596 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4597 /*
4598 * Remember the place where we stopped the search:
4599 */
4600 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4601 addr = PAGE_ALIGN(addr);
4602
4603 vma = find_vma(mm, addr);
4604 - if (TASK_SIZE - len >= addr &&
4605 - (!vma || addr + len <= vma->vm_start))
4606 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4607 return addr;
4608 }
4609
4610 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4611 /* make sure it can fit in the remaining address space */
4612 if (likely(addr > len)) {
4613 vma = find_vma(mm, addr-len);
4614 - if (!vma || addr <= vma->vm_start) {
4615 + if (check_heap_stack_gap(vma, addr - len, len)) {
4616 /* remember the address as a hint for next time */
4617 return (mm->free_area_cache = addr-len);
4618 }
4619 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4620 if (unlikely(mm->mmap_base < len))
4621 goto bottomup;
4622
4623 - addr = mm->mmap_base-len;
4624 - if (do_colour_align)
4625 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4626 + addr = mm->mmap_base - len;
4627
4628 do {
4629 + if (do_colour_align)
4630 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4631 /*
4632 * Lookup failure means no vma is above this address,
4633 * else if new region fits below vma->vm_start,
4634 * return with success:
4635 */
4636 vma = find_vma(mm, addr);
4637 - if (likely(!vma || addr+len <= vma->vm_start)) {
4638 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4639 /* remember the address as a hint for next time */
4640 return (mm->free_area_cache = addr);
4641 }
4642 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4643 mm->cached_hole_size = vma->vm_start - addr;
4644
4645 /* try just below the current vma->vm_start */
4646 - addr = vma->vm_start-len;
4647 - if (do_colour_align)
4648 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4649 - } while (likely(len < vma->vm_start));
4650 + addr = skip_heap_stack_gap(vma, len);
4651 + } while (!IS_ERR_VALUE(addr));
4652
4653 bottomup:
4654 /*
4655 diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
4656 index eddcfb3..b117d90 100644
4657 --- a/arch/sparc/Makefile
4658 +++ b/arch/sparc/Makefile
4659 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
4660 # Export what is needed by arch/sparc/boot/Makefile
4661 export VMLINUX_INIT VMLINUX_MAIN
4662 VMLINUX_INIT := $(head-y) $(init-y)
4663 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4664 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4665 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4666 VMLINUX_MAIN += $(drivers-y) $(net-y)
4667
4668 diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
4669 index 9f421df..b81fc12 100644
4670 --- a/arch/sparc/include/asm/atomic_64.h
4671 +++ b/arch/sparc/include/asm/atomic_64.h
4672 @@ -14,18 +14,40 @@
4673 #define ATOMIC64_INIT(i) { (i) }
4674
4675 #define atomic_read(v) (*(volatile int *)&(v)->counter)
4676 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
4677 +{
4678 + return v->counter;
4679 +}
4680 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
4681 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
4682 +{
4683 + return v->counter;
4684 +}
4685
4686 #define atomic_set(v, i) (((v)->counter) = i)
4687 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
4688 +{
4689 + v->counter = i;
4690 +}
4691 #define atomic64_set(v, i) (((v)->counter) = i)
4692 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
4693 +{
4694 + v->counter = i;
4695 +}
4696
4697 extern void atomic_add(int, atomic_t *);
4698 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
4699 extern void atomic64_add(long, atomic64_t *);
4700 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
4701 extern void atomic_sub(int, atomic_t *);
4702 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
4703 extern void atomic64_sub(long, atomic64_t *);
4704 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
4705
4706 extern int atomic_add_ret(int, atomic_t *);
4707 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
4708 extern long atomic64_add_ret(long, atomic64_t *);
4709 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
4710 extern int atomic_sub_ret(int, atomic_t *);
4711 extern long atomic64_sub_ret(long, atomic64_t *);
4712
4713 @@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4714 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
4715
4716 #define atomic_inc_return(v) atomic_add_ret(1, v)
4717 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
4718 +{
4719 + return atomic_add_ret_unchecked(1, v);
4720 +}
4721 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
4722 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
4723 +{
4724 + return atomic64_add_ret_unchecked(1, v);
4725 +}
4726
4727 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
4728 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
4729
4730 #define atomic_add_return(i, v) atomic_add_ret(i, v)
4731 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
4732 +{
4733 + return atomic_add_ret_unchecked(i, v);
4734 +}
4735 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
4736 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
4737 +{
4738 + return atomic64_add_ret_unchecked(i, v);
4739 +}
4740
4741 /*
4742 * atomic_inc_and_test - increment and test
4743 @@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4744 * other cases.
4745 */
4746 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
4747 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
4748 +{
4749 + return atomic_inc_return_unchecked(v) == 0;
4750 +}
4751 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
4752
4753 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
4754 @@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4755 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
4756
4757 #define atomic_inc(v) atomic_add(1, v)
4758 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
4759 +{
4760 + atomic_add_unchecked(1, v);
4761 +}
4762 #define atomic64_inc(v) atomic64_add(1, v)
4763 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
4764 +{
4765 + atomic64_add_unchecked(1, v);
4766 +}
4767
4768 #define atomic_dec(v) atomic_sub(1, v)
4769 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
4770 +{
4771 + atomic_sub_unchecked(1, v);
4772 +}
4773 #define atomic64_dec(v) atomic64_sub(1, v)
4774 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
4775 +{
4776 + atomic64_sub_unchecked(1, v);
4777 +}
4778
4779 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
4780 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
4781
4782 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
4783 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
4784 +{
4785 + return cmpxchg(&v->counter, old, new);
4786 +}
4787 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
4788 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
4789 +{
4790 + return xchg(&v->counter, new);
4791 +}
4792
4793 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
4794 {
4795 - int c, old;
4796 + int c, old, new;
4797 c = atomic_read(v);
4798 for (;;) {
4799 - if (unlikely(c == (u)))
4800 + if (unlikely(c == u))
4801 break;
4802 - old = atomic_cmpxchg((v), c, c + (a));
4803 +
4804 + asm volatile("addcc %2, %0, %0\n"
4805 +
4806 +#ifdef CONFIG_PAX_REFCOUNT
4807 + "tvs %%icc, 6\n"
4808 +#endif
4809 +
4810 + : "=r" (new)
4811 + : "0" (c), "ir" (a)
4812 + : "cc");
4813 +
4814 + old = atomic_cmpxchg(v, c, new);
4815 if (likely(old == c))
4816 break;
4817 c = old;
4818 @@ -89,20 +166,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
4819 #define atomic64_cmpxchg(v, o, n) \
4820 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
4821 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
4822 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
4823 +{
4824 + return xchg(&v->counter, new);
4825 +}
4826
4827 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
4828 {
4829 - long c, old;
4830 + long c, old, new;
4831 c = atomic64_read(v);
4832 for (;;) {
4833 - if (unlikely(c == (u)))
4834 + if (unlikely(c == u))
4835 break;
4836 - old = atomic64_cmpxchg((v), c, c + (a));
4837 +
4838 + asm volatile("addcc %2, %0, %0\n"
4839 +
4840 +#ifdef CONFIG_PAX_REFCOUNT
4841 + "tvs %%xcc, 6\n"
4842 +#endif
4843 +
4844 + : "=r" (new)
4845 + : "0" (c), "ir" (a)
4846 + : "cc");
4847 +
4848 + old = atomic64_cmpxchg(v, c, new);
4849 if (likely(old == c))
4850 break;
4851 c = old;
4852 }
4853 - return c != (u);
4854 + return c != u;
4855 }
4856
4857 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
4858 diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
4859 index 69358b5..9d0d492 100644
4860 --- a/arch/sparc/include/asm/cache.h
4861 +++ b/arch/sparc/include/asm/cache.h
4862 @@ -7,10 +7,12 @@
4863 #ifndef _SPARC_CACHE_H
4864 #define _SPARC_CACHE_H
4865
4866 +#include <linux/const.h>
4867 +
4868 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
4869
4870 #define L1_CACHE_SHIFT 5
4871 -#define L1_CACHE_BYTES 32
4872 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4873
4874 #ifdef CONFIG_SPARC32
4875 #define SMP_CACHE_BYTES_SHIFT 5
4876 diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
4877 index 4269ca6..e3da77f 100644
4878 --- a/arch/sparc/include/asm/elf_32.h
4879 +++ b/arch/sparc/include/asm/elf_32.h
4880 @@ -114,6 +114,13 @@ typedef struct {
4881
4882 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
4883
4884 +#ifdef CONFIG_PAX_ASLR
4885 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
4886 +
4887 +#define PAX_DELTA_MMAP_LEN 16
4888 +#define PAX_DELTA_STACK_LEN 16
4889 +#endif
4890 +
4891 /* This yields a mask that user programs can use to figure out what
4892 instruction set this cpu supports. This can NOT be done in userspace
4893 on Sparc. */
4894 diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
4895 index 7df8b7f..4946269 100644
4896 --- a/arch/sparc/include/asm/elf_64.h
4897 +++ b/arch/sparc/include/asm/elf_64.h
4898 @@ -180,6 +180,13 @@ typedef struct {
4899 #define ELF_ET_DYN_BASE 0x0000010000000000UL
4900 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
4901
4902 +#ifdef CONFIG_PAX_ASLR
4903 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
4904 +
4905 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
4906 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
4907 +#endif
4908 +
4909 extern unsigned long sparc64_elf_hwcap;
4910 #define ELF_HWCAP sparc64_elf_hwcap
4911
4912 diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
4913 index a790cc6..091ed94 100644
4914 --- a/arch/sparc/include/asm/pgtable_32.h
4915 +++ b/arch/sparc/include/asm/pgtable_32.h
4916 @@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
4917 BTFIXUPDEF_INT(page_none)
4918 BTFIXUPDEF_INT(page_copy)
4919 BTFIXUPDEF_INT(page_readonly)
4920 +
4921 +#ifdef CONFIG_PAX_PAGEEXEC
4922 +BTFIXUPDEF_INT(page_shared_noexec)
4923 +BTFIXUPDEF_INT(page_copy_noexec)
4924 +BTFIXUPDEF_INT(page_readonly_noexec)
4925 +#endif
4926 +
4927 BTFIXUPDEF_INT(page_kernel)
4928
4929 #define PMD_SHIFT SUN4C_PMD_SHIFT
4930 @@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
4931 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
4932 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
4933
4934 +#ifdef CONFIG_PAX_PAGEEXEC
4935 +extern pgprot_t PAGE_SHARED_NOEXEC;
4936 +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
4937 +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
4938 +#else
4939 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
4940 +# define PAGE_COPY_NOEXEC PAGE_COPY
4941 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
4942 +#endif
4943 +
4944 extern unsigned long page_kernel;
4945
4946 #ifdef MODULE
4947 diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
4948 index f6ae2b2..b03ffc7 100644
4949 --- a/arch/sparc/include/asm/pgtsrmmu.h
4950 +++ b/arch/sparc/include/asm/pgtsrmmu.h
4951 @@ -115,6 +115,13 @@
4952 SRMMU_EXEC | SRMMU_REF)
4953 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
4954 SRMMU_EXEC | SRMMU_REF)
4955 +
4956 +#ifdef CONFIG_PAX_PAGEEXEC
4957 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
4958 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
4959 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
4960 +#endif
4961 +
4962 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
4963 SRMMU_DIRTY | SRMMU_REF)
4964
4965 diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
4966 index 9689176..63c18ea 100644
4967 --- a/arch/sparc/include/asm/spinlock_64.h
4968 +++ b/arch/sparc/include/asm/spinlock_64.h
4969 @@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
4970
4971 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
4972
4973 -static void inline arch_read_lock(arch_rwlock_t *lock)
4974 +static inline void arch_read_lock(arch_rwlock_t *lock)
4975 {
4976 unsigned long tmp1, tmp2;
4977
4978 __asm__ __volatile__ (
4979 "1: ldsw [%2], %0\n"
4980 " brlz,pn %0, 2f\n"
4981 -"4: add %0, 1, %1\n"
4982 +"4: addcc %0, 1, %1\n"
4983 +
4984 +#ifdef CONFIG_PAX_REFCOUNT
4985 +" tvs %%icc, 6\n"
4986 +#endif
4987 +
4988 " cas [%2], %0, %1\n"
4989 " cmp %0, %1\n"
4990 " bne,pn %%icc, 1b\n"
4991 @@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
4992 " .previous"
4993 : "=&r" (tmp1), "=&r" (tmp2)
4994 : "r" (lock)
4995 - : "memory");
4996 + : "memory", "cc");
4997 }
4998
4999 -static int inline arch_read_trylock(arch_rwlock_t *lock)
5000 +static inline int arch_read_trylock(arch_rwlock_t *lock)
5001 {
5002 int tmp1, tmp2;
5003
5004 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
5005 "1: ldsw [%2], %0\n"
5006 " brlz,a,pn %0, 2f\n"
5007 " mov 0, %0\n"
5008 -" add %0, 1, %1\n"
5009 +" addcc %0, 1, %1\n"
5010 +
5011 +#ifdef CONFIG_PAX_REFCOUNT
5012 +" tvs %%icc, 6\n"
5013 +#endif
5014 +
5015 " cas [%2], %0, %1\n"
5016 " cmp %0, %1\n"
5017 " bne,pn %%icc, 1b\n"
5018 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
5019 return tmp1;
5020 }
5021
5022 -static void inline arch_read_unlock(arch_rwlock_t *lock)
5023 +static inline void arch_read_unlock(arch_rwlock_t *lock)
5024 {
5025 unsigned long tmp1, tmp2;
5026
5027 __asm__ __volatile__(
5028 "1: lduw [%2], %0\n"
5029 -" sub %0, 1, %1\n"
5030 +" subcc %0, 1, %1\n"
5031 +
5032 +#ifdef CONFIG_PAX_REFCOUNT
5033 +" tvs %%icc, 6\n"
5034 +#endif
5035 +
5036 " cas [%2], %0, %1\n"
5037 " cmp %0, %1\n"
5038 " bne,pn %%xcc, 1b\n"
5039 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
5040 : "memory");
5041 }
5042
5043 -static void inline arch_write_lock(arch_rwlock_t *lock)
5044 +static inline void arch_write_lock(arch_rwlock_t *lock)
5045 {
5046 unsigned long mask, tmp1, tmp2;
5047
5048 @@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
5049 : "memory");
5050 }
5051
5052 -static void inline arch_write_unlock(arch_rwlock_t *lock)
5053 +static inline void arch_write_unlock(arch_rwlock_t *lock)
5054 {
5055 __asm__ __volatile__(
5056 " stw %%g0, [%0]"
5057 @@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
5058 : "memory");
5059 }
5060
5061 -static int inline arch_write_trylock(arch_rwlock_t *lock)
5062 +static inline int arch_write_trylock(arch_rwlock_t *lock)
5063 {
5064 unsigned long mask, tmp1, tmp2, result;
5065
5066 diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
5067 index c2a1080..21ed218 100644
5068 --- a/arch/sparc/include/asm/thread_info_32.h
5069 +++ b/arch/sparc/include/asm/thread_info_32.h
5070 @@ -50,6 +50,8 @@ struct thread_info {
5071 unsigned long w_saved;
5072
5073 struct restart_block restart_block;
5074 +
5075 + unsigned long lowest_stack;
5076 };
5077
5078 /*
5079 diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
5080 index 01d057f..0a02f7e 100644
5081 --- a/arch/sparc/include/asm/thread_info_64.h
5082 +++ b/arch/sparc/include/asm/thread_info_64.h
5083 @@ -63,6 +63,8 @@ struct thread_info {
5084 struct pt_regs *kern_una_regs;
5085 unsigned int kern_una_insn;
5086
5087 + unsigned long lowest_stack;
5088 +
5089 unsigned long fpregs[0] __attribute__ ((aligned(64)));
5090 };
5091
5092 diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
5093 index e88fbe5..96b0ce5 100644
5094 --- a/arch/sparc/include/asm/uaccess.h
5095 +++ b/arch/sparc/include/asm/uaccess.h
5096 @@ -1,5 +1,13 @@
5097 #ifndef ___ASM_SPARC_UACCESS_H
5098 #define ___ASM_SPARC_UACCESS_H
5099 +
5100 +#ifdef __KERNEL__
5101 +#ifndef __ASSEMBLY__
5102 +#include <linux/types.h>
5103 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
5104 +#endif
5105 +#endif
5106 +
5107 #if defined(__sparc__) && defined(__arch64__)
5108 #include <asm/uaccess_64.h>
5109 #else
5110 diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
5111 index 8303ac4..07f333d 100644
5112 --- a/arch/sparc/include/asm/uaccess_32.h
5113 +++ b/arch/sparc/include/asm/uaccess_32.h
5114 @@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
5115
5116 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
5117 {
5118 - if (n && __access_ok((unsigned long) to, n))
5119 + if ((long)n < 0)
5120 + return n;
5121 +
5122 + if (n && __access_ok((unsigned long) to, n)) {
5123 + if (!__builtin_constant_p(n))
5124 + check_object_size(from, n, true);
5125 return __copy_user(to, (__force void __user *) from, n);
5126 - else
5127 + } else
5128 return n;
5129 }
5130
5131 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
5132 {
5133 + if ((long)n < 0)
5134 + return n;
5135 +
5136 + if (!__builtin_constant_p(n))
5137 + check_object_size(from, n, true);
5138 +
5139 return __copy_user(to, (__force void __user *) from, n);
5140 }
5141
5142 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
5143 {
5144 - if (n && __access_ok((unsigned long) from, n))
5145 + if ((long)n < 0)
5146 + return n;
5147 +
5148 + if (n && __access_ok((unsigned long) from, n)) {
5149 + if (!__builtin_constant_p(n))
5150 + check_object_size(to, n, false);
5151 return __copy_user((__force void __user *) to, from, n);
5152 - else
5153 + } else
5154 return n;
5155 }
5156
5157 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
5158 {
5159 + if ((long)n < 0)
5160 + return n;
5161 +
5162 return __copy_user((__force void __user *) to, from, n);
5163 }
5164
5165 diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
5166 index 3e1449f..5293a0e 100644
5167 --- a/arch/sparc/include/asm/uaccess_64.h
5168 +++ b/arch/sparc/include/asm/uaccess_64.h
5169 @@ -10,6 +10,7 @@
5170 #include <linux/compiler.h>
5171 #include <linux/string.h>
5172 #include <linux/thread_info.h>
5173 +#include <linux/kernel.h>
5174 #include <asm/asi.h>
5175 #include <asm/system.h>
5176 #include <asm/spitfire.h>
5177 @@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
5178 static inline unsigned long __must_check
5179 copy_from_user(void *to, const void __user *from, unsigned long size)
5180 {
5181 - unsigned long ret = ___copy_from_user(to, from, size);
5182 + unsigned long ret;
5183
5184 + if ((long)size < 0 || size > INT_MAX)
5185 + return size;
5186 +
5187 + if (!__builtin_constant_p(size))
5188 + check_object_size(to, size, false);
5189 +
5190 + ret = ___copy_from_user(to, from, size);
5191 if (unlikely(ret))
5192 ret = copy_from_user_fixup(to, from, size);
5193
5194 @@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
5195 static inline unsigned long __must_check
5196 copy_to_user(void __user *to, const void *from, unsigned long size)
5197 {
5198 - unsigned long ret = ___copy_to_user(to, from, size);
5199 + unsigned long ret;
5200
5201 + if ((long)size < 0 || size > INT_MAX)
5202 + return size;
5203 +
5204 + if (!__builtin_constant_p(size))
5205 + check_object_size(from, size, true);
5206 +
5207 + ret = ___copy_to_user(to, from, size);
5208 if (unlikely(ret))
5209 ret = copy_to_user_fixup(to, from, size);
5210 return ret;
5211 diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
5212 index cb85458..e063f17 100644
5213 --- a/arch/sparc/kernel/Makefile
5214 +++ b/arch/sparc/kernel/Makefile
5215 @@ -3,7 +3,7 @@
5216 #
5217
5218 asflags-y := -ansi
5219 -ccflags-y := -Werror
5220 +#ccflags-y := -Werror
5221
5222 extra-y := head_$(BITS).o
5223 extra-y += init_task.o
5224 diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
5225 index f793742..4d880af 100644
5226 --- a/arch/sparc/kernel/process_32.c
5227 +++ b/arch/sparc/kernel/process_32.c
5228 @@ -204,7 +204,7 @@ void __show_backtrace(unsigned long fp)
5229 rw->ins[4], rw->ins[5],
5230 rw->ins[6],
5231 rw->ins[7]);
5232 - printk("%pS\n", (void *) rw->ins[7]);
5233 + printk("%pA\n", (void *) rw->ins[7]);
5234 rw = (struct reg_window32 *) rw->ins[6];
5235 }
5236 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
5237 @@ -271,14 +271,14 @@ void show_regs(struct pt_regs *r)
5238
5239 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
5240 r->psr, r->pc, r->npc, r->y, print_tainted());
5241 - printk("PC: <%pS>\n", (void *) r->pc);
5242 + printk("PC: <%pA>\n", (void *) r->pc);
5243 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5244 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
5245 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
5246 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5247 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
5248 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
5249 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
5250 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
5251
5252 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5253 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
5254 @@ -313,7 +313,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
5255 rw = (struct reg_window32 *) fp;
5256 pc = rw->ins[7];
5257 printk("[%08lx : ", pc);
5258 - printk("%pS ] ", (void *) pc);
5259 + printk("%pA ] ", (void *) pc);
5260 fp = rw->ins[6];
5261 } while (++count < 16);
5262 printk("\n");
5263 diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
5264 index 39d8b05..d1a7d90 100644
5265 --- a/arch/sparc/kernel/process_64.c
5266 +++ b/arch/sparc/kernel/process_64.c
5267 @@ -182,14 +182,14 @@ static void show_regwindow(struct pt_regs *regs)
5268 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
5269 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
5270 if (regs->tstate & TSTATE_PRIV)
5271 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
5272 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
5273 }
5274
5275 void show_regs(struct pt_regs *regs)
5276 {
5277 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
5278 regs->tpc, regs->tnpc, regs->y, print_tainted());
5279 - printk("TPC: <%pS>\n", (void *) regs->tpc);
5280 + printk("TPC: <%pA>\n", (void *) regs->tpc);
5281 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
5282 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
5283 regs->u_regs[3]);
5284 @@ -202,7 +202,7 @@ void show_regs(struct pt_regs *regs)
5285 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
5286 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
5287 regs->u_regs[15]);
5288 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
5289 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
5290 show_regwindow(regs);
5291 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
5292 }
5293 @@ -287,7 +287,7 @@ void arch_trigger_all_cpu_backtrace(void)
5294 ((tp && tp->task) ? tp->task->pid : -1));
5295
5296 if (gp->tstate & TSTATE_PRIV) {
5297 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
5298 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
5299 (void *) gp->tpc,
5300 (void *) gp->o7,
5301 (void *) gp->i7,
5302 diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
5303 index 42b282f..28ce9f2 100644
5304 --- a/arch/sparc/kernel/sys_sparc_32.c
5305 +++ b/arch/sparc/kernel/sys_sparc_32.c
5306 @@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5307 if (ARCH_SUN4C && len > 0x20000000)
5308 return -ENOMEM;
5309 if (!addr)
5310 - addr = TASK_UNMAPPED_BASE;
5311 + addr = current->mm->mmap_base;
5312
5313 if (flags & MAP_SHARED)
5314 addr = COLOUR_ALIGN(addr);
5315 @@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5316 }
5317 if (TASK_SIZE - PAGE_SIZE - len < addr)
5318 return -ENOMEM;
5319 - if (!vmm || addr + len <= vmm->vm_start)
5320 + if (check_heap_stack_gap(vmm, addr, len))
5321 return addr;
5322 addr = vmm->vm_end;
5323 if (flags & MAP_SHARED)
5324 diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
5325 index 232df99..cee1f9c 100644
5326 --- a/arch/sparc/kernel/sys_sparc_64.c
5327 +++ b/arch/sparc/kernel/sys_sparc_64.c
5328 @@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5329 /* We do not accept a shared mapping if it would violate
5330 * cache aliasing constraints.
5331 */
5332 - if ((flags & MAP_SHARED) &&
5333 + if ((filp || (flags & MAP_SHARED)) &&
5334 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5335 return -EINVAL;
5336 return addr;
5337 @@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5338 if (filp || (flags & MAP_SHARED))
5339 do_color_align = 1;
5340
5341 +#ifdef CONFIG_PAX_RANDMMAP
5342 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
5343 +#endif
5344 +
5345 if (addr) {
5346 if (do_color_align)
5347 addr = COLOUR_ALIGN(addr, pgoff);
5348 @@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5349 addr = PAGE_ALIGN(addr);
5350
5351 vma = find_vma(mm, addr);
5352 - if (task_size - len >= addr &&
5353 - (!vma || addr + len <= vma->vm_start))
5354 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5355 return addr;
5356 }
5357
5358 if (len > mm->cached_hole_size) {
5359 - start_addr = addr = mm->free_area_cache;
5360 + start_addr = addr = mm->free_area_cache;
5361 } else {
5362 - start_addr = addr = TASK_UNMAPPED_BASE;
5363 + start_addr = addr = mm->mmap_base;
5364 mm->cached_hole_size = 0;
5365 }
5366
5367 @@ -174,14 +177,14 @@ full_search:
5368 vma = find_vma(mm, VA_EXCLUDE_END);
5369 }
5370 if (unlikely(task_size < addr)) {
5371 - if (start_addr != TASK_UNMAPPED_BASE) {
5372 - start_addr = addr = TASK_UNMAPPED_BASE;
5373 + if (start_addr != mm->mmap_base) {
5374 + start_addr = addr = mm->mmap_base;
5375 mm->cached_hole_size = 0;
5376 goto full_search;
5377 }
5378 return -ENOMEM;
5379 }
5380 - if (likely(!vma || addr + len <= vma->vm_start)) {
5381 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5382 /*
5383 * Remember the place where we stopped the search:
5384 */
5385 @@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5386 /* We do not accept a shared mapping if it would violate
5387 * cache aliasing constraints.
5388 */
5389 - if ((flags & MAP_SHARED) &&
5390 + if ((filp || (flags & MAP_SHARED)) &&
5391 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5392 return -EINVAL;
5393 return addr;
5394 @@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5395 addr = PAGE_ALIGN(addr);
5396
5397 vma = find_vma(mm, addr);
5398 - if (task_size - len >= addr &&
5399 - (!vma || addr + len <= vma->vm_start))
5400 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5401 return addr;
5402 }
5403
5404 @@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5405 /* make sure it can fit in the remaining address space */
5406 if (likely(addr > len)) {
5407 vma = find_vma(mm, addr-len);
5408 - if (!vma || addr <= vma->vm_start) {
5409 + if (check_heap_stack_gap(vma, addr - len, len)) {
5410 /* remember the address as a hint for next time */
5411 return (mm->free_area_cache = addr-len);
5412 }
5413 @@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5414 if (unlikely(mm->mmap_base < len))
5415 goto bottomup;
5416
5417 - addr = mm->mmap_base-len;
5418 - if (do_color_align)
5419 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5420 + addr = mm->mmap_base - len;
5421
5422 do {
5423 + if (do_color_align)
5424 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5425 /*
5426 * Lookup failure means no vma is above this address,
5427 * else if new region fits below vma->vm_start,
5428 * return with success:
5429 */
5430 vma = find_vma(mm, addr);
5431 - if (likely(!vma || addr+len <= vma->vm_start)) {
5432 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5433 /* remember the address as a hint for next time */
5434 return (mm->free_area_cache = addr);
5435 }
5436 @@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5437 mm->cached_hole_size = vma->vm_start - addr;
5438
5439 /* try just below the current vma->vm_start */
5440 - addr = vma->vm_start-len;
5441 - if (do_color_align)
5442 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5443 - } while (likely(len < vma->vm_start));
5444 + addr = skip_heap_stack_gap(vma, len);
5445 + } while (!IS_ERR_VALUE(addr));
5446
5447 bottomup:
5448 /*
5449 @@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5450 gap == RLIM_INFINITY ||
5451 sysctl_legacy_va_layout) {
5452 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
5453 +
5454 +#ifdef CONFIG_PAX_RANDMMAP
5455 + if (mm->pax_flags & MF_PAX_RANDMMAP)
5456 + mm->mmap_base += mm->delta_mmap;
5457 +#endif
5458 +
5459 mm->get_unmapped_area = arch_get_unmapped_area;
5460 mm->unmap_area = arch_unmap_area;
5461 } else {
5462 @@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5463 gap = (task_size / 6 * 5);
5464
5465 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
5466 +
5467 +#ifdef CONFIG_PAX_RANDMMAP
5468 + if (mm->pax_flags & MF_PAX_RANDMMAP)
5469 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5470 +#endif
5471 +
5472 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
5473 mm->unmap_area = arch_unmap_area_topdown;
5474 }
5475 diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
5476 index 591f20c..0f1b925 100644
5477 --- a/arch/sparc/kernel/traps_32.c
5478 +++ b/arch/sparc/kernel/traps_32.c
5479 @@ -45,6 +45,8 @@ static void instruction_dump(unsigned long *pc)
5480 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
5481 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
5482
5483 +extern void gr_handle_kernel_exploit(void);
5484 +
5485 void die_if_kernel(char *str, struct pt_regs *regs)
5486 {
5487 static int die_counter;
5488 @@ -77,15 +79,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5489 count++ < 30 &&
5490 (((unsigned long) rw) >= PAGE_OFFSET) &&
5491 !(((unsigned long) rw) & 0x7)) {
5492 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
5493 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
5494 (void *) rw->ins[7]);
5495 rw = (struct reg_window32 *)rw->ins[6];
5496 }
5497 }
5498 printk("Instruction DUMP:");
5499 instruction_dump ((unsigned long *) regs->pc);
5500 - if(regs->psr & PSR_PS)
5501 + if(regs->psr & PSR_PS) {
5502 + gr_handle_kernel_exploit();
5503 do_exit(SIGKILL);
5504 + }
5505 do_exit(SIGSEGV);
5506 }
5507
5508 diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
5509 index 0cbdaa4..438e4c9 100644
5510 --- a/arch/sparc/kernel/traps_64.c
5511 +++ b/arch/sparc/kernel/traps_64.c
5512 @@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
5513 i + 1,
5514 p->trapstack[i].tstate, p->trapstack[i].tpc,
5515 p->trapstack[i].tnpc, p->trapstack[i].tt);
5516 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
5517 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
5518 }
5519 }
5520
5521 @@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
5522
5523 lvl -= 0x100;
5524 if (regs->tstate & TSTATE_PRIV) {
5525 +
5526 +#ifdef CONFIG_PAX_REFCOUNT
5527 + if (lvl == 6)
5528 + pax_report_refcount_overflow(regs);
5529 +#endif
5530 +
5531 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
5532 die_if_kernel(buffer, regs);
5533 }
5534 @@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
5535 void bad_trap_tl1(struct pt_regs *regs, long lvl)
5536 {
5537 char buffer[32];
5538 -
5539 +
5540 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
5541 0, lvl, SIGTRAP) == NOTIFY_STOP)
5542 return;
5543
5544 +#ifdef CONFIG_PAX_REFCOUNT
5545 + if (lvl == 6)
5546 + pax_report_refcount_overflow(regs);
5547 +#endif
5548 +
5549 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
5550
5551 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
5552 @@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
5553 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
5554 printk("%s" "ERROR(%d): ",
5555 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
5556 - printk("TPC<%pS>\n", (void *) regs->tpc);
5557 + printk("TPC<%pA>\n", (void *) regs->tpc);
5558 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
5559 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
5560 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
5561 @@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
5562 smp_processor_id(),
5563 (type & 0x1) ? 'I' : 'D',
5564 regs->tpc);
5565 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
5566 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
5567 panic("Irrecoverable Cheetah+ parity error.");
5568 }
5569
5570 @@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
5571 smp_processor_id(),
5572 (type & 0x1) ? 'I' : 'D',
5573 regs->tpc);
5574 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
5575 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
5576 }
5577
5578 struct sun4v_error_entry {
5579 @@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
5580
5581 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
5582 regs->tpc, tl);
5583 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
5584 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
5585 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
5586 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
5587 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
5588 (void *) regs->u_regs[UREG_I7]);
5589 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
5590 "pte[%lx] error[%lx]\n",
5591 @@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
5592
5593 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
5594 regs->tpc, tl);
5595 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
5596 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
5597 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
5598 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
5599 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
5600 (void *) regs->u_regs[UREG_I7]);
5601 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
5602 "pte[%lx] error[%lx]\n",
5603 @@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
5604 fp = (unsigned long)sf->fp + STACK_BIAS;
5605 }
5606
5607 - printk(" [%016lx] %pS\n", pc, (void *) pc);
5608 + printk(" [%016lx] %pA\n", pc, (void *) pc);
5609 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5610 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
5611 int index = tsk->curr_ret_stack;
5612 if (tsk->ret_stack && index >= graph) {
5613 pc = tsk->ret_stack[index - graph].ret;
5614 - printk(" [%016lx] %pS\n", pc, (void *) pc);
5615 + printk(" [%016lx] %pA\n", pc, (void *) pc);
5616 graph++;
5617 }
5618 }
5619 @@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
5620 return (struct reg_window *) (fp + STACK_BIAS);
5621 }
5622
5623 +extern void gr_handle_kernel_exploit(void);
5624 +
5625 void die_if_kernel(char *str, struct pt_regs *regs)
5626 {
5627 static int die_counter;
5628 @@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5629 while (rw &&
5630 count++ < 30 &&
5631 kstack_valid(tp, (unsigned long) rw)) {
5632 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
5633 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
5634 (void *) rw->ins[7]);
5635
5636 rw = kernel_stack_up(rw);
5637 @@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5638 }
5639 user_instruction_dump ((unsigned int __user *) regs->tpc);
5640 }
5641 - if (regs->tstate & TSTATE_PRIV)
5642 + if (regs->tstate & TSTATE_PRIV) {
5643 + gr_handle_kernel_exploit();
5644 do_exit(SIGKILL);
5645 + }
5646 do_exit(SIGSEGV);
5647 }
5648 EXPORT_SYMBOL(die_if_kernel);
5649 diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
5650 index 76e4ac1..78f8bb1 100644
5651 --- a/arch/sparc/kernel/unaligned_64.c
5652 +++ b/arch/sparc/kernel/unaligned_64.c
5653 @@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs *regs)
5654 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
5655
5656 if (__ratelimit(&ratelimit)) {
5657 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
5658 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
5659 regs->tpc, (void *) regs->tpc);
5660 }
5661 }
5662 diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
5663 index a3fc437..fea9957 100644
5664 --- a/arch/sparc/lib/Makefile
5665 +++ b/arch/sparc/lib/Makefile
5666 @@ -2,7 +2,7 @@
5667 #
5668
5669 asflags-y := -ansi -DST_DIV0=0x02
5670 -ccflags-y := -Werror
5671 +#ccflags-y := -Werror
5672
5673 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
5674 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
5675 diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
5676 index 59186e0..f747d7a 100644
5677 --- a/arch/sparc/lib/atomic_64.S
5678 +++ b/arch/sparc/lib/atomic_64.S
5679 @@ -18,7 +18,12 @@
5680 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
5681 BACKOFF_SETUP(%o2)
5682 1: lduw [%o1], %g1
5683 - add %g1, %o0, %g7
5684 + addcc %g1, %o0, %g7
5685 +
5686 +#ifdef CONFIG_PAX_REFCOUNT
5687 + tvs %icc, 6
5688 +#endif
5689 +
5690 cas [%o1], %g1, %g7
5691 cmp %g1, %g7
5692 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
5693 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
5694 2: BACKOFF_SPIN(%o2, %o3, 1b)
5695 .size atomic_add, .-atomic_add
5696
5697 + .globl atomic_add_unchecked
5698 + .type atomic_add_unchecked,#function
5699 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5700 + BACKOFF_SETUP(%o2)
5701 +1: lduw [%o1], %g1
5702 + add %g1, %o0, %g7
5703 + cas [%o1], %g1, %g7
5704 + cmp %g1, %g7
5705 + bne,pn %icc, 2f
5706 + nop
5707 + retl
5708 + nop
5709 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5710 + .size atomic_add_unchecked, .-atomic_add_unchecked
5711 +
5712 .globl atomic_sub
5713 .type atomic_sub,#function
5714 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5715 BACKOFF_SETUP(%o2)
5716 1: lduw [%o1], %g1
5717 - sub %g1, %o0, %g7
5718 + subcc %g1, %o0, %g7
5719 +
5720 +#ifdef CONFIG_PAX_REFCOUNT
5721 + tvs %icc, 6
5722 +#endif
5723 +
5724 cas [%o1], %g1, %g7
5725 cmp %g1, %g7
5726 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
5727 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5728 2: BACKOFF_SPIN(%o2, %o3, 1b)
5729 .size atomic_sub, .-atomic_sub
5730
5731 + .globl atomic_sub_unchecked
5732 + .type atomic_sub_unchecked,#function
5733 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
5734 + BACKOFF_SETUP(%o2)
5735 +1: lduw [%o1], %g1
5736 + sub %g1, %o0, %g7
5737 + cas [%o1], %g1, %g7
5738 + cmp %g1, %g7
5739 + bne,pn %icc, 2f
5740 + nop
5741 + retl
5742 + nop
5743 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5744 + .size atomic_sub_unchecked, .-atomic_sub_unchecked
5745 +
5746 .globl atomic_add_ret
5747 .type atomic_add_ret,#function
5748 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5749 BACKOFF_SETUP(%o2)
5750 1: lduw [%o1], %g1
5751 - add %g1, %o0, %g7
5752 + addcc %g1, %o0, %g7
5753 +
5754 +#ifdef CONFIG_PAX_REFCOUNT
5755 + tvs %icc, 6
5756 +#endif
5757 +
5758 cas [%o1], %g1, %g7
5759 cmp %g1, %g7
5760 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
5761 @@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5762 2: BACKOFF_SPIN(%o2, %o3, 1b)
5763 .size atomic_add_ret, .-atomic_add_ret
5764
5765 + .globl atomic_add_ret_unchecked
5766 + .type atomic_add_ret_unchecked,#function
5767 +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5768 + BACKOFF_SETUP(%o2)
5769 +1: lduw [%o1], %g1
5770 + addcc %g1, %o0, %g7
5771 + cas [%o1], %g1, %g7
5772 + cmp %g1, %g7
5773 + bne,pn %icc, 2f
5774 + add %g7, %o0, %g7
5775 + sra %g7, 0, %o0
5776 + retl
5777 + nop
5778 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5779 + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
5780 +
5781 .globl atomic_sub_ret
5782 .type atomic_sub_ret,#function
5783 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5784 BACKOFF_SETUP(%o2)
5785 1: lduw [%o1], %g1
5786 - sub %g1, %o0, %g7
5787 + subcc %g1, %o0, %g7
5788 +
5789 +#ifdef CONFIG_PAX_REFCOUNT
5790 + tvs %icc, 6
5791 +#endif
5792 +
5793 cas [%o1], %g1, %g7
5794 cmp %g1, %g7
5795 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
5796 @@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5797 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
5798 BACKOFF_SETUP(%o2)
5799 1: ldx [%o1], %g1
5800 - add %g1, %o0, %g7
5801 + addcc %g1, %o0, %g7
5802 +
5803 +#ifdef CONFIG_PAX_REFCOUNT
5804 + tvs %xcc, 6
5805 +#endif
5806 +
5807 casx [%o1], %g1, %g7
5808 cmp %g1, %g7
5809 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
5810 @@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
5811 2: BACKOFF_SPIN(%o2, %o3, 1b)
5812 .size atomic64_add, .-atomic64_add
5813
5814 + .globl atomic64_add_unchecked
5815 + .type atomic64_add_unchecked,#function
5816 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5817 + BACKOFF_SETUP(%o2)
5818 +1: ldx [%o1], %g1
5819 + addcc %g1, %o0, %g7
5820 + casx [%o1], %g1, %g7
5821 + cmp %g1, %g7
5822 + bne,pn %xcc, 2f
5823 + nop
5824 + retl
5825 + nop
5826 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5827 + .size atomic64_add_unchecked, .-atomic64_add_unchecked
5828 +
5829 .globl atomic64_sub
5830 .type atomic64_sub,#function
5831 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5832 BACKOFF_SETUP(%o2)
5833 1: ldx [%o1], %g1
5834 - sub %g1, %o0, %g7
5835 + subcc %g1, %o0, %g7
5836 +
5837 +#ifdef CONFIG_PAX_REFCOUNT
5838 + tvs %xcc, 6
5839 +#endif
5840 +
5841 casx [%o1], %g1, %g7
5842 cmp %g1, %g7
5843 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
5844 @@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5845 2: BACKOFF_SPIN(%o2, %o3, 1b)
5846 .size atomic64_sub, .-atomic64_sub
5847
5848 + .globl atomic64_sub_unchecked
5849 + .type atomic64_sub_unchecked,#function
5850 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
5851 + BACKOFF_SETUP(%o2)
5852 +1: ldx [%o1], %g1
5853 + subcc %g1, %o0, %g7
5854 + casx [%o1], %g1, %g7
5855 + cmp %g1, %g7
5856 + bne,pn %xcc, 2f
5857 + nop
5858 + retl
5859 + nop
5860 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5861 + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
5862 +
5863 .globl atomic64_add_ret
5864 .type atomic64_add_ret,#function
5865 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5866 BACKOFF_SETUP(%o2)
5867 1: ldx [%o1], %g1
5868 - add %g1, %o0, %g7
5869 + addcc %g1, %o0, %g7
5870 +
5871 +#ifdef CONFIG_PAX_REFCOUNT
5872 + tvs %xcc, 6
5873 +#endif
5874 +
5875 casx [%o1], %g1, %g7
5876 cmp %g1, %g7
5877 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
5878 @@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5879 2: BACKOFF_SPIN(%o2, %o3, 1b)
5880 .size atomic64_add_ret, .-atomic64_add_ret
5881
5882 + .globl atomic64_add_ret_unchecked
5883 + .type atomic64_add_ret_unchecked,#function
5884 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5885 + BACKOFF_SETUP(%o2)
5886 +1: ldx [%o1], %g1
5887 + addcc %g1, %o0, %g7
5888 + casx [%o1], %g1, %g7
5889 + cmp %g1, %g7
5890 + bne,pn %xcc, 2f
5891 + add %g7, %o0, %g7
5892 + mov %g7, %o0
5893 + retl
5894 + nop
5895 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5896 + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
5897 +
5898 .globl atomic64_sub_ret
5899 .type atomic64_sub_ret,#function
5900 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5901 BACKOFF_SETUP(%o2)
5902 1: ldx [%o1], %g1
5903 - sub %g1, %o0, %g7
5904 + subcc %g1, %o0, %g7
5905 +
5906 +#ifdef CONFIG_PAX_REFCOUNT
5907 + tvs %xcc, 6
5908 +#endif
5909 +
5910 casx [%o1], %g1, %g7
5911 cmp %g1, %g7
5912 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
5913 diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
5914 index f73c224..662af10 100644
5915 --- a/arch/sparc/lib/ksyms.c
5916 +++ b/arch/sparc/lib/ksyms.c
5917 @@ -136,12 +136,18 @@ EXPORT_SYMBOL(__downgrade_write);
5918
5919 /* Atomic counter implementation. */
5920 EXPORT_SYMBOL(atomic_add);
5921 +EXPORT_SYMBOL(atomic_add_unchecked);
5922 EXPORT_SYMBOL(atomic_add_ret);
5923 +EXPORT_SYMBOL(atomic_add_ret_unchecked);
5924 EXPORT_SYMBOL(atomic_sub);
5925 +EXPORT_SYMBOL(atomic_sub_unchecked);
5926 EXPORT_SYMBOL(atomic_sub_ret);
5927 EXPORT_SYMBOL(atomic64_add);
5928 +EXPORT_SYMBOL(atomic64_add_unchecked);
5929 EXPORT_SYMBOL(atomic64_add_ret);
5930 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
5931 EXPORT_SYMBOL(atomic64_sub);
5932 +EXPORT_SYMBOL(atomic64_sub_unchecked);
5933 EXPORT_SYMBOL(atomic64_sub_ret);
5934
5935 /* Atomic bit operations. */
5936 diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
5937 index 301421c..e2535d1 100644
5938 --- a/arch/sparc/mm/Makefile
5939 +++ b/arch/sparc/mm/Makefile
5940 @@ -2,7 +2,7 @@
5941 #
5942
5943 asflags-y := -ansi
5944 -ccflags-y := -Werror
5945 +#ccflags-y := -Werror
5946
5947 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
5948 obj-y += fault_$(BITS).o
5949 diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
5950 index 8023fd7..c8e89e9 100644
5951 --- a/arch/sparc/mm/fault_32.c
5952 +++ b/arch/sparc/mm/fault_32.c
5953 @@ -21,6 +21,9 @@
5954 #include <linux/perf_event.h>
5955 #include <linux/interrupt.h>
5956 #include <linux/kdebug.h>
5957 +#include <linux/slab.h>
5958 +#include <linux/pagemap.h>
5959 +#include <linux/compiler.h>
5960
5961 #include <asm/system.h>
5962 #include <asm/page.h>
5963 @@ -208,6 +211,268 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
5964 return safe_compute_effective_address(regs, insn);
5965 }
5966
5967 +#ifdef CONFIG_PAX_PAGEEXEC
5968 +#ifdef CONFIG_PAX_DLRESOLVE
5969 +static void pax_emuplt_close(struct vm_area_struct *vma)
5970 +{
5971 + vma->vm_mm->call_dl_resolve = 0UL;
5972 +}
5973 +
5974 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5975 +{
5976 + unsigned int *kaddr;
5977 +
5978 + vmf->page = alloc_page(GFP_HIGHUSER);
5979 + if (!vmf->page)
5980 + return VM_FAULT_OOM;
5981 +
5982 + kaddr = kmap(vmf->page);
5983 + memset(kaddr, 0, PAGE_SIZE);
5984 + kaddr[0] = 0x9DE3BFA8U; /* save */
5985 + flush_dcache_page(vmf->page);
5986 + kunmap(vmf->page);
5987 + return VM_FAULT_MAJOR;
5988 +}
5989 +
5990 +static const struct vm_operations_struct pax_vm_ops = {
5991 + .close = pax_emuplt_close,
5992 + .fault = pax_emuplt_fault
5993 +};
5994 +
5995 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
5996 +{
5997 + int ret;
5998 +
5999 + INIT_LIST_HEAD(&vma->anon_vma_chain);
6000 + vma->vm_mm = current->mm;
6001 + vma->vm_start = addr;
6002 + vma->vm_end = addr + PAGE_SIZE;
6003 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6004 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6005 + vma->vm_ops = &pax_vm_ops;
6006 +
6007 + ret = insert_vm_struct(current->mm, vma);
6008 + if (ret)
6009 + return ret;
6010 +
6011 + ++current->mm->total_vm;
6012 + return 0;
6013 +}
6014 +#endif
6015 +
6016 +/*
6017 + * PaX: decide what to do with offenders (regs->pc = fault address)
6018 + *
6019 + * returns 1 when task should be killed
6020 + * 2 when patched PLT trampoline was detected
6021 + * 3 when unpatched PLT trampoline was detected
6022 + */
6023 +static int pax_handle_fetch_fault(struct pt_regs *regs)
6024 +{
6025 +
6026 +#ifdef CONFIG_PAX_EMUPLT
6027 + int err;
6028 +
6029 + do { /* PaX: patched PLT emulation #1 */
6030 + unsigned int sethi1, sethi2, jmpl;
6031 +
6032 + err = get_user(sethi1, (unsigned int *)regs->pc);
6033 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
6034 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
6035 +
6036 + if (err)
6037 + break;
6038 +
6039 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6040 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
6041 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
6042 + {
6043 + unsigned int addr;
6044 +
6045 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6046 + addr = regs->u_regs[UREG_G1];
6047 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6048 + regs->pc = addr;
6049 + regs->npc = addr+4;
6050 + return 2;
6051 + }
6052 + } while (0);
6053 +
6054 + { /* PaX: patched PLT emulation #2 */
6055 + unsigned int ba;
6056 +
6057 + err = get_user(ba, (unsigned int *)regs->pc);
6058 +
6059 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6060 + unsigned int addr;
6061 +
6062 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6063 + regs->pc = addr;
6064 + regs->npc = addr+4;
6065 + return 2;
6066 + }
6067 + }
6068 +
6069 + do { /* PaX: patched PLT emulation #3 */
6070 + unsigned int sethi, jmpl, nop;
6071 +
6072 + err = get_user(sethi, (unsigned int *)regs->pc);
6073 + err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
6074 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
6075 +
6076 + if (err)
6077 + break;
6078 +
6079 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6080 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6081 + nop == 0x01000000U)
6082 + {
6083 + unsigned int addr;
6084 +
6085 + addr = (sethi & 0x003FFFFFU) << 10;
6086 + regs->u_regs[UREG_G1] = addr;
6087 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6088 + regs->pc = addr;
6089 + regs->npc = addr+4;
6090 + return 2;
6091 + }
6092 + } while (0);
6093 +
6094 + do { /* PaX: unpatched PLT emulation step 1 */
6095 + unsigned int sethi, ba, nop;
6096 +
6097 + err = get_user(sethi, (unsigned int *)regs->pc);
6098 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
6099 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
6100 +
6101 + if (err)
6102 + break;
6103 +
6104 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6105 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
6106 + nop == 0x01000000U)
6107 + {
6108 + unsigned int addr, save, call;
6109 +
6110 + if ((ba & 0xFFC00000U) == 0x30800000U)
6111 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6112 + else
6113 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
6114 +
6115 + err = get_user(save, (unsigned int *)addr);
6116 + err |= get_user(call, (unsigned int *)(addr+4));
6117 + err |= get_user(nop, (unsigned int *)(addr+8));
6118 + if (err)
6119 + break;
6120 +
6121 +#ifdef CONFIG_PAX_DLRESOLVE
6122 + if (save == 0x9DE3BFA8U &&
6123 + (call & 0xC0000000U) == 0x40000000U &&
6124 + nop == 0x01000000U)
6125 + {
6126 + struct vm_area_struct *vma;
6127 + unsigned long call_dl_resolve;
6128 +
6129 + down_read(&current->mm->mmap_sem);
6130 + call_dl_resolve = current->mm->call_dl_resolve;
6131 + up_read(&current->mm->mmap_sem);
6132 + if (likely(call_dl_resolve))
6133 + goto emulate;
6134 +
6135 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
6136 +
6137 + down_write(&current->mm->mmap_sem);
6138 + if (current->mm->call_dl_resolve) {
6139 + call_dl_resolve = current->mm->call_dl_resolve;
6140 + up_write(&current->mm->mmap_sem);
6141 + if (vma)
6142 + kmem_cache_free(vm_area_cachep, vma);
6143 + goto emulate;
6144 + }
6145 +
6146 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
6147 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
6148 + up_write(&current->mm->mmap_sem);
6149 + if (vma)
6150 + kmem_cache_free(vm_area_cachep, vma);
6151 + return 1;
6152 + }
6153 +
6154 + if (pax_insert_vma(vma, call_dl_resolve)) {
6155 + up_write(&current->mm->mmap_sem);
6156 + kmem_cache_free(vm_area_cachep, vma);
6157 + return 1;
6158 + }
6159 +
6160 + current->mm->call_dl_resolve = call_dl_resolve;
6161 + up_write(&current->mm->mmap_sem);
6162 +
6163 +emulate:
6164 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6165 + regs->pc = call_dl_resolve;
6166 + regs->npc = addr+4;
6167 + return 3;
6168 + }
6169 +#endif
6170 +
6171 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6172 + if ((save & 0xFFC00000U) == 0x05000000U &&
6173 + (call & 0xFFFFE000U) == 0x85C0A000U &&
6174 + nop == 0x01000000U)
6175 + {
6176 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6177 + regs->u_regs[UREG_G2] = addr + 4;
6178 + addr = (save & 0x003FFFFFU) << 10;
6179 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6180 + regs->pc = addr;
6181 + regs->npc = addr+4;
6182 + return 3;
6183 + }
6184 + }
6185 + } while (0);
6186 +
6187 + do { /* PaX: unpatched PLT emulation step 2 */
6188 + unsigned int save, call, nop;
6189 +
6190 + err = get_user(save, (unsigned int *)(regs->pc-4));
6191 + err |= get_user(call, (unsigned int *)regs->pc);
6192 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
6193 + if (err)
6194 + break;
6195 +
6196 + if (save == 0x9DE3BFA8U &&
6197 + (call & 0xC0000000U) == 0x40000000U &&
6198 + nop == 0x01000000U)
6199 + {
6200 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
6201 +
6202 + regs->u_regs[UREG_RETPC] = regs->pc;
6203 + regs->pc = dl_resolve;
6204 + regs->npc = dl_resolve+4;
6205 + return 3;
6206 + }
6207 + } while (0);
6208 +#endif
6209 +
6210 + return 1;
6211 +}
6212 +
6213 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6214 +{
6215 + unsigned long i;
6216 +
6217 + printk(KERN_ERR "PAX: bytes at PC: ");
6218 + for (i = 0; i < 8; i++) {
6219 + unsigned int c;
6220 + if (get_user(c, (unsigned int *)pc+i))
6221 + printk(KERN_CONT "???????? ");
6222 + else
6223 + printk(KERN_CONT "%08x ", c);
6224 + }
6225 + printk("\n");
6226 +}
6227 +#endif
6228 +
6229 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
6230 int text_fault)
6231 {
6232 @@ -280,6 +545,24 @@ good_area:
6233 if(!(vma->vm_flags & VM_WRITE))
6234 goto bad_area;
6235 } else {
6236 +
6237 +#ifdef CONFIG_PAX_PAGEEXEC
6238 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
6239 + up_read(&mm->mmap_sem);
6240 + switch (pax_handle_fetch_fault(regs)) {
6241 +
6242 +#ifdef CONFIG_PAX_EMUPLT
6243 + case 2:
6244 + case 3:
6245 + return;
6246 +#endif
6247 +
6248 + }
6249 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
6250 + do_group_exit(SIGKILL);
6251 + }
6252 +#endif
6253 +
6254 /* Allow reads even for write-only mappings */
6255 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
6256 goto bad_area;
6257 diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
6258 index 504c062..6fcb9c6 100644
6259 --- a/arch/sparc/mm/fault_64.c
6260 +++ b/arch/sparc/mm/fault_64.c
6261 @@ -21,6 +21,9 @@
6262 #include <linux/kprobes.h>
6263 #include <linux/kdebug.h>
6264 #include <linux/percpu.h>
6265 +#include <linux/slab.h>
6266 +#include <linux/pagemap.h>
6267 +#include <linux/compiler.h>
6268
6269 #include <asm/page.h>
6270 #include <asm/pgtable.h>
6271 @@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
6272 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
6273 regs->tpc);
6274 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
6275 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
6276 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
6277 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
6278 dump_stack();
6279 unhandled_fault(regs->tpc, current, regs);
6280 @@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
6281 show_regs(regs);
6282 }
6283
6284 +#ifdef CONFIG_PAX_PAGEEXEC
6285 +#ifdef CONFIG_PAX_DLRESOLVE
6286 +static void pax_emuplt_close(struct vm_area_struct *vma)
6287 +{
6288 + vma->vm_mm->call_dl_resolve = 0UL;
6289 +}
6290 +
6291 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6292 +{
6293 + unsigned int *kaddr;
6294 +
6295 + vmf->page = alloc_page(GFP_HIGHUSER);
6296 + if (!vmf->page)
6297 + return VM_FAULT_OOM;
6298 +
6299 + kaddr = kmap(vmf->page);
6300 + memset(kaddr, 0, PAGE_SIZE);
6301 + kaddr[0] = 0x9DE3BFA8U; /* save */
6302 + flush_dcache_page(vmf->page);
6303 + kunmap(vmf->page);
6304 + return VM_FAULT_MAJOR;
6305 +}
6306 +
6307 +static const struct vm_operations_struct pax_vm_ops = {
6308 + .close = pax_emuplt_close,
6309 + .fault = pax_emuplt_fault
6310 +};
6311 +
6312 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6313 +{
6314 + int ret;
6315 +
6316 + INIT_LIST_HEAD(&vma->anon_vma_chain);
6317 + vma->vm_mm = current->mm;
6318 + vma->vm_start = addr;
6319 + vma->vm_end = addr + PAGE_SIZE;
6320 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6321 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6322 + vma->vm_ops = &pax_vm_ops;
6323 +
6324 + ret = insert_vm_struct(current->mm, vma);
6325 + if (ret)
6326 + return ret;
6327 +
6328 + ++current->mm->total_vm;
6329 + return 0;
6330 +}
6331 +#endif
6332 +
6333 +/*
6334 + * PaX: decide what to do with offenders (regs->tpc = fault address)
6335 + *
6336 + * returns 1 when task should be killed
6337 + * 2 when patched PLT trampoline was detected
6338 + * 3 when unpatched PLT trampoline was detected
6339 + */
6340 +static int pax_handle_fetch_fault(struct pt_regs *regs)
6341 +{
6342 +
6343 +#ifdef CONFIG_PAX_EMUPLT
6344 + int err;
6345 +
6346 + do { /* PaX: patched PLT emulation #1 */
6347 + unsigned int sethi1, sethi2, jmpl;
6348 +
6349 + err = get_user(sethi1, (unsigned int *)regs->tpc);
6350 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
6351 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
6352 +
6353 + if (err)
6354 + break;
6355 +
6356 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6357 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
6358 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
6359 + {
6360 + unsigned long addr;
6361 +
6362 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6363 + addr = regs->u_regs[UREG_G1];
6364 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6365 +
6366 + if (test_thread_flag(TIF_32BIT))
6367 + addr &= 0xFFFFFFFFUL;
6368 +
6369 + regs->tpc = addr;
6370 + regs->tnpc = addr+4;
6371 + return 2;
6372 + }
6373 + } while (0);
6374 +
6375 + { /* PaX: patched PLT emulation #2 */
6376 + unsigned int ba;
6377 +
6378 + err = get_user(ba, (unsigned int *)regs->tpc);
6379 +
6380 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6381 + unsigned long addr;
6382 +
6383 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6384 +
6385 + if (test_thread_flag(TIF_32BIT))
6386 + addr &= 0xFFFFFFFFUL;
6387 +
6388 + regs->tpc = addr;
6389 + regs->tnpc = addr+4;
6390 + return 2;
6391 + }
6392 + }
6393 +
6394 + do { /* PaX: patched PLT emulation #3 */
6395 + unsigned int sethi, jmpl, nop;
6396 +
6397 + err = get_user(sethi, (unsigned int *)regs->tpc);
6398 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
6399 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6400 +
6401 + if (err)
6402 + break;
6403 +
6404 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6405 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6406 + nop == 0x01000000U)
6407 + {
6408 + unsigned long addr;
6409 +
6410 + addr = (sethi & 0x003FFFFFU) << 10;
6411 + regs->u_regs[UREG_G1] = addr;
6412 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6413 +
6414 + if (test_thread_flag(TIF_32BIT))
6415 + addr &= 0xFFFFFFFFUL;
6416 +
6417 + regs->tpc = addr;
6418 + regs->tnpc = addr+4;
6419 + return 2;
6420 + }
6421 + } while (0);
6422 +
6423 + do { /* PaX: patched PLT emulation #4 */
6424 + unsigned int sethi, mov1, call, mov2;
6425 +
6426 + err = get_user(sethi, (unsigned int *)regs->tpc);
6427 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
6428 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
6429 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
6430 +
6431 + if (err)
6432 + break;
6433 +
6434 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6435 + mov1 == 0x8210000FU &&
6436 + (call & 0xC0000000U) == 0x40000000U &&
6437 + mov2 == 0x9E100001U)
6438 + {
6439 + unsigned long addr;
6440 +
6441 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
6442 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
6443 +
6444 + if (test_thread_flag(TIF_32BIT))
6445 + addr &= 0xFFFFFFFFUL;
6446 +
6447 + regs->tpc = addr;
6448 + regs->tnpc = addr+4;
6449 + return 2;
6450 + }
6451 + } while (0);
6452 +
6453 + do { /* PaX: patched PLT emulation #5 */
6454 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
6455 +
6456 + err = get_user(sethi, (unsigned int *)regs->tpc);
6457 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
6458 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
6459 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
6460 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
6461 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
6462 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
6463 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
6464 +
6465 + if (err)
6466 + break;
6467 +
6468 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6469 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
6470 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6471 + (or1 & 0xFFFFE000U) == 0x82106000U &&
6472 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
6473 + sllx == 0x83287020U &&
6474 + jmpl == 0x81C04005U &&
6475 + nop == 0x01000000U)
6476 + {
6477 + unsigned long addr;
6478 +
6479 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
6480 + regs->u_regs[UREG_G1] <<= 32;
6481 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
6482 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
6483 + regs->tpc = addr;
6484 + regs->tnpc = addr+4;
6485 + return 2;
6486 + }
6487 + } while (0);
6488 +
6489 + do { /* PaX: patched PLT emulation #6 */
6490 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
6491 +
6492 + err = get_user(sethi, (unsigned int *)regs->tpc);
6493 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
6494 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
6495 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
6496 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
6497 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
6498 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
6499 +
6500 + if (err)
6501 + break;
6502 +
6503 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6504 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
6505 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6506 + sllx == 0x83287020U &&
6507 + (or & 0xFFFFE000U) == 0x8A116000U &&
6508 + jmpl == 0x81C04005U &&
6509 + nop == 0x01000000U)
6510 + {
6511 + unsigned long addr;
6512 +
6513 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
6514 + regs->u_regs[UREG_G1] <<= 32;
6515 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
6516 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
6517 + regs->tpc = addr;
6518 + regs->tnpc = addr+4;
6519 + return 2;
6520 + }
6521 + } while (0);
6522 +
6523 + do { /* PaX: unpatched PLT emulation step 1 */
6524 + unsigned int sethi, ba, nop;
6525 +
6526 + err = get_user(sethi, (unsigned int *)regs->tpc);
6527 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
6528 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6529 +
6530 + if (err)
6531 + break;
6532 +
6533 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6534 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
6535 + nop == 0x01000000U)
6536 + {
6537 + unsigned long addr;
6538 + unsigned int save, call;
6539 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
6540 +
6541 + if ((ba & 0xFFC00000U) == 0x30800000U)
6542 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6543 + else
6544 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
6545 +
6546 + if (test_thread_flag(TIF_32BIT))
6547 + addr &= 0xFFFFFFFFUL;
6548 +
6549 + err = get_user(save, (unsigned int *)addr);
6550 + err |= get_user(call, (unsigned int *)(addr+4));
6551 + err |= get_user(nop, (unsigned int *)(addr+8));
6552 + if (err)
6553 + break;
6554 +
6555 +#ifdef CONFIG_PAX_DLRESOLVE
6556 + if (save == 0x9DE3BFA8U &&
6557 + (call & 0xC0000000U) == 0x40000000U &&
6558 + nop == 0x01000000U)
6559 + {
6560 + struct vm_area_struct *vma;
6561 + unsigned long call_dl_resolve;
6562 +
6563 + down_read(&current->mm->mmap_sem);
6564 + call_dl_resolve = current->mm->call_dl_resolve;
6565 + up_read(&current->mm->mmap_sem);
6566 + if (likely(call_dl_resolve))
6567 + goto emulate;
6568 +
6569 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
6570 +
6571 + down_write(&current->mm->mmap_sem);
6572 + if (current->mm->call_dl_resolve) {
6573 + call_dl_resolve = current->mm->call_dl_resolve;
6574 + up_write(&current->mm->mmap_sem);
6575 + if (vma)
6576 + kmem_cache_free(vm_area_cachep, vma);
6577 + goto emulate;
6578 + }
6579 +
6580 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
6581 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
6582 + up_write(&current->mm->mmap_sem);
6583 + if (vma)
6584 + kmem_cache_free(vm_area_cachep, vma);
6585 + return 1;
6586 + }
6587 +
6588 + if (pax_insert_vma(vma, call_dl_resolve)) {
6589 + up_write(&current->mm->mmap_sem);
6590 + kmem_cache_free(vm_area_cachep, vma);
6591 + return 1;
6592 + }
6593 +
6594 + current->mm->call_dl_resolve = call_dl_resolve;
6595 + up_write(&current->mm->mmap_sem);
6596 +
6597 +emulate:
6598 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6599 + regs->tpc = call_dl_resolve;
6600 + regs->tnpc = addr+4;
6601 + return 3;
6602 + }
6603 +#endif
6604 +
6605 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6606 + if ((save & 0xFFC00000U) == 0x05000000U &&
6607 + (call & 0xFFFFE000U) == 0x85C0A000U &&
6608 + nop == 0x01000000U)
6609 + {
6610 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6611 + regs->u_regs[UREG_G2] = addr + 4;
6612 + addr = (save & 0x003FFFFFU) << 10;
6613 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6614 +
6615 + if (test_thread_flag(TIF_32BIT))
6616 + addr &= 0xFFFFFFFFUL;
6617 +
6618 + regs->tpc = addr;
6619 + regs->tnpc = addr+4;
6620 + return 3;
6621 + }
6622 +
6623 + /* PaX: 64-bit PLT stub */
6624 + err = get_user(sethi1, (unsigned int *)addr);
6625 + err |= get_user(sethi2, (unsigned int *)(addr+4));
6626 + err |= get_user(or1, (unsigned int *)(addr+8));
6627 + err |= get_user(or2, (unsigned int *)(addr+12));
6628 + err |= get_user(sllx, (unsigned int *)(addr+16));
6629 + err |= get_user(add, (unsigned int *)(addr+20));
6630 + err |= get_user(jmpl, (unsigned int *)(addr+24));
6631 + err |= get_user(nop, (unsigned int *)(addr+28));
6632 + if (err)
6633 + break;
6634 +
6635 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
6636 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6637 + (or1 & 0xFFFFE000U) == 0x88112000U &&
6638 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
6639 + sllx == 0x89293020U &&
6640 + add == 0x8A010005U &&
6641 + jmpl == 0x89C14000U &&
6642 + nop == 0x01000000U)
6643 + {
6644 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6645 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
6646 + regs->u_regs[UREG_G4] <<= 32;
6647 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
6648 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
6649 + regs->u_regs[UREG_G4] = addr + 24;
6650 + addr = regs->u_regs[UREG_G5];
6651 + regs->tpc = addr;
6652 + regs->tnpc = addr+4;
6653 + return 3;
6654 + }
6655 + }
6656 + } while (0);
6657 +
6658 +#ifdef CONFIG_PAX_DLRESOLVE
6659 + do { /* PaX: unpatched PLT emulation step 2 */
6660 + unsigned int save, call, nop;
6661 +
6662 + err = get_user(save, (unsigned int *)(regs->tpc-4));
6663 + err |= get_user(call, (unsigned int *)regs->tpc);
6664 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
6665 + if (err)
6666 + break;
6667 +
6668 + if (save == 0x9DE3BFA8U &&
6669 + (call & 0xC0000000U) == 0x40000000U &&
6670 + nop == 0x01000000U)
6671 + {
6672 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
6673 +
6674 + if (test_thread_flag(TIF_32BIT))
6675 + dl_resolve &= 0xFFFFFFFFUL;
6676 +
6677 + regs->u_regs[UREG_RETPC] = regs->tpc;
6678 + regs->tpc = dl_resolve;
6679 + regs->tnpc = dl_resolve+4;
6680 + return 3;
6681 + }
6682 + } while (0);
6683 +#endif
6684 +
6685 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
6686 + unsigned int sethi, ba, nop;
6687 +
6688 + err = get_user(sethi, (unsigned int *)regs->tpc);
6689 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
6690 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6691 +
6692 + if (err)
6693 + break;
6694 +
6695 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6696 + (ba & 0xFFF00000U) == 0x30600000U &&
6697 + nop == 0x01000000U)
6698 + {
6699 + unsigned long addr;
6700 +
6701 + addr = (sethi & 0x003FFFFFU) << 10;
6702 + regs->u_regs[UREG_G1] = addr;
6703 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
6704 +
6705 + if (test_thread_flag(TIF_32BIT))
6706 + addr &= 0xFFFFFFFFUL;
6707 +
6708 + regs->tpc = addr;
6709 + regs->tnpc = addr+4;
6710 + return 2;
6711 + }
6712 + } while (0);
6713 +
6714 +#endif
6715 +
6716 + return 1;
6717 +}
6718 +
6719 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6720 +{
6721 + unsigned long i;
6722 +
6723 + printk(KERN_ERR "PAX: bytes at PC: ");
6724 + for (i = 0; i < 8; i++) {
6725 + unsigned int c;
6726 + if (get_user(c, (unsigned int *)pc+i))
6727 + printk(KERN_CONT "???????? ");
6728 + else
6729 + printk(KERN_CONT "%08x ", c);
6730 + }
6731 + printk("\n");
6732 +}
6733 +#endif
6734 +
6735 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
6736 {
6737 struct mm_struct *mm = current->mm;
6738 @@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
6739 if (!vma)
6740 goto bad_area;
6741
6742 +#ifdef CONFIG_PAX_PAGEEXEC
6743 + /* PaX: detect ITLB misses on non-exec pages */
6744 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
6745 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
6746 + {
6747 + if (address != regs->tpc)
6748 + goto good_area;
6749 +
6750 + up_read(&mm->mmap_sem);
6751 + switch (pax_handle_fetch_fault(regs)) {
6752 +
6753 +#ifdef CONFIG_PAX_EMUPLT
6754 + case 2:
6755 + case 3:
6756 + return;
6757 +#endif
6758 +
6759 + }
6760 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
6761 + do_group_exit(SIGKILL);
6762 + }
6763 +#endif
6764 +
6765 /* Pure DTLB misses do not tell us whether the fault causing
6766 * load/store/atomic was a write or not, it only says that there
6767 * was no match. So in such a case we (carefully) read the
6768 diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
6769 index 07e1453..0a7d9e9 100644
6770 --- a/arch/sparc/mm/hugetlbpage.c
6771 +++ b/arch/sparc/mm/hugetlbpage.c
6772 @@ -67,7 +67,7 @@ full_search:
6773 }
6774 return -ENOMEM;
6775 }
6776 - if (likely(!vma || addr + len <= vma->vm_start)) {
6777 + if (likely(check_heap_stack_gap(vma, addr, len))) {
6778 /*
6779 * Remember the place where we stopped the search:
6780 */
6781 @@ -106,7 +106,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6782 /* make sure it can fit in the remaining address space */
6783 if (likely(addr > len)) {
6784 vma = find_vma(mm, addr-len);
6785 - if (!vma || addr <= vma->vm_start) {
6786 + if (check_heap_stack_gap(vma, addr - len, len)) {
6787 /* remember the address as a hint for next time */
6788 return (mm->free_area_cache = addr-len);
6789 }
6790 @@ -115,16 +115,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6791 if (unlikely(mm->mmap_base < len))
6792 goto bottomup;
6793
6794 - addr = (mm->mmap_base-len) & HPAGE_MASK;
6795 + addr = mm->mmap_base - len;
6796
6797 do {
6798 + addr &= HPAGE_MASK;
6799 /*
6800 * Lookup failure means no vma is above this address,
6801 * else if new region fits below vma->vm_start,
6802 * return with success:
6803 */
6804 vma = find_vma(mm, addr);
6805 - if (likely(!vma || addr+len <= vma->vm_start)) {
6806 + if (likely(check_heap_stack_gap(vma, addr, len))) {
6807 /* remember the address as a hint for next time */
6808 return (mm->free_area_cache = addr);
6809 }
6810 @@ -134,8 +135,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6811 mm->cached_hole_size = vma->vm_start - addr;
6812
6813 /* try just below the current vma->vm_start */
6814 - addr = (vma->vm_start-len) & HPAGE_MASK;
6815 - } while (likely(len < vma->vm_start));
6816 + addr = skip_heap_stack_gap(vma, len);
6817 + } while (!IS_ERR_VALUE(addr));
6818
6819 bottomup:
6820 /*
6821 @@ -181,8 +182,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
6822 if (addr) {
6823 addr = ALIGN(addr, HPAGE_SIZE);
6824 vma = find_vma(mm, addr);
6825 - if (task_size - len >= addr &&
6826 - (!vma || addr + len <= vma->vm_start))
6827 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
6828 return addr;
6829 }
6830 if (mm->get_unmapped_area == arch_get_unmapped_area)
6831 diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
6832 index 7b00de6..78239f4 100644
6833 --- a/arch/sparc/mm/init_32.c
6834 +++ b/arch/sparc/mm/init_32.c
6835 @@ -316,6 +316,9 @@ extern void device_scan(void);
6836 pgprot_t PAGE_SHARED __read_mostly;
6837 EXPORT_SYMBOL(PAGE_SHARED);
6838
6839 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
6840 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
6841 +
6842 void __init paging_init(void)
6843 {
6844 switch(sparc_cpu_model) {
6845 @@ -344,17 +347,17 @@ void __init paging_init(void)
6846
6847 /* Initialize the protection map with non-constant, MMU dependent values. */
6848 protection_map[0] = PAGE_NONE;
6849 - protection_map[1] = PAGE_READONLY;
6850 - protection_map[2] = PAGE_COPY;
6851 - protection_map[3] = PAGE_COPY;
6852 + protection_map[1] = PAGE_READONLY_NOEXEC;
6853 + protection_map[2] = PAGE_COPY_NOEXEC;
6854 + protection_map[3] = PAGE_COPY_NOEXEC;
6855 protection_map[4] = PAGE_READONLY;
6856 protection_map[5] = PAGE_READONLY;
6857 protection_map[6] = PAGE_COPY;
6858 protection_map[7] = PAGE_COPY;
6859 protection_map[8] = PAGE_NONE;
6860 - protection_map[9] = PAGE_READONLY;
6861 - protection_map[10] = PAGE_SHARED;
6862 - protection_map[11] = PAGE_SHARED;
6863 + protection_map[9] = PAGE_READONLY_NOEXEC;
6864 + protection_map[10] = PAGE_SHARED_NOEXEC;
6865 + protection_map[11] = PAGE_SHARED_NOEXEC;
6866 protection_map[12] = PAGE_READONLY;
6867 protection_map[13] = PAGE_READONLY;
6868 protection_map[14] = PAGE_SHARED;
6869 diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
6870 index cbef74e..c38fead 100644
6871 --- a/arch/sparc/mm/srmmu.c
6872 +++ b/arch/sparc/mm/srmmu.c
6873 @@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
6874 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
6875 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
6876 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
6877 +
6878 +#ifdef CONFIG_PAX_PAGEEXEC
6879 + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
6880 + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
6881 + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
6882 +#endif
6883 +
6884 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
6885 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
6886
6887 diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
6888 index 27fe667..36d474c 100644
6889 --- a/arch/tile/include/asm/atomic_64.h
6890 +++ b/arch/tile/include/asm/atomic_64.h
6891 @@ -142,6 +142,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
6892
6893 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
6894
6895 +#define atomic64_read_unchecked(v) atomic64_read(v)
6896 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
6897 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
6898 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
6899 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
6900 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
6901 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
6902 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
6903 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
6904 +
6905 /* Atomic dec and inc don't implement barrier, so provide them if needed. */
6906 #define smp_mb__before_atomic_dec() smp_mb()
6907 #define smp_mb__after_atomic_dec() smp_mb()
6908 diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
6909 index 392e533..536b092 100644
6910 --- a/arch/tile/include/asm/cache.h
6911 +++ b/arch/tile/include/asm/cache.h
6912 @@ -15,11 +15,12 @@
6913 #ifndef _ASM_TILE_CACHE_H
6914 #define _ASM_TILE_CACHE_H
6915
6916 +#include <linux/const.h>
6917 #include <arch/chip.h>
6918
6919 /* bytes per L1 data cache line */
6920 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
6921 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6922 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6923
6924 /* bytes per L2 cache line */
6925 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
6926 diff --git a/arch/um/Makefile b/arch/um/Makefile
6927 index 28688e6..4c0aa1c 100644
6928 --- a/arch/um/Makefile
6929 +++ b/arch/um/Makefile
6930 @@ -61,6 +61,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
6931 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
6932 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
6933
6934 +ifdef CONSTIFY_PLUGIN
6935 +USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
6936 +endif
6937 +
6938 #This will adjust *FLAGS accordingly to the platform.
6939 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
6940
6941 diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
6942 index 19e1bdd..3665b77 100644
6943 --- a/arch/um/include/asm/cache.h
6944 +++ b/arch/um/include/asm/cache.h
6945 @@ -1,6 +1,7 @@
6946 #ifndef __UM_CACHE_H
6947 #define __UM_CACHE_H
6948
6949 +#include <linux/const.h>
6950
6951 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
6952 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
6953 @@ -12,6 +13,6 @@
6954 # define L1_CACHE_SHIFT 5
6955 #endif
6956
6957 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6958 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6959
6960 #endif
6961 diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
6962 index 6c03acd..a5e0215 100644
6963 --- a/arch/um/include/asm/kmap_types.h
6964 +++ b/arch/um/include/asm/kmap_types.h
6965 @@ -23,6 +23,7 @@ enum km_type {
6966 KM_IRQ1,
6967 KM_SOFTIRQ0,
6968 KM_SOFTIRQ1,
6969 + KM_CLEARPAGE,
6970 KM_TYPE_NR
6971 };
6972
6973 diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
6974 index 7cfc3ce..cbd1a58 100644
6975 --- a/arch/um/include/asm/page.h
6976 +++ b/arch/um/include/asm/page.h
6977 @@ -14,6 +14,9 @@
6978 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
6979 #define PAGE_MASK (~(PAGE_SIZE-1))
6980
6981 +#define ktla_ktva(addr) (addr)
6982 +#define ktva_ktla(addr) (addr)
6983 +
6984 #ifndef __ASSEMBLY__
6985
6986 struct page;
6987 diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
6988 index 69f2490..2634831 100644
6989 --- a/arch/um/kernel/process.c
6990 +++ b/arch/um/kernel/process.c
6991 @@ -408,22 +408,6 @@ int singlestepping(void * t)
6992 return 2;
6993 }
6994
6995 -/*
6996 - * Only x86 and x86_64 have an arch_align_stack().
6997 - * All other arches have "#define arch_align_stack(x) (x)"
6998 - * in their asm/system.h
6999 - * As this is included in UML from asm-um/system-generic.h,
7000 - * we can use it to behave as the subarch does.
7001 - */
7002 -#ifndef arch_align_stack
7003 -unsigned long arch_align_stack(unsigned long sp)
7004 -{
7005 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7006 - sp -= get_random_int() % 8192;
7007 - return sp & ~0xf;
7008 -}
7009 -#endif
7010 -
7011 unsigned long get_wchan(struct task_struct *p)
7012 {
7013 unsigned long stack_page, sp, ip;
7014 diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
7015 index ad8f795..2c7eec6 100644
7016 --- a/arch/unicore32/include/asm/cache.h
7017 +++ b/arch/unicore32/include/asm/cache.h
7018 @@ -12,8 +12,10 @@
7019 #ifndef __UNICORE_CACHE_H__
7020 #define __UNICORE_CACHE_H__
7021
7022 -#define L1_CACHE_SHIFT (5)
7023 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7024 +#include <linux/const.h>
7025 +
7026 +#define L1_CACHE_SHIFT 5
7027 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7028
7029 /*
7030 * Memory returned by kmalloc() may be used for DMA, so we must make
7031 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
7032 index 5bed94e..fbcf200 100644
7033 --- a/arch/x86/Kconfig
7034 +++ b/arch/x86/Kconfig
7035 @@ -226,7 +226,7 @@ config X86_HT
7036
7037 config X86_32_LAZY_GS
7038 def_bool y
7039 - depends on X86_32 && !CC_STACKPROTECTOR
7040 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
7041
7042 config ARCH_HWEIGHT_CFLAGS
7043 string
7044 @@ -1058,7 +1058,7 @@ choice
7045
7046 config NOHIGHMEM
7047 bool "off"
7048 - depends on !X86_NUMAQ
7049 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7050 ---help---
7051 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
7052 However, the address space of 32-bit x86 processors is only 4
7053 @@ -1095,7 +1095,7 @@ config NOHIGHMEM
7054
7055 config HIGHMEM4G
7056 bool "4GB"
7057 - depends on !X86_NUMAQ
7058 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7059 ---help---
7060 Select this if you have a 32-bit processor and between 1 and 4
7061 gigabytes of physical RAM.
7062 @@ -1149,7 +1149,7 @@ config PAGE_OFFSET
7063 hex
7064 default 0xB0000000 if VMSPLIT_3G_OPT
7065 default 0x80000000 if VMSPLIT_2G
7066 - default 0x78000000 if VMSPLIT_2G_OPT
7067 + default 0x70000000 if VMSPLIT_2G_OPT
7068 default 0x40000000 if VMSPLIT_1G
7069 default 0xC0000000
7070 depends on X86_32
7071 @@ -1539,6 +1539,7 @@ config SECCOMP
7072
7073 config CC_STACKPROTECTOR
7074 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
7075 + depends on X86_64 || !PAX_MEMORY_UDEREF
7076 ---help---
7077 This option turns on the -fstack-protector GCC feature. This
7078 feature puts, at the beginning of functions, a canary value on
7079 @@ -1596,6 +1597,7 @@ config KEXEC_JUMP
7080 config PHYSICAL_START
7081 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
7082 default "0x1000000"
7083 + range 0x400000 0x40000000
7084 ---help---
7085 This gives the physical address where the kernel is loaded.
7086
7087 @@ -1659,6 +1661,7 @@ config X86_NEED_RELOCS
7088 config PHYSICAL_ALIGN
7089 hex "Alignment value to which kernel should be aligned" if X86_32
7090 default "0x1000000"
7091 + range 0x400000 0x1000000 if PAX_KERNEXEC
7092 range 0x2000 0x1000000
7093 ---help---
7094 This value puts the alignment restrictions on physical address
7095 @@ -1690,9 +1693,10 @@ config HOTPLUG_CPU
7096 Say N if you want to disable CPU hotplug.
7097
7098 config COMPAT_VDSO
7099 - def_bool y
7100 + def_bool n
7101 prompt "Compat VDSO support"
7102 depends on X86_32 || IA32_EMULATION
7103 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
7104 ---help---
7105 Map the 32-bit VDSO to the predictable old-style address too.
7106
7107 diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
7108 index 3c57033..22d44aa 100644
7109 --- a/arch/x86/Kconfig.cpu
7110 +++ b/arch/x86/Kconfig.cpu
7111 @@ -335,7 +335,7 @@ config X86_PPRO_FENCE
7112
7113 config X86_F00F_BUG
7114 def_bool y
7115 - depends on M586MMX || M586TSC || M586 || M486 || M386
7116 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
7117
7118 config X86_INVD_BUG
7119 def_bool y
7120 @@ -359,7 +359,7 @@ config X86_POPAD_OK
7121
7122 config X86_ALIGNMENT_16
7123 def_bool y
7124 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
7125 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
7126
7127 config X86_INTEL_USERCOPY
7128 def_bool y
7129 @@ -405,7 +405,7 @@ config X86_CMPXCHG64
7130 # generates cmov.
7131 config X86_CMOV
7132 def_bool y
7133 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
7134 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
7135
7136 config X86_MINIMUM_CPU_FAMILY
7137 int
7138 diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
7139 index e46c214..7c72b55 100644
7140 --- a/arch/x86/Kconfig.debug
7141 +++ b/arch/x86/Kconfig.debug
7142 @@ -84,7 +84,7 @@ config X86_PTDUMP
7143 config DEBUG_RODATA
7144 bool "Write protect kernel read-only data structures"
7145 default y
7146 - depends on DEBUG_KERNEL
7147 + depends on DEBUG_KERNEL && BROKEN
7148 ---help---
7149 Mark the kernel read-only data as write-protected in the pagetables,
7150 in order to catch accidental (and incorrect) writes to such const
7151 @@ -102,7 +102,7 @@ config DEBUG_RODATA_TEST
7152
7153 config DEBUG_SET_MODULE_RONX
7154 bool "Set loadable kernel module data as NX and text as RO"
7155 - depends on MODULES
7156 + depends on MODULES && BROKEN
7157 ---help---
7158 This option helps catch unintended modifications to loadable
7159 kernel module's text and read-only data. It also prevents execution
7160 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
7161 index 209ba12..15140db 100644
7162 --- a/arch/x86/Makefile
7163 +++ b/arch/x86/Makefile
7164 @@ -46,6 +46,7 @@ else
7165 UTS_MACHINE := x86_64
7166 CHECKFLAGS += -D__x86_64__ -m64
7167
7168 + biarch := $(call cc-option,-m64)
7169 KBUILD_AFLAGS += -m64
7170 KBUILD_CFLAGS += -m64
7171
7172 @@ -201,3 +202,12 @@ define archhelp
7173 echo ' FDARGS="..." arguments for the booted kernel'
7174 echo ' FDINITRD=file initrd for the booted kernel'
7175 endef
7176 +
7177 +define OLD_LD
7178 +
7179 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
7180 +*** Please upgrade your binutils to 2.18 or newer
7181 +endef
7182 +
7183 +archprepare:
7184 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
7185 diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
7186 index 95365a8..52f857b 100644
7187 --- a/arch/x86/boot/Makefile
7188 +++ b/arch/x86/boot/Makefile
7189 @@ -63,6 +63,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
7190 $(call cc-option, -fno-stack-protector) \
7191 $(call cc-option, -mpreferred-stack-boundary=2)
7192 KBUILD_CFLAGS += $(call cc-option, -m32)
7193 +ifdef CONSTIFY_PLUGIN
7194 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7195 +endif
7196 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7197 GCOV_PROFILE := n
7198
7199 diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
7200 index 878e4b9..20537ab 100644
7201 --- a/arch/x86/boot/bitops.h
7202 +++ b/arch/x86/boot/bitops.h
7203 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7204 u8 v;
7205 const u32 *p = (const u32 *)addr;
7206
7207 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7208 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7209 return v;
7210 }
7211
7212 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7213
7214 static inline void set_bit(int nr, void *addr)
7215 {
7216 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7217 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7218 }
7219
7220 #endif /* BOOT_BITOPS_H */
7221 diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
7222 index c7093bd..d4247ffe0 100644
7223 --- a/arch/x86/boot/boot.h
7224 +++ b/arch/x86/boot/boot.h
7225 @@ -85,7 +85,7 @@ static inline void io_delay(void)
7226 static inline u16 ds(void)
7227 {
7228 u16 seg;
7229 - asm("movw %%ds,%0" : "=rm" (seg));
7230 + asm volatile("movw %%ds,%0" : "=rm" (seg));
7231 return seg;
7232 }
7233
7234 @@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
7235 static inline int memcmp(const void *s1, const void *s2, size_t len)
7236 {
7237 u8 diff;
7238 - asm("repe; cmpsb; setnz %0"
7239 + asm volatile("repe; cmpsb; setnz %0"
7240 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
7241 return diff;
7242 }
7243 diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
7244 index b123b9a..2cf2f23 100644
7245 --- a/arch/x86/boot/compressed/Makefile
7246 +++ b/arch/x86/boot/compressed/Makefile
7247 @@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
7248 KBUILD_CFLAGS += $(cflags-y)
7249 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
7250 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
7251 +ifdef CONSTIFY_PLUGIN
7252 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7253 +endif
7254
7255 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7256 GCOV_PROFILE := n
7257 diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
7258 index a055993..47e126c 100644
7259 --- a/arch/x86/boot/compressed/head_32.S
7260 +++ b/arch/x86/boot/compressed/head_32.S
7261 @@ -98,7 +98,7 @@ preferred_addr:
7262 notl %eax
7263 andl %eax, %ebx
7264 #else
7265 - movl $LOAD_PHYSICAL_ADDR, %ebx
7266 + movl $____LOAD_PHYSICAL_ADDR, %ebx
7267 #endif
7268
7269 /* Target address to relocate to for decompression */
7270 @@ -184,7 +184,7 @@ relocated:
7271 * and where it was actually loaded.
7272 */
7273 movl %ebp, %ebx
7274 - subl $LOAD_PHYSICAL_ADDR, %ebx
7275 + subl $____LOAD_PHYSICAL_ADDR, %ebx
7276 jz 2f /* Nothing to be done if loaded at compiled addr. */
7277 /*
7278 * Process relocations.
7279 @@ -192,8 +192,7 @@ relocated:
7280
7281 1: subl $4, %edi
7282 movl (%edi), %ecx
7283 - testl %ecx, %ecx
7284 - jz 2f
7285 + jecxz 2f
7286 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
7287 jmp 1b
7288 2:
7289 diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
7290 index 558d76c..606aa24 100644
7291 --- a/arch/x86/boot/compressed/head_64.S
7292 +++ b/arch/x86/boot/compressed/head_64.S
7293 @@ -91,7 +91,7 @@ ENTRY(startup_32)
7294 notl %eax
7295 andl %eax, %ebx
7296 #else
7297 - movl $LOAD_PHYSICAL_ADDR, %ebx
7298 + movl $____LOAD_PHYSICAL_ADDR, %ebx
7299 #endif
7300
7301 /* Target address to relocate to for decompression */
7302 @@ -253,7 +253,7 @@ preferred_addr:
7303 notq %rax
7304 andq %rax, %rbp
7305 #else
7306 - movq $LOAD_PHYSICAL_ADDR, %rbp
7307 + movq $____LOAD_PHYSICAL_ADDR, %rbp
7308 #endif
7309
7310 /* Target address to relocate to for decompression */
7311 diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
7312 index 7116dcb..d9ae1d7 100644
7313 --- a/arch/x86/boot/compressed/misc.c
7314 +++ b/arch/x86/boot/compressed/misc.c
7315 @@ -310,7 +310,7 @@ static void parse_elf(void *output)
7316 case PT_LOAD:
7317 #ifdef CONFIG_RELOCATABLE
7318 dest = output;
7319 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
7320 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
7321 #else
7322 dest = (void *)(phdr->p_paddr);
7323 #endif
7324 @@ -365,7 +365,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
7325 error("Destination address too large");
7326 #endif
7327 #ifndef CONFIG_RELOCATABLE
7328 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
7329 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
7330 error("Wrong destination address");
7331 #endif
7332
7333 diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
7334 index 89bbf4e..869908e 100644
7335 --- a/arch/x86/boot/compressed/relocs.c
7336 +++ b/arch/x86/boot/compressed/relocs.c
7337 @@ -13,8 +13,11 @@
7338
7339 static void die(char *fmt, ...);
7340
7341 +#include "../../../../include/generated/autoconf.h"
7342 +
7343 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
7344 static Elf32_Ehdr ehdr;
7345 +static Elf32_Phdr *phdr;
7346 static unsigned long reloc_count, reloc_idx;
7347 static unsigned long *relocs;
7348
7349 @@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
7350 }
7351 }
7352
7353 +static void read_phdrs(FILE *fp)
7354 +{
7355 + unsigned int i;
7356 +
7357 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
7358 + if (!phdr) {
7359 + die("Unable to allocate %d program headers\n",
7360 + ehdr.e_phnum);
7361 + }
7362 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
7363 + die("Seek to %d failed: %s\n",
7364 + ehdr.e_phoff, strerror(errno));
7365 + }
7366 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
7367 + die("Cannot read ELF program headers: %s\n",
7368 + strerror(errno));
7369 + }
7370 + for(i = 0; i < ehdr.e_phnum; i++) {
7371 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
7372 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
7373 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
7374 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
7375 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
7376 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
7377 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
7378 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
7379 + }
7380 +
7381 +}
7382 +
7383 static void read_shdrs(FILE *fp)
7384 {
7385 - int i;
7386 + unsigned int i;
7387 Elf32_Shdr shdr;
7388
7389 secs = calloc(ehdr.e_shnum, sizeof(struct section));
7390 @@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
7391
7392 static void read_strtabs(FILE *fp)
7393 {
7394 - int i;
7395 + unsigned int i;
7396 for (i = 0; i < ehdr.e_shnum; i++) {
7397 struct section *sec = &secs[i];
7398 if (sec->shdr.sh_type != SHT_STRTAB) {
7399 @@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
7400
7401 static void read_symtabs(FILE *fp)
7402 {
7403 - int i,j;
7404 + unsigned int i,j;
7405 for (i = 0; i < ehdr.e_shnum; i++) {
7406 struct section *sec = &secs[i];
7407 if (sec->shdr.sh_type != SHT_SYMTAB) {
7408 @@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
7409
7410 static void read_relocs(FILE *fp)
7411 {
7412 - int i,j;
7413 + unsigned int i,j;
7414 + uint32_t base;
7415 +
7416 for (i = 0; i < ehdr.e_shnum; i++) {
7417 struct section *sec = &secs[i];
7418 if (sec->shdr.sh_type != SHT_REL) {
7419 @@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
7420 die("Cannot read symbol table: %s\n",
7421 strerror(errno));
7422 }
7423 + base = 0;
7424 + for (j = 0; j < ehdr.e_phnum; j++) {
7425 + if (phdr[j].p_type != PT_LOAD )
7426 + continue;
7427 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
7428 + continue;
7429 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
7430 + break;
7431 + }
7432 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
7433 Elf32_Rel *rel = &sec->reltab[j];
7434 - rel->r_offset = elf32_to_cpu(rel->r_offset);
7435 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
7436 rel->r_info = elf32_to_cpu(rel->r_info);
7437 }
7438 }
7439 @@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
7440
7441 static void print_absolute_symbols(void)
7442 {
7443 - int i;
7444 + unsigned int i;
7445 printf("Absolute symbols\n");
7446 printf(" Num: Value Size Type Bind Visibility Name\n");
7447 for (i = 0; i < ehdr.e_shnum; i++) {
7448 struct section *sec = &secs[i];
7449 char *sym_strtab;
7450 Elf32_Sym *sh_symtab;
7451 - int j;
7452 + unsigned int j;
7453
7454 if (sec->shdr.sh_type != SHT_SYMTAB) {
7455 continue;
7456 @@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
7457
7458 static void print_absolute_relocs(void)
7459 {
7460 - int i, printed = 0;
7461 + unsigned int i, printed = 0;
7462
7463 for (i = 0; i < ehdr.e_shnum; i++) {
7464 struct section *sec = &secs[i];
7465 struct section *sec_applies, *sec_symtab;
7466 char *sym_strtab;
7467 Elf32_Sym *sh_symtab;
7468 - int j;
7469 + unsigned int j;
7470 if (sec->shdr.sh_type != SHT_REL) {
7471 continue;
7472 }
7473 @@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
7474
7475 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
7476 {
7477 - int i;
7478 + unsigned int i;
7479 /* Walk through the relocations */
7480 for (i = 0; i < ehdr.e_shnum; i++) {
7481 char *sym_strtab;
7482 Elf32_Sym *sh_symtab;
7483 struct section *sec_applies, *sec_symtab;
7484 - int j;
7485 + unsigned int j;
7486 struct section *sec = &secs[i];
7487
7488 if (sec->shdr.sh_type != SHT_REL) {
7489 @@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
7490 !is_rel_reloc(sym_name(sym_strtab, sym))) {
7491 continue;
7492 }
7493 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
7494 + if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
7495 + continue;
7496 +
7497 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
7498 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
7499 + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
7500 + continue;
7501 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
7502 + continue;
7503 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
7504 + continue;
7505 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
7506 + continue;
7507 +#endif
7508 +
7509 switch (r_type) {
7510 case R_386_NONE:
7511 case R_386_PC32:
7512 @@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, const void *vb)
7513
7514 static void emit_relocs(int as_text)
7515 {
7516 - int i;
7517 + unsigned int i;
7518 /* Count how many relocations I have and allocate space for them. */
7519 reloc_count = 0;
7520 walk_relocs(count_reloc);
7521 @@ -665,6 +725,7 @@ int main(int argc, char **argv)
7522 fname, strerror(errno));
7523 }
7524 read_ehdr(fp);
7525 + read_phdrs(fp);
7526 read_shdrs(fp);
7527 read_strtabs(fp);
7528 read_symtabs(fp);
7529 diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
7530 index 4d3ff03..e4972ff 100644
7531 --- a/arch/x86/boot/cpucheck.c
7532 +++ b/arch/x86/boot/cpucheck.c
7533 @@ -74,7 +74,7 @@ static int has_fpu(void)
7534 u16 fcw = -1, fsw = -1;
7535 u32 cr0;
7536
7537 - asm("movl %%cr0,%0" : "=r" (cr0));
7538 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
7539 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
7540 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
7541 asm volatile("movl %0,%%cr0" : : "r" (cr0));
7542 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
7543 {
7544 u32 f0, f1;
7545
7546 - asm("pushfl ; "
7547 + asm volatile("pushfl ; "
7548 "pushfl ; "
7549 "popl %0 ; "
7550 "movl %0,%1 ; "
7551 @@ -115,7 +115,7 @@ static void get_flags(void)
7552 set_bit(X86_FEATURE_FPU, cpu.flags);
7553
7554 if (has_eflag(X86_EFLAGS_ID)) {
7555 - asm("cpuid"
7556 + asm volatile("cpuid"
7557 : "=a" (max_intel_level),
7558 "=b" (cpu_vendor[0]),
7559 "=d" (cpu_vendor[1]),
7560 @@ -124,7 +124,7 @@ static void get_flags(void)
7561
7562 if (max_intel_level >= 0x00000001 &&
7563 max_intel_level <= 0x0000ffff) {
7564 - asm("cpuid"
7565 + asm volatile("cpuid"
7566 : "=a" (tfms),
7567 "=c" (cpu.flags[4]),
7568 "=d" (cpu.flags[0])
7569 @@ -136,7 +136,7 @@ static void get_flags(void)
7570 cpu.model += ((tfms >> 16) & 0xf) << 4;
7571 }
7572
7573 - asm("cpuid"
7574 + asm volatile("cpuid"
7575 : "=a" (max_amd_level)
7576 : "a" (0x80000000)
7577 : "ebx", "ecx", "edx");
7578 @@ -144,7 +144,7 @@ static void get_flags(void)
7579 if (max_amd_level >= 0x80000001 &&
7580 max_amd_level <= 0x8000ffff) {
7581 u32 eax = 0x80000001;
7582 - asm("cpuid"
7583 + asm volatile("cpuid"
7584 : "+a" (eax),
7585 "=c" (cpu.flags[6]),
7586 "=d" (cpu.flags[1])
7587 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7588 u32 ecx = MSR_K7_HWCR;
7589 u32 eax, edx;
7590
7591 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7592 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7593 eax &= ~(1 << 15);
7594 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7595 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7596
7597 get_flags(); /* Make sure it really did something */
7598 err = check_flags();
7599 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7600 u32 ecx = MSR_VIA_FCR;
7601 u32 eax, edx;
7602
7603 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7604 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7605 eax |= (1<<1)|(1<<7);
7606 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7607 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7608
7609 set_bit(X86_FEATURE_CX8, cpu.flags);
7610 err = check_flags();
7611 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7612 u32 eax, edx;
7613 u32 level = 1;
7614
7615 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7616 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
7617 - asm("cpuid"
7618 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7619 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
7620 + asm volatile("cpuid"
7621 : "+a" (level), "=d" (cpu.flags[0])
7622 : : "ecx", "ebx");
7623 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7624 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7625
7626 err = check_flags();
7627 }
7628 diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
7629 index f1bbeeb..aff09cb 100644
7630 --- a/arch/x86/boot/header.S
7631 +++ b/arch/x86/boot/header.S
7632 @@ -372,7 +372,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
7633 # single linked list of
7634 # struct setup_data
7635
7636 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
7637 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
7638
7639 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
7640 #define VO_INIT_SIZE (VO__end - VO__text)
7641 diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
7642 index db75d07..8e6d0af 100644
7643 --- a/arch/x86/boot/memory.c
7644 +++ b/arch/x86/boot/memory.c
7645 @@ -19,7 +19,7 @@
7646
7647 static int detect_memory_e820(void)
7648 {
7649 - int count = 0;
7650 + unsigned int count = 0;
7651 struct biosregs ireg, oreg;
7652 struct e820entry *desc = boot_params.e820_map;
7653 static struct e820entry buf; /* static so it is zeroed */
7654 diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
7655 index 11e8c6e..fdbb1ed 100644
7656 --- a/arch/x86/boot/video-vesa.c
7657 +++ b/arch/x86/boot/video-vesa.c
7658 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
7659
7660 boot_params.screen_info.vesapm_seg = oreg.es;
7661 boot_params.screen_info.vesapm_off = oreg.di;
7662 + boot_params.screen_info.vesapm_size = oreg.cx;
7663 }
7664
7665 /*
7666 diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
7667 index 43eda28..5ab5fdb 100644
7668 --- a/arch/x86/boot/video.c
7669 +++ b/arch/x86/boot/video.c
7670 @@ -96,7 +96,7 @@ static void store_mode_params(void)
7671 static unsigned int get_entry(void)
7672 {
7673 char entry_buf[4];
7674 - int i, len = 0;
7675 + unsigned int i, len = 0;
7676 int key;
7677 unsigned int v;
7678
7679 diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
7680 index 5b577d5..3c1fed4 100644
7681 --- a/arch/x86/crypto/aes-x86_64-asm_64.S
7682 +++ b/arch/x86/crypto/aes-x86_64-asm_64.S
7683 @@ -8,6 +8,8 @@
7684 * including this sentence is retained in full.
7685 */
7686
7687 +#include <asm/alternative-asm.h>
7688 +
7689 .extern crypto_ft_tab
7690 .extern crypto_it_tab
7691 .extern crypto_fl_tab
7692 @@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
7693 je B192; \
7694 leaq 32(r9),r9;
7695
7696 +#define ret pax_force_retaddr 0, 1; ret
7697 +
7698 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
7699 movq r1,r2; \
7700 movq r3,r4; \
7701 diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
7702 index be6d9e3..21fbbca 100644
7703 --- a/arch/x86/crypto/aesni-intel_asm.S
7704 +++ b/arch/x86/crypto/aesni-intel_asm.S
7705 @@ -31,6 +31,7 @@
7706
7707 #include <linux/linkage.h>
7708 #include <asm/inst.h>
7709 +#include <asm/alternative-asm.h>
7710
7711 #ifdef __x86_64__
7712 .data
7713 @@ -1436,7 +1437,9 @@ _return_T_done_decrypt:
7714 pop %r14
7715 pop %r13
7716 pop %r12
7717 + pax_force_retaddr 0, 1
7718 ret
7719 +ENDPROC(aesni_gcm_dec)
7720
7721
7722 /*****************************************************************************
7723 @@ -1699,7 +1702,9 @@ _return_T_done_encrypt:
7724 pop %r14
7725 pop %r13
7726 pop %r12
7727 + pax_force_retaddr 0, 1
7728 ret
7729 +ENDPROC(aesni_gcm_enc)
7730
7731 #endif
7732
7733 @@ -1714,6 +1719,7 @@ _key_expansion_256a:
7734 pxor %xmm1, %xmm0
7735 movaps %xmm0, (TKEYP)
7736 add $0x10, TKEYP
7737 + pax_force_retaddr_bts
7738 ret
7739
7740 .align 4
7741 @@ -1738,6 +1744,7 @@ _key_expansion_192a:
7742 shufps $0b01001110, %xmm2, %xmm1
7743 movaps %xmm1, 0x10(TKEYP)
7744 add $0x20, TKEYP
7745 + pax_force_retaddr_bts
7746 ret
7747
7748 .align 4
7749 @@ -1757,6 +1764,7 @@ _key_expansion_192b:
7750
7751 movaps %xmm0, (TKEYP)
7752 add $0x10, TKEYP
7753 + pax_force_retaddr_bts
7754 ret
7755
7756 .align 4
7757 @@ -1769,6 +1777,7 @@ _key_expansion_256b:
7758 pxor %xmm1, %xmm2
7759 movaps %xmm2, (TKEYP)
7760 add $0x10, TKEYP
7761 + pax_force_retaddr_bts
7762 ret
7763
7764 /*
7765 @@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key)
7766 #ifndef __x86_64__
7767 popl KEYP
7768 #endif
7769 + pax_force_retaddr 0, 1
7770 ret
7771 +ENDPROC(aesni_set_key)
7772
7773 /*
7774 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
7775 @@ -1902,7 +1913,9 @@ ENTRY(aesni_enc)
7776 popl KLEN
7777 popl KEYP
7778 #endif
7779 + pax_force_retaddr 0, 1
7780 ret
7781 +ENDPROC(aesni_enc)
7782
7783 /*
7784 * _aesni_enc1: internal ABI
7785 @@ -1959,6 +1972,7 @@ _aesni_enc1:
7786 AESENC KEY STATE
7787 movaps 0x70(TKEYP), KEY
7788 AESENCLAST KEY STATE
7789 + pax_force_retaddr_bts
7790 ret
7791
7792 /*
7793 @@ -2067,6 +2081,7 @@ _aesni_enc4:
7794 AESENCLAST KEY STATE2
7795 AESENCLAST KEY STATE3
7796 AESENCLAST KEY STATE4
7797 + pax_force_retaddr_bts
7798 ret
7799
7800 /*
7801 @@ -2089,7 +2104,9 @@ ENTRY(aesni_dec)
7802 popl KLEN
7803 popl KEYP
7804 #endif
7805 + pax_force_retaddr 0, 1
7806 ret
7807 +ENDPROC(aesni_dec)
7808
7809 /*
7810 * _aesni_dec1: internal ABI
7811 @@ -2146,6 +2163,7 @@ _aesni_dec1:
7812 AESDEC KEY STATE
7813 movaps 0x70(TKEYP), KEY
7814 AESDECLAST KEY STATE
7815 + pax_force_retaddr_bts
7816 ret
7817
7818 /*
7819 @@ -2254,6 +2272,7 @@ _aesni_dec4:
7820 AESDECLAST KEY STATE2
7821 AESDECLAST KEY STATE3
7822 AESDECLAST KEY STATE4
7823 + pax_force_retaddr_bts
7824 ret
7825
7826 /*
7827 @@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc)
7828 popl KEYP
7829 popl LEN
7830 #endif
7831 + pax_force_retaddr 0, 1
7832 ret
7833 +ENDPROC(aesni_ecb_enc)
7834
7835 /*
7836 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7837 @@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec)
7838 popl KEYP
7839 popl LEN
7840 #endif
7841 + pax_force_retaddr 0, 1
7842 ret
7843 +ENDPROC(aesni_ecb_dec)
7844
7845 /*
7846 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7847 @@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc)
7848 popl LEN
7849 popl IVP
7850 #endif
7851 + pax_force_retaddr 0, 1
7852 ret
7853 +ENDPROC(aesni_cbc_enc)
7854
7855 /*
7856 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7857 @@ -2498,7 +2523,9 @@ ENTRY(aesni_cbc_dec)
7858 popl LEN
7859 popl IVP
7860 #endif
7861 + pax_force_retaddr 0, 1
7862 ret
7863 +ENDPROC(aesni_cbc_dec)
7864
7865 #ifdef __x86_64__
7866 .align 16
7867 @@ -2524,6 +2551,7 @@ _aesni_inc_init:
7868 mov $1, TCTR_LOW
7869 MOVQ_R64_XMM TCTR_LOW INC
7870 MOVQ_R64_XMM CTR TCTR_LOW
7871 + pax_force_retaddr_bts
7872 ret
7873
7874 /*
7875 @@ -2552,6 +2580,7 @@ _aesni_inc:
7876 .Linc_low:
7877 movaps CTR, IV
7878 PSHUFB_XMM BSWAP_MASK IV
7879 + pax_force_retaddr_bts
7880 ret
7881
7882 /*
7883 @@ -2612,5 +2641,7 @@ ENTRY(aesni_ctr_enc)
7884 .Lctr_enc_ret:
7885 movups IV, (IVP)
7886 .Lctr_enc_just_ret:
7887 + pax_force_retaddr 0, 1
7888 ret
7889 +ENDPROC(aesni_ctr_enc)
7890 #endif
7891 diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
7892 index 391d245..67f35c2 100644
7893 --- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
7894 +++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
7895 @@ -20,6 +20,8 @@
7896 *
7897 */
7898
7899 +#include <asm/alternative-asm.h>
7900 +
7901 .file "blowfish-x86_64-asm.S"
7902 .text
7903
7904 @@ -151,9 +153,11 @@ __blowfish_enc_blk:
7905 jnz __enc_xor;
7906
7907 write_block();
7908 + pax_force_retaddr 0, 1
7909 ret;
7910 __enc_xor:
7911 xor_block();
7912 + pax_force_retaddr 0, 1
7913 ret;
7914
7915 .align 8
7916 @@ -188,6 +192,7 @@ blowfish_dec_blk:
7917
7918 movq %r11, %rbp;
7919
7920 + pax_force_retaddr 0, 1
7921 ret;
7922
7923 /**********************************************************************
7924 @@ -342,6 +347,7 @@ __blowfish_enc_blk_4way:
7925
7926 popq %rbx;
7927 popq %rbp;
7928 + pax_force_retaddr 0, 1
7929 ret;
7930
7931 __enc_xor4:
7932 @@ -349,6 +355,7 @@ __enc_xor4:
7933
7934 popq %rbx;
7935 popq %rbp;
7936 + pax_force_retaddr 0, 1
7937 ret;
7938
7939 .align 8
7940 @@ -386,5 +393,6 @@ blowfish_dec_blk_4way:
7941 popq %rbx;
7942 popq %rbp;
7943
7944 + pax_force_retaddr 0, 1
7945 ret;
7946
7947 diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
7948 index 6214a9b..1f4fc9a 100644
7949 --- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
7950 +++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
7951 @@ -1,3 +1,5 @@
7952 +#include <asm/alternative-asm.h>
7953 +
7954 # enter ECRYPT_encrypt_bytes
7955 .text
7956 .p2align 5
7957 @@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
7958 add %r11,%rsp
7959 mov %rdi,%rax
7960 mov %rsi,%rdx
7961 + pax_force_retaddr 0, 1
7962 ret
7963 # bytesatleast65:
7964 ._bytesatleast65:
7965 @@ -891,6 +894,7 @@ ECRYPT_keysetup:
7966 add %r11,%rsp
7967 mov %rdi,%rax
7968 mov %rsi,%rdx
7969 + pax_force_retaddr
7970 ret
7971 # enter ECRYPT_ivsetup
7972 .text
7973 @@ -917,4 +921,5 @@ ECRYPT_ivsetup:
7974 add %r11,%rsp
7975 mov %rdi,%rax
7976 mov %rsi,%rdx
7977 + pax_force_retaddr
7978 ret
7979 diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
7980 index 7f24a15..9cd3ffe 100644
7981 --- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
7982 +++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
7983 @@ -24,6 +24,8 @@
7984 *
7985 */
7986
7987 +#include <asm/alternative-asm.h>
7988 +
7989 .file "serpent-sse2-x86_64-asm_64.S"
7990 .text
7991
7992 @@ -695,12 +697,14 @@ __serpent_enc_blk_8way:
7993 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
7994 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
7995
7996 + pax_force_retaddr
7997 ret;
7998
7999 __enc_xor8:
8000 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
8001 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
8002
8003 + pax_force_retaddr
8004 ret;
8005
8006 .align 8
8007 @@ -758,4 +762,5 @@ serpent_dec_blk_8way:
8008 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
8009 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
8010
8011 + pax_force_retaddr
8012 ret;
8013 diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
8014 index b2c2f57..8470cab 100644
8015 --- a/arch/x86/crypto/sha1_ssse3_asm.S
8016 +++ b/arch/x86/crypto/sha1_ssse3_asm.S
8017 @@ -28,6 +28,8 @@
8018 * (at your option) any later version.
8019 */
8020
8021 +#include <asm/alternative-asm.h>
8022 +
8023 #define CTX %rdi // arg1
8024 #define BUF %rsi // arg2
8025 #define CNT %rdx // arg3
8026 @@ -104,6 +106,7 @@
8027 pop %r12
8028 pop %rbp
8029 pop %rbx
8030 + pax_force_retaddr 0, 1
8031 ret
8032
8033 .size \name, .-\name
8034 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8035 index 5b012a2..36d5364 100644
8036 --- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8037 +++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8038 @@ -20,6 +20,8 @@
8039 *
8040 */
8041
8042 +#include <asm/alternative-asm.h>
8043 +
8044 .file "twofish-x86_64-asm-3way.S"
8045 .text
8046
8047 @@ -260,6 +262,7 @@ __twofish_enc_blk_3way:
8048 popq %r13;
8049 popq %r14;
8050 popq %r15;
8051 + pax_force_retaddr 0, 1
8052 ret;
8053
8054 __enc_xor3:
8055 @@ -271,6 +274,7 @@ __enc_xor3:
8056 popq %r13;
8057 popq %r14;
8058 popq %r15;
8059 + pax_force_retaddr 0, 1
8060 ret;
8061
8062 .global twofish_dec_blk_3way
8063 @@ -312,5 +316,6 @@ twofish_dec_blk_3way:
8064 popq %r13;
8065 popq %r14;
8066 popq %r15;
8067 + pax_force_retaddr 0, 1
8068 ret;
8069
8070 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
8071 index 7bcf3fc..f53832f 100644
8072 --- a/arch/x86/crypto/twofish-x86_64-asm_64.S
8073 +++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
8074 @@ -21,6 +21,7 @@
8075 .text
8076
8077 #include <asm/asm-offsets.h>
8078 +#include <asm/alternative-asm.h>
8079
8080 #define a_offset 0
8081 #define b_offset 4
8082 @@ -268,6 +269,7 @@ twofish_enc_blk:
8083
8084 popq R1
8085 movq $1,%rax
8086 + pax_force_retaddr 0, 1
8087 ret
8088
8089 twofish_dec_blk:
8090 @@ -319,4 +321,5 @@ twofish_dec_blk:
8091
8092 popq R1
8093 movq $1,%rax
8094 + pax_force_retaddr 0, 1
8095 ret
8096 diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
8097 index 39e4909..887aa7e 100644
8098 --- a/arch/x86/ia32/ia32_aout.c
8099 +++ b/arch/x86/ia32/ia32_aout.c
8100 @@ -162,6 +162,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
8101 unsigned long dump_start, dump_size;
8102 struct user32 dump;
8103
8104 + memset(&dump, 0, sizeof(dump));
8105 +
8106 fs = get_fs();
8107 set_fs(KERNEL_DS);
8108 has_dumped = 1;
8109 diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
8110 index 6557769..ef6ae89 100644
8111 --- a/arch/x86/ia32/ia32_signal.c
8112 +++ b/arch/x86/ia32/ia32_signal.c
8113 @@ -169,7 +169,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
8114 }
8115 seg = get_fs();
8116 set_fs(KERNEL_DS);
8117 - ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
8118 + ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
8119 set_fs(seg);
8120 if (ret >= 0 && uoss_ptr) {
8121 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
8122 @@ -370,7 +370,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
8123 */
8124 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8125 size_t frame_size,
8126 - void **fpstate)
8127 + void __user **fpstate)
8128 {
8129 unsigned long sp;
8130
8131 @@ -391,7 +391,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8132
8133 if (used_math()) {
8134 sp = sp - sig_xstate_ia32_size;
8135 - *fpstate = (struct _fpstate_ia32 *) sp;
8136 + *fpstate = (struct _fpstate_ia32 __user *) sp;
8137 if (save_i387_xstate_ia32(*fpstate) < 0)
8138 return (void __user *) -1L;
8139 }
8140 @@ -399,7 +399,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8141 sp -= frame_size;
8142 /* Align the stack pointer according to the i386 ABI,
8143 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
8144 - sp = ((sp + 4) & -16ul) - 4;
8145 + sp = ((sp - 12) & -16ul) - 4;
8146 return (void __user *) sp;
8147 }
8148
8149 @@ -457,7 +457,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
8150 * These are actually not used anymore, but left because some
8151 * gdb versions depend on them as a marker.
8152 */
8153 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8154 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8155 } put_user_catch(err);
8156
8157 if (err)
8158 @@ -499,7 +499,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8159 0xb8,
8160 __NR_ia32_rt_sigreturn,
8161 0x80cd,
8162 - 0,
8163 + 0
8164 };
8165
8166 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
8167 @@ -529,16 +529,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8168
8169 if (ka->sa.sa_flags & SA_RESTORER)
8170 restorer = ka->sa.sa_restorer;
8171 + else if (current->mm->context.vdso)
8172 + /* Return stub is in 32bit vsyscall page */
8173 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
8174 else
8175 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
8176 - rt_sigreturn);
8177 + restorer = &frame->retcode;
8178 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
8179
8180 /*
8181 * Not actually used anymore, but left because some gdb
8182 * versions need it.
8183 */
8184 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8185 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8186 } put_user_catch(err);
8187
8188 if (err)
8189 diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
8190 index e3e7340..05ed805 100644
8191 --- a/arch/x86/ia32/ia32entry.S
8192 +++ b/arch/x86/ia32/ia32entry.S
8193 @@ -13,8 +13,10 @@
8194 #include <asm/thread_info.h>
8195 #include <asm/segment.h>
8196 #include <asm/irqflags.h>
8197 +#include <asm/pgtable.h>
8198 #include <linux/linkage.h>
8199 #include <linux/err.h>
8200 +#include <asm/alternative-asm.h>
8201
8202 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
8203 #include <linux/elf-em.h>
8204 @@ -94,6 +96,32 @@ ENTRY(native_irq_enable_sysexit)
8205 ENDPROC(native_irq_enable_sysexit)
8206 #endif
8207
8208 + .macro pax_enter_kernel_user
8209 + pax_set_fptr_mask
8210 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8211 + call pax_enter_kernel_user
8212 +#endif
8213 + .endm
8214 +
8215 + .macro pax_exit_kernel_user
8216 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8217 + call pax_exit_kernel_user
8218 +#endif
8219 +#ifdef CONFIG_PAX_RANDKSTACK
8220 + pushq %rax
8221 + pushq %r11
8222 + call pax_randomize_kstack
8223 + popq %r11
8224 + popq %rax
8225 +#endif
8226 + .endm
8227 +
8228 +.macro pax_erase_kstack
8229 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
8230 + call pax_erase_kstack
8231 +#endif
8232 +.endm
8233 +
8234 /*
8235 * 32bit SYSENTER instruction entry.
8236 *
8237 @@ -120,12 +148,6 @@ ENTRY(ia32_sysenter_target)
8238 CFI_REGISTER rsp,rbp
8239 SWAPGS_UNSAFE_STACK
8240 movq PER_CPU_VAR(kernel_stack), %rsp
8241 - addq $(KERNEL_STACK_OFFSET),%rsp
8242 - /*
8243 - * No need to follow this irqs on/off section: the syscall
8244 - * disabled irqs, here we enable it straight after entry:
8245 - */
8246 - ENABLE_INTERRUPTS(CLBR_NONE)
8247 movl %ebp,%ebp /* zero extension */
8248 pushq_cfi $__USER32_DS
8249 /*CFI_REL_OFFSET ss,0*/
8250 @@ -133,24 +155,39 @@ ENTRY(ia32_sysenter_target)
8251 CFI_REL_OFFSET rsp,0
8252 pushfq_cfi
8253 /*CFI_REL_OFFSET rflags,0*/
8254 - movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
8255 - CFI_REGISTER rip,r10
8256 + orl $X86_EFLAGS_IF,(%rsp)
8257 + GET_THREAD_INFO(%r11)
8258 + movl TI_sysenter_return(%r11), %r11d
8259 + CFI_REGISTER rip,r11
8260 pushq_cfi $__USER32_CS
8261 /*CFI_REL_OFFSET cs,0*/
8262 movl %eax, %eax
8263 - pushq_cfi %r10
8264 + pushq_cfi %r11
8265 CFI_REL_OFFSET rip,0
8266 pushq_cfi %rax
8267 cld
8268 SAVE_ARGS 0,1,0
8269 + pax_enter_kernel_user
8270 + /*
8271 + * No need to follow this irqs on/off section: the syscall
8272 + * disabled irqs, here we enable it straight after entry:
8273 + */
8274 + ENABLE_INTERRUPTS(CLBR_NONE)
8275 /* no need to do an access_ok check here because rbp has been
8276 32bit zero extended */
8277 +
8278 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8279 + mov $PAX_USER_SHADOW_BASE,%r11
8280 + add %r11,%rbp
8281 +#endif
8282 +
8283 1: movl (%rbp),%ebp
8284 .section __ex_table,"a"
8285 .quad 1b,ia32_badarg
8286 .previous
8287 - orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8288 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8289 + GET_THREAD_INFO(%r11)
8290 + orl $TS_COMPAT,TI_status(%r11)
8291 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8292 CFI_REMEMBER_STATE
8293 jnz sysenter_tracesys
8294 cmpq $(IA32_NR_syscalls-1),%rax
8295 @@ -160,12 +197,15 @@ sysenter_do_call:
8296 sysenter_dispatch:
8297 call *ia32_sys_call_table(,%rax,8)
8298 movq %rax,RAX-ARGOFFSET(%rsp)
8299 + GET_THREAD_INFO(%r11)
8300 DISABLE_INTERRUPTS(CLBR_NONE)
8301 TRACE_IRQS_OFF
8302 - testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8303 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8304 jnz sysexit_audit
8305 sysexit_from_sys_call:
8306 - andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8307 + pax_exit_kernel_user
8308 + pax_erase_kstack
8309 + andl $~TS_COMPAT,TI_status(%r11)
8310 /* clear IF, that popfq doesn't enable interrupts early */
8311 andl $~0x200,EFLAGS-R11(%rsp)
8312 movl RIP-R11(%rsp),%edx /* User %eip */
8313 @@ -191,6 +231,9 @@ sysexit_from_sys_call:
8314 movl %eax,%esi /* 2nd arg: syscall number */
8315 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
8316 call __audit_syscall_entry
8317 +
8318 + pax_erase_kstack
8319 +
8320 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
8321 cmpq $(IA32_NR_syscalls-1),%rax
8322 ja ia32_badsys
8323 @@ -202,7 +245,7 @@ sysexit_from_sys_call:
8324 .endm
8325
8326 .macro auditsys_exit exit
8327 - testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8328 + testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8329 jnz ia32_ret_from_sys_call
8330 TRACE_IRQS_ON
8331 sti
8332 @@ -213,11 +256,12 @@ sysexit_from_sys_call:
8333 1: setbe %al /* 1 if error, 0 if not */
8334 movzbl %al,%edi /* zero-extend that into %edi */
8335 call __audit_syscall_exit
8336 + GET_THREAD_INFO(%r11)
8337 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
8338 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
8339 cli
8340 TRACE_IRQS_OFF
8341 - testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8342 + testl %edi,TI_flags(%r11)
8343 jz \exit
8344 CLEAR_RREGS -ARGOFFSET
8345 jmp int_with_check
8346 @@ -235,7 +279,7 @@ sysexit_audit:
8347
8348 sysenter_tracesys:
8349 #ifdef CONFIG_AUDITSYSCALL
8350 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8351 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8352 jz sysenter_auditsys
8353 #endif
8354 SAVE_REST
8355 @@ -243,6 +287,9 @@ sysenter_tracesys:
8356 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
8357 movq %rsp,%rdi /* &pt_regs -> arg1 */
8358 call syscall_trace_enter
8359 +
8360 + pax_erase_kstack
8361 +
8362 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8363 RESTORE_REST
8364 cmpq $(IA32_NR_syscalls-1),%rax
8365 @@ -274,19 +321,20 @@ ENDPROC(ia32_sysenter_target)
8366 ENTRY(ia32_cstar_target)
8367 CFI_STARTPROC32 simple
8368 CFI_SIGNAL_FRAME
8369 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
8370 + CFI_DEF_CFA rsp,0
8371 CFI_REGISTER rip,rcx
8372 /*CFI_REGISTER rflags,r11*/
8373 SWAPGS_UNSAFE_STACK
8374 movl %esp,%r8d
8375 CFI_REGISTER rsp,r8
8376 movq PER_CPU_VAR(kernel_stack),%rsp
8377 + SAVE_ARGS 8*6,0,0
8378 + pax_enter_kernel_user
8379 /*
8380 * No need to follow this irqs on/off section: the syscall
8381 * disabled irqs and here we enable it straight after entry:
8382 */
8383 ENABLE_INTERRUPTS(CLBR_NONE)
8384 - SAVE_ARGS 8,0,0
8385 movl %eax,%eax /* zero extension */
8386 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
8387 movq %rcx,RIP-ARGOFFSET(%rsp)
8388 @@ -302,12 +350,19 @@ ENTRY(ia32_cstar_target)
8389 /* no need to do an access_ok check here because r8 has been
8390 32bit zero extended */
8391 /* hardware stack frame is complete now */
8392 +
8393 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8394 + mov $PAX_USER_SHADOW_BASE,%r11
8395 + add %r11,%r8
8396 +#endif
8397 +
8398 1: movl (%r8),%r9d
8399 .section __ex_table,"a"
8400 .quad 1b,ia32_badarg
8401 .previous
8402 - orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8403 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8404 + GET_THREAD_INFO(%r11)
8405 + orl $TS_COMPAT,TI_status(%r11)
8406 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8407 CFI_REMEMBER_STATE
8408 jnz cstar_tracesys
8409 cmpq $IA32_NR_syscalls-1,%rax
8410 @@ -317,12 +372,15 @@ cstar_do_call:
8411 cstar_dispatch:
8412 call *ia32_sys_call_table(,%rax,8)
8413 movq %rax,RAX-ARGOFFSET(%rsp)
8414 + GET_THREAD_INFO(%r11)
8415 DISABLE_INTERRUPTS(CLBR_NONE)
8416 TRACE_IRQS_OFF
8417 - testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8418 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8419 jnz sysretl_audit
8420 sysretl_from_sys_call:
8421 - andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8422 + pax_exit_kernel_user
8423 + pax_erase_kstack
8424 + andl $~TS_COMPAT,TI_status(%r11)
8425 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
8426 movl RIP-ARGOFFSET(%rsp),%ecx
8427 CFI_REGISTER rip,rcx
8428 @@ -350,7 +408,7 @@ sysretl_audit:
8429
8430 cstar_tracesys:
8431 #ifdef CONFIG_AUDITSYSCALL
8432 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8433 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8434 jz cstar_auditsys
8435 #endif
8436 xchgl %r9d,%ebp
8437 @@ -359,6 +417,9 @@ cstar_tracesys:
8438 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8439 movq %rsp,%rdi /* &pt_regs -> arg1 */
8440 call syscall_trace_enter
8441 +
8442 + pax_erase_kstack
8443 +
8444 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
8445 RESTORE_REST
8446 xchgl %ebp,%r9d
8447 @@ -404,19 +465,21 @@ ENTRY(ia32_syscall)
8448 CFI_REL_OFFSET rip,RIP-RIP
8449 PARAVIRT_ADJUST_EXCEPTION_FRAME
8450 SWAPGS
8451 - /*
8452 - * No need to follow this irqs on/off section: the syscall
8453 - * disabled irqs and here we enable it straight after entry:
8454 - */
8455 - ENABLE_INTERRUPTS(CLBR_NONE)
8456 movl %eax,%eax
8457 pushq_cfi %rax
8458 cld
8459 /* note the registers are not zero extended to the sf.
8460 this could be a problem. */
8461 SAVE_ARGS 0,1,0
8462 - orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8463 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8464 + pax_enter_kernel_user
8465 + /*
8466 + * No need to follow this irqs on/off section: the syscall
8467 + * disabled irqs and here we enable it straight after entry:
8468 + */
8469 + ENABLE_INTERRUPTS(CLBR_NONE)
8470 + GET_THREAD_INFO(%r11)
8471 + orl $TS_COMPAT,TI_status(%r11)
8472 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8473 jnz ia32_tracesys
8474 cmpq $(IA32_NR_syscalls-1),%rax
8475 ja ia32_badsys
8476 @@ -435,6 +498,9 @@ ia32_tracesys:
8477 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8478 movq %rsp,%rdi /* &pt_regs -> arg1 */
8479 call syscall_trace_enter
8480 +
8481 + pax_erase_kstack
8482 +
8483 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8484 RESTORE_REST
8485 cmpq $(IA32_NR_syscalls-1),%rax
8486 diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
8487 index f6f5c53..b358b28 100644
8488 --- a/arch/x86/ia32/sys_ia32.c
8489 +++ b/arch/x86/ia32/sys_ia32.c
8490 @@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
8491 */
8492 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
8493 {
8494 - typeof(ubuf->st_uid) uid = 0;
8495 - typeof(ubuf->st_gid) gid = 0;
8496 + typeof(((struct stat64 *)0)->st_uid) uid = 0;
8497 + typeof(((struct stat64 *)0)->st_gid) gid = 0;
8498 SET_UID(uid, stat->uid);
8499 SET_GID(gid, stat->gid);
8500 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
8501 @@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
8502 }
8503 set_fs(KERNEL_DS);
8504 ret = sys_rt_sigprocmask(how,
8505 - set ? (sigset_t __user *)&s : NULL,
8506 - oset ? (sigset_t __user *)&s : NULL,
8507 + set ? (sigset_t __force_user *)&s : NULL,
8508 + oset ? (sigset_t __force_user *)&s : NULL,
8509 sigsetsize);
8510 set_fs(old_fs);
8511 if (ret)
8512 @@ -332,7 +332,7 @@ asmlinkage long sys32_alarm(unsigned int seconds)
8513 return alarm_setitimer(seconds);
8514 }
8515
8516 -asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
8517 +asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
8518 int options)
8519 {
8520 return compat_sys_wait4(pid, stat_addr, options, NULL);
8521 @@ -353,7 +353,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
8522 mm_segment_t old_fs = get_fs();
8523
8524 set_fs(KERNEL_DS);
8525 - ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
8526 + ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
8527 set_fs(old_fs);
8528 if (put_compat_timespec(&t, interval))
8529 return -EFAULT;
8530 @@ -369,7 +369,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
8531 mm_segment_t old_fs = get_fs();
8532
8533 set_fs(KERNEL_DS);
8534 - ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
8535 + ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
8536 set_fs(old_fs);
8537 if (!ret) {
8538 switch (_NSIG_WORDS) {
8539 @@ -394,7 +394,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
8540 if (copy_siginfo_from_user32(&info, uinfo))
8541 return -EFAULT;
8542 set_fs(KERNEL_DS);
8543 - ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
8544 + ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
8545 set_fs(old_fs);
8546 return ret;
8547 }
8548 @@ -439,7 +439,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
8549 return -EFAULT;
8550
8551 set_fs(KERNEL_DS);
8552 - ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
8553 + ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
8554 count);
8555 set_fs(old_fs);
8556
8557 diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
8558 index 952bd01..7692c6f 100644
8559 --- a/arch/x86/include/asm/alternative-asm.h
8560 +++ b/arch/x86/include/asm/alternative-asm.h
8561 @@ -15,6 +15,45 @@
8562 .endm
8563 #endif
8564
8565 +#ifdef KERNEXEC_PLUGIN
8566 + .macro pax_force_retaddr_bts rip=0
8567 + btsq $63,\rip(%rsp)
8568 + .endm
8569 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
8570 + .macro pax_force_retaddr rip=0, reload=0
8571 + btsq $63,\rip(%rsp)
8572 + .endm
8573 + .macro pax_force_fptr ptr
8574 + btsq $63,\ptr
8575 + .endm
8576 + .macro pax_set_fptr_mask
8577 + .endm
8578 +#endif
8579 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
8580 + .macro pax_force_retaddr rip=0, reload=0
8581 + .if \reload
8582 + pax_set_fptr_mask
8583 + .endif
8584 + orq %r10,\rip(%rsp)
8585 + .endm
8586 + .macro pax_force_fptr ptr
8587 + orq %r10,\ptr
8588 + .endm
8589 + .macro pax_set_fptr_mask
8590 + movabs $0x8000000000000000,%r10
8591 + .endm
8592 +#endif
8593 +#else
8594 + .macro pax_force_retaddr rip=0, reload=0
8595 + .endm
8596 + .macro pax_force_fptr ptr
8597 + .endm
8598 + .macro pax_force_retaddr_bts rip=0
8599 + .endm
8600 + .macro pax_set_fptr_mask
8601 + .endm
8602 +#endif
8603 +
8604 .macro altinstruction_entry orig alt feature orig_len alt_len
8605 .long \orig - .
8606 .long \alt - .
8607 diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
8608 index 37ad100..7d47faa 100644
8609 --- a/arch/x86/include/asm/alternative.h
8610 +++ b/arch/x86/include/asm/alternative.h
8611 @@ -89,7 +89,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
8612 ".section .discard,\"aw\",@progbits\n" \
8613 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
8614 ".previous\n" \
8615 - ".section .altinstr_replacement, \"ax\"\n" \
8616 + ".section .altinstr_replacement, \"a\"\n" \
8617 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
8618 ".previous"
8619
8620 diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
8621 index 3ab9bdd..238033e 100644
8622 --- a/arch/x86/include/asm/apic.h
8623 +++ b/arch/x86/include/asm/apic.h
8624 @@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
8625
8626 #ifdef CONFIG_X86_LOCAL_APIC
8627
8628 -extern unsigned int apic_verbosity;
8629 +extern int apic_verbosity;
8630 extern int local_apic_timer_c2_ok;
8631
8632 extern int disable_apic;
8633 diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
8634 index 20370c6..a2eb9b0 100644
8635 --- a/arch/x86/include/asm/apm.h
8636 +++ b/arch/x86/include/asm/apm.h
8637 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
8638 __asm__ __volatile__(APM_DO_ZERO_SEGS
8639 "pushl %%edi\n\t"
8640 "pushl %%ebp\n\t"
8641 - "lcall *%%cs:apm_bios_entry\n\t"
8642 + "lcall *%%ss:apm_bios_entry\n\t"
8643 "setc %%al\n\t"
8644 "popl %%ebp\n\t"
8645 "popl %%edi\n\t"
8646 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
8647 __asm__ __volatile__(APM_DO_ZERO_SEGS
8648 "pushl %%edi\n\t"
8649 "pushl %%ebp\n\t"
8650 - "lcall *%%cs:apm_bios_entry\n\t"
8651 + "lcall *%%ss:apm_bios_entry\n\t"
8652 "setc %%bl\n\t"
8653 "popl %%ebp\n\t"
8654 "popl %%edi\n\t"
8655 diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
8656 index 58cb6d4..ca9010d 100644
8657 --- a/arch/x86/include/asm/atomic.h
8658 +++ b/arch/x86/include/asm/atomic.h
8659 @@ -22,7 +22,18 @@
8660 */
8661 static inline int atomic_read(const atomic_t *v)
8662 {
8663 - return (*(volatile int *)&(v)->counter);
8664 + return (*(volatile const int *)&(v)->counter);
8665 +}
8666 +
8667 +/**
8668 + * atomic_read_unchecked - read atomic variable
8669 + * @v: pointer of type atomic_unchecked_t
8670 + *
8671 + * Atomically reads the value of @v.
8672 + */
8673 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
8674 +{
8675 + return (*(volatile const int *)&(v)->counter);
8676 }
8677
8678 /**
8679 @@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
8680 }
8681
8682 /**
8683 + * atomic_set_unchecked - set atomic variable
8684 + * @v: pointer of type atomic_unchecked_t
8685 + * @i: required value
8686 + *
8687 + * Atomically sets the value of @v to @i.
8688 + */
8689 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
8690 +{
8691 + v->counter = i;
8692 +}
8693 +
8694 +/**
8695 * atomic_add - add integer to atomic variable
8696 * @i: integer value to add
8697 * @v: pointer of type atomic_t
8698 @@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
8699 */
8700 static inline void atomic_add(int i, atomic_t *v)
8701 {
8702 - asm volatile(LOCK_PREFIX "addl %1,%0"
8703 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
8704 +
8705 +#ifdef CONFIG_PAX_REFCOUNT
8706 + "jno 0f\n"
8707 + LOCK_PREFIX "subl %1,%0\n"
8708 + "int $4\n0:\n"
8709 + _ASM_EXTABLE(0b, 0b)
8710 +#endif
8711 +
8712 + : "+m" (v->counter)
8713 + : "ir" (i));
8714 +}
8715 +
8716 +/**
8717 + * atomic_add_unchecked - add integer to atomic variable
8718 + * @i: integer value to add
8719 + * @v: pointer of type atomic_unchecked_t
8720 + *
8721 + * Atomically adds @i to @v.
8722 + */
8723 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
8724 +{
8725 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
8726 : "+m" (v->counter)
8727 : "ir" (i));
8728 }
8729 @@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
8730 */
8731 static inline void atomic_sub(int i, atomic_t *v)
8732 {
8733 - asm volatile(LOCK_PREFIX "subl %1,%0"
8734 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
8735 +
8736 +#ifdef CONFIG_PAX_REFCOUNT
8737 + "jno 0f\n"
8738 + LOCK_PREFIX "addl %1,%0\n"
8739 + "int $4\n0:\n"
8740 + _ASM_EXTABLE(0b, 0b)
8741 +#endif
8742 +
8743 + : "+m" (v->counter)
8744 + : "ir" (i));
8745 +}
8746 +
8747 +/**
8748 + * atomic_sub_unchecked - subtract integer from atomic variable
8749 + * @i: integer value to subtract
8750 + * @v: pointer of type atomic_unchecked_t
8751 + *
8752 + * Atomically subtracts @i from @v.
8753 + */
8754 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
8755 +{
8756 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
8757 : "+m" (v->counter)
8758 : "ir" (i));
8759 }
8760 @@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8761 {
8762 unsigned char c;
8763
8764 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
8765 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
8766 +
8767 +#ifdef CONFIG_PAX_REFCOUNT
8768 + "jno 0f\n"
8769 + LOCK_PREFIX "addl %2,%0\n"
8770 + "int $4\n0:\n"
8771 + _ASM_EXTABLE(0b, 0b)
8772 +#endif
8773 +
8774 + "sete %1\n"
8775 : "+m" (v->counter), "=qm" (c)
8776 : "ir" (i) : "memory");
8777 return c;
8778 @@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8779 */
8780 static inline void atomic_inc(atomic_t *v)
8781 {
8782 - asm volatile(LOCK_PREFIX "incl %0"
8783 + asm volatile(LOCK_PREFIX "incl %0\n"
8784 +
8785 +#ifdef CONFIG_PAX_REFCOUNT
8786 + "jno 0f\n"
8787 + LOCK_PREFIX "decl %0\n"
8788 + "int $4\n0:\n"
8789 + _ASM_EXTABLE(0b, 0b)
8790 +#endif
8791 +
8792 + : "+m" (v->counter));
8793 +}
8794 +
8795 +/**
8796 + * atomic_inc_unchecked - increment atomic variable
8797 + * @v: pointer of type atomic_unchecked_t
8798 + *
8799 + * Atomically increments @v by 1.
8800 + */
8801 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
8802 +{
8803 + asm volatile(LOCK_PREFIX "incl %0\n"
8804 : "+m" (v->counter));
8805 }
8806
8807 @@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
8808 */
8809 static inline void atomic_dec(atomic_t *v)
8810 {
8811 - asm volatile(LOCK_PREFIX "decl %0"
8812 + asm volatile(LOCK_PREFIX "decl %0\n"
8813 +
8814 +#ifdef CONFIG_PAX_REFCOUNT
8815 + "jno 0f\n"
8816 + LOCK_PREFIX "incl %0\n"
8817 + "int $4\n0:\n"
8818 + _ASM_EXTABLE(0b, 0b)
8819 +#endif
8820 +
8821 + : "+m" (v->counter));
8822 +}
8823 +
8824 +/**
8825 + * atomic_dec_unchecked - decrement atomic variable
8826 + * @v: pointer of type atomic_unchecked_t
8827 + *
8828 + * Atomically decrements @v by 1.
8829 + */
8830 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
8831 +{
8832 + asm volatile(LOCK_PREFIX "decl %0\n"
8833 : "+m" (v->counter));
8834 }
8835
8836 @@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
8837 {
8838 unsigned char c;
8839
8840 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
8841 + asm volatile(LOCK_PREFIX "decl %0\n"
8842 +
8843 +#ifdef CONFIG_PAX_REFCOUNT
8844 + "jno 0f\n"
8845 + LOCK_PREFIX "incl %0\n"
8846 + "int $4\n0:\n"
8847 + _ASM_EXTABLE(0b, 0b)
8848 +#endif
8849 +
8850 + "sete %1\n"
8851 : "+m" (v->counter), "=qm" (c)
8852 : : "memory");
8853 return c != 0;
8854 @@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
8855 {
8856 unsigned char c;
8857
8858 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
8859 + asm volatile(LOCK_PREFIX "incl %0\n"
8860 +
8861 +#ifdef CONFIG_PAX_REFCOUNT
8862 + "jno 0f\n"
8863 + LOCK_PREFIX "decl %0\n"
8864 + "int $4\n0:\n"
8865 + _ASM_EXTABLE(0b, 0b)
8866 +#endif
8867 +
8868 + "sete %1\n"
8869 + : "+m" (v->counter), "=qm" (c)
8870 + : : "memory");
8871 + return c != 0;
8872 +}
8873 +
8874 +/**
8875 + * atomic_inc_and_test_unchecked - increment and test
8876 + * @v: pointer of type atomic_unchecked_t
8877 + *
8878 + * Atomically increments @v by 1
8879 + * and returns true if the result is zero, or false for all
8880 + * other cases.
8881 + */
8882 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
8883 +{
8884 + unsigned char c;
8885 +
8886 + asm volatile(LOCK_PREFIX "incl %0\n"
8887 + "sete %1\n"
8888 : "+m" (v->counter), "=qm" (c)
8889 : : "memory");
8890 return c != 0;
8891 @@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
8892 {
8893 unsigned char c;
8894
8895 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
8896 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
8897 +
8898 +#ifdef CONFIG_PAX_REFCOUNT
8899 + "jno 0f\n"
8900 + LOCK_PREFIX "subl %2,%0\n"
8901 + "int $4\n0:\n"
8902 + _ASM_EXTABLE(0b, 0b)
8903 +#endif
8904 +
8905 + "sets %1\n"
8906 : "+m" (v->counter), "=qm" (c)
8907 : "ir" (i) : "memory");
8908 return c;
8909 @@ -179,7 +341,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
8910 goto no_xadd;
8911 #endif
8912 /* Modern 486+ processor */
8913 - return i + xadd(&v->counter, i);
8914 + return i + xadd_check_overflow(&v->counter, i);
8915
8916 #ifdef CONFIG_M386
8917 no_xadd: /* Legacy 386 processor */
8918 @@ -192,6 +354,34 @@ no_xadd: /* Legacy 386 processor */
8919 }
8920
8921 /**
8922 + * atomic_add_return_unchecked - add integer and return
8923 + * @i: integer value to add
8924 + * @v: pointer of type atomic_unchecked_t
8925 + *
8926 + * Atomically adds @i to @v and returns @i + @v
8927 + */
8928 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
8929 +{
8930 +#ifdef CONFIG_M386
8931 + int __i;
8932 + unsigned long flags;
8933 + if (unlikely(boot_cpu_data.x86 <= 3))
8934 + goto no_xadd;
8935 +#endif
8936 + /* Modern 486+ processor */
8937 + return i + xadd(&v->counter, i);
8938 +
8939 +#ifdef CONFIG_M386
8940 +no_xadd: /* Legacy 386 processor */
8941 + raw_local_irq_save(flags);
8942 + __i = atomic_read_unchecked(v);
8943 + atomic_set_unchecked(v, i + __i);
8944 + raw_local_irq_restore(flags);
8945 + return i + __i;
8946 +#endif
8947 +}
8948 +
8949 +/**
8950 * atomic_sub_return - subtract integer and return
8951 * @v: pointer of type atomic_t
8952 * @i: integer value to subtract
8953 @@ -204,6 +394,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
8954 }
8955
8956 #define atomic_inc_return(v) (atomic_add_return(1, v))
8957 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
8958 +{
8959 + return atomic_add_return_unchecked(1, v);
8960 +}
8961 #define atomic_dec_return(v) (atomic_sub_return(1, v))
8962
8963 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
8964 @@ -211,11 +405,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
8965 return cmpxchg(&v->counter, old, new);
8966 }
8967
8968 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
8969 +{
8970 + return cmpxchg(&v->counter, old, new);
8971 +}
8972 +
8973 static inline int atomic_xchg(atomic_t *v, int new)
8974 {
8975 return xchg(&v->counter, new);
8976 }
8977
8978 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
8979 +{
8980 + return xchg(&v->counter, new);
8981 +}
8982 +
8983 /**
8984 * __atomic_add_unless - add unless the number is already a given value
8985 * @v: pointer of type atomic_t
8986 @@ -227,12 +431,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
8987 */
8988 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
8989 {
8990 - int c, old;
8991 + int c, old, new;
8992 c = atomic_read(v);
8993 for (;;) {
8994 - if (unlikely(c == (u)))
8995 + if (unlikely(c == u))
8996 break;
8997 - old = atomic_cmpxchg((v), c, c + (a));
8998 +
8999 + asm volatile("addl %2,%0\n"
9000 +
9001 +#ifdef CONFIG_PAX_REFCOUNT
9002 + "jno 0f\n"
9003 + "subl %2,%0\n"
9004 + "int $4\n0:\n"
9005 + _ASM_EXTABLE(0b, 0b)
9006 +#endif
9007 +
9008 + : "=r" (new)
9009 + : "0" (c), "ir" (a));
9010 +
9011 + old = atomic_cmpxchg(v, c, new);
9012 if (likely(old == c))
9013 break;
9014 c = old;
9015 @@ -240,6 +457,48 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9016 return c;
9017 }
9018
9019 +/**
9020 + * atomic_inc_not_zero_hint - increment if not null
9021 + * @v: pointer of type atomic_t
9022 + * @hint: probable value of the atomic before the increment
9023 + *
9024 + * This version of atomic_inc_not_zero() gives a hint of probable
9025 + * value of the atomic. This helps processor to not read the memory
9026 + * before doing the atomic read/modify/write cycle, lowering
9027 + * number of bus transactions on some arches.
9028 + *
9029 + * Returns: 0 if increment was not done, 1 otherwise.
9030 + */
9031 +#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
9032 +static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
9033 +{
9034 + int val, c = hint, new;
9035 +
9036 + /* sanity test, should be removed by compiler if hint is a constant */
9037 + if (!hint)
9038 + return __atomic_add_unless(v, 1, 0);
9039 +
9040 + do {
9041 + asm volatile("incl %0\n"
9042 +
9043 +#ifdef CONFIG_PAX_REFCOUNT
9044 + "jno 0f\n"
9045 + "decl %0\n"
9046 + "int $4\n0:\n"
9047 + _ASM_EXTABLE(0b, 0b)
9048 +#endif
9049 +
9050 + : "=r" (new)
9051 + : "0" (c));
9052 +
9053 + val = atomic_cmpxchg(v, c, new);
9054 + if (val == c)
9055 + return 1;
9056 + c = val;
9057 + } while (c);
9058 +
9059 + return 0;
9060 +}
9061
9062 /*
9063 * atomic_dec_if_positive - decrement by 1 if old value positive
9064 diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
9065 index fa13f0e..27c2e08 100644
9066 --- a/arch/x86/include/asm/atomic64_32.h
9067 +++ b/arch/x86/include/asm/atomic64_32.h
9068 @@ -12,6 +12,14 @@ typedef struct {
9069 u64 __aligned(8) counter;
9070 } atomic64_t;
9071
9072 +#ifdef CONFIG_PAX_REFCOUNT
9073 +typedef struct {
9074 + u64 __aligned(8) counter;
9075 +} atomic64_unchecked_t;
9076 +#else
9077 +typedef atomic64_t atomic64_unchecked_t;
9078 +#endif
9079 +
9080 #define ATOMIC64_INIT(val) { (val) }
9081
9082 #ifdef CONFIG_X86_CMPXCHG64
9083 @@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
9084 }
9085
9086 /**
9087 + * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
9088 + * @p: pointer to type atomic64_unchecked_t
9089 + * @o: expected value
9090 + * @n: new value
9091 + *
9092 + * Atomically sets @v to @n if it was equal to @o and returns
9093 + * the old value.
9094 + */
9095 +
9096 +static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
9097 +{
9098 + return cmpxchg64(&v->counter, o, n);
9099 +}
9100 +
9101 +/**
9102 * atomic64_xchg - xchg atomic64 variable
9103 * @v: pointer to type atomic64_t
9104 * @n: value to assign
9105 @@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64_t *v, long long i)
9106 }
9107
9108 /**
9109 + * atomic64_set_unchecked - set atomic64 variable
9110 + * @v: pointer to type atomic64_unchecked_t
9111 + * @n: value to assign
9112 + *
9113 + * Atomically sets the value of @v to @n.
9114 + */
9115 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
9116 +{
9117 + unsigned high = (unsigned)(i >> 32);
9118 + unsigned low = (unsigned)i;
9119 + asm volatile(ATOMIC64_ALTERNATIVE(set)
9120 + : "+b" (low), "+c" (high)
9121 + : "S" (v)
9122 + : "eax", "edx", "memory"
9123 + );
9124 +}
9125 +
9126 +/**
9127 * atomic64_read - read atomic64 variable
9128 * @v: pointer to type atomic64_t
9129 *
9130 @@ -93,6 +134,22 @@ static inline long long atomic64_read(const atomic64_t *v)
9131 }
9132
9133 /**
9134 + * atomic64_read_unchecked - read atomic64 variable
9135 + * @v: pointer to type atomic64_unchecked_t
9136 + *
9137 + * Atomically reads the value of @v and returns it.
9138 + */
9139 +static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
9140 +{
9141 + long long r;
9142 + asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
9143 + : "=A" (r), "+c" (v)
9144 + : : "memory"
9145 + );
9146 + return r;
9147 + }
9148 +
9149 +/**
9150 * atomic64_add_return - add and return
9151 * @i: integer value to add
9152 * @v: pointer to type atomic64_t
9153 @@ -108,6 +165,22 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
9154 return i;
9155 }
9156
9157 +/**
9158 + * atomic64_add_return_unchecked - add and return
9159 + * @i: integer value to add
9160 + * @v: pointer to type atomic64_unchecked_t
9161 + *
9162 + * Atomically adds @i to @v and returns @i + *@v
9163 + */
9164 +static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
9165 +{
9166 + asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
9167 + : "+A" (i), "+c" (v)
9168 + : : "memory"
9169 + );
9170 + return i;
9171 +}
9172 +
9173 /*
9174 * Other variants with different arithmetic operators:
9175 */
9176 @@ -131,6 +204,17 @@ static inline long long atomic64_inc_return(atomic64_t *v)
9177 return a;
9178 }
9179
9180 +static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9181 +{
9182 + long long a;
9183 + asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
9184 + : "=A" (a)
9185 + : "S" (v)
9186 + : "memory", "ecx"
9187 + );
9188 + return a;
9189 +}
9190 +
9191 static inline long long atomic64_dec_return(atomic64_t *v)
9192 {
9193 long long a;
9194 @@ -159,6 +243,22 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
9195 }
9196
9197 /**
9198 + * atomic64_add_unchecked - add integer to atomic64 variable
9199 + * @i: integer value to add
9200 + * @v: pointer to type atomic64_unchecked_t
9201 + *
9202 + * Atomically adds @i to @v.
9203 + */
9204 +static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
9205 +{
9206 + asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
9207 + : "+A" (i), "+c" (v)
9208 + : : "memory"
9209 + );
9210 + return i;
9211 +}
9212 +
9213 +/**
9214 * atomic64_sub - subtract the atomic64 variable
9215 * @i: integer value to subtract
9216 * @v: pointer to type atomic64_t
9217 diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
9218 index 0e1cbfc..5623683 100644
9219 --- a/arch/x86/include/asm/atomic64_64.h
9220 +++ b/arch/x86/include/asm/atomic64_64.h
9221 @@ -18,7 +18,19 @@
9222 */
9223 static inline long atomic64_read(const atomic64_t *v)
9224 {
9225 - return (*(volatile long *)&(v)->counter);
9226 + return (*(volatile const long *)&(v)->counter);
9227 +}
9228 +
9229 +/**
9230 + * atomic64_read_unchecked - read atomic64 variable
9231 + * @v: pointer of type atomic64_unchecked_t
9232 + *
9233 + * Atomically reads the value of @v.
9234 + * Doesn't imply a read memory barrier.
9235 + */
9236 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
9237 +{
9238 + return (*(volatile const long *)&(v)->counter);
9239 }
9240
9241 /**
9242 @@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
9243 }
9244
9245 /**
9246 + * atomic64_set_unchecked - set atomic64 variable
9247 + * @v: pointer to type atomic64_unchecked_t
9248 + * @i: required value
9249 + *
9250 + * Atomically sets the value of @v to @i.
9251 + */
9252 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
9253 +{
9254 + v->counter = i;
9255 +}
9256 +
9257 +/**
9258 * atomic64_add - add integer to atomic64 variable
9259 * @i: integer value to add
9260 * @v: pointer to type atomic64_t
9261 @@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
9262 */
9263 static inline void atomic64_add(long i, atomic64_t *v)
9264 {
9265 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
9266 +
9267 +#ifdef CONFIG_PAX_REFCOUNT
9268 + "jno 0f\n"
9269 + LOCK_PREFIX "subq %1,%0\n"
9270 + "int $4\n0:\n"
9271 + _ASM_EXTABLE(0b, 0b)
9272 +#endif
9273 +
9274 + : "=m" (v->counter)
9275 + : "er" (i), "m" (v->counter));
9276 +}
9277 +
9278 +/**
9279 + * atomic64_add_unchecked - add integer to atomic64 variable
9280 + * @i: integer value to add
9281 + * @v: pointer to type atomic64_unchecked_t
9282 + *
9283 + * Atomically adds @i to @v.
9284 + */
9285 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
9286 +{
9287 asm volatile(LOCK_PREFIX "addq %1,%0"
9288 : "=m" (v->counter)
9289 : "er" (i), "m" (v->counter));
9290 @@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
9291 */
9292 static inline void atomic64_sub(long i, atomic64_t *v)
9293 {
9294 - asm volatile(LOCK_PREFIX "subq %1,%0"
9295 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
9296 +
9297 +#ifdef CONFIG_PAX_REFCOUNT
9298 + "jno 0f\n"
9299 + LOCK_PREFIX "addq %1,%0\n"
9300 + "int $4\n0:\n"
9301 + _ASM_EXTABLE(0b, 0b)
9302 +#endif
9303 +
9304 + : "=m" (v->counter)
9305 + : "er" (i), "m" (v->counter));
9306 +}
9307 +
9308 +/**
9309 + * atomic64_sub_unchecked - subtract the atomic64 variable
9310 + * @i: integer value to subtract
9311 + * @v: pointer to type atomic64_unchecked_t
9312 + *
9313 + * Atomically subtracts @i from @v.
9314 + */
9315 +static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
9316 +{
9317 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
9318 : "=m" (v->counter)
9319 : "er" (i), "m" (v->counter));
9320 }
9321 @@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9322 {
9323 unsigned char c;
9324
9325 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
9326 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
9327 +
9328 +#ifdef CONFIG_PAX_REFCOUNT
9329 + "jno 0f\n"
9330 + LOCK_PREFIX "addq %2,%0\n"
9331 + "int $4\n0:\n"
9332 + _ASM_EXTABLE(0b, 0b)
9333 +#endif
9334 +
9335 + "sete %1\n"
9336 : "=m" (v->counter), "=qm" (c)
9337 : "er" (i), "m" (v->counter) : "memory");
9338 return c;
9339 @@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9340 */
9341 static inline void atomic64_inc(atomic64_t *v)
9342 {
9343 + asm volatile(LOCK_PREFIX "incq %0\n"
9344 +
9345 +#ifdef CONFIG_PAX_REFCOUNT
9346 + "jno 0f\n"
9347 + LOCK_PREFIX "decq %0\n"
9348 + "int $4\n0:\n"
9349 + _ASM_EXTABLE(0b, 0b)
9350 +#endif
9351 +
9352 + : "=m" (v->counter)
9353 + : "m" (v->counter));
9354 +}
9355 +
9356 +/**
9357 + * atomic64_inc_unchecked - increment atomic64 variable
9358 + * @v: pointer to type atomic64_unchecked_t
9359 + *
9360 + * Atomically increments @v by 1.
9361 + */
9362 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
9363 +{
9364 asm volatile(LOCK_PREFIX "incq %0"
9365 : "=m" (v->counter)
9366 : "m" (v->counter));
9367 @@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
9368 */
9369 static inline void atomic64_dec(atomic64_t *v)
9370 {
9371 - asm volatile(LOCK_PREFIX "decq %0"
9372 + asm volatile(LOCK_PREFIX "decq %0\n"
9373 +
9374 +#ifdef CONFIG_PAX_REFCOUNT
9375 + "jno 0f\n"
9376 + LOCK_PREFIX "incq %0\n"
9377 + "int $4\n0:\n"
9378 + _ASM_EXTABLE(0b, 0b)
9379 +#endif
9380 +
9381 + : "=m" (v->counter)
9382 + : "m" (v->counter));
9383 +}
9384 +
9385 +/**
9386 + * atomic64_dec_unchecked - decrement atomic64 variable
9387 + * @v: pointer to type atomic64_t
9388 + *
9389 + * Atomically decrements @v by 1.
9390 + */
9391 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
9392 +{
9393 + asm volatile(LOCK_PREFIX "decq %0\n"
9394 : "=m" (v->counter)
9395 : "m" (v->counter));
9396 }
9397 @@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
9398 {
9399 unsigned char c;
9400
9401 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
9402 + asm volatile(LOCK_PREFIX "decq %0\n"
9403 +
9404 +#ifdef CONFIG_PAX_REFCOUNT
9405 + "jno 0f\n"
9406 + LOCK_PREFIX "incq %0\n"
9407 + "int $4\n0:\n"
9408 + _ASM_EXTABLE(0b, 0b)
9409 +#endif
9410 +
9411 + "sete %1\n"
9412 : "=m" (v->counter), "=qm" (c)
9413 : "m" (v->counter) : "memory");
9414 return c != 0;
9415 @@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
9416 {
9417 unsigned char c;
9418
9419 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
9420 + asm volatile(LOCK_PREFIX "incq %0\n"
9421 +
9422 +#ifdef CONFIG_PAX_REFCOUNT
9423 + "jno 0f\n"
9424 + LOCK_PREFIX "decq %0\n"
9425 + "int $4\n0:\n"
9426 + _ASM_EXTABLE(0b, 0b)
9427 +#endif
9428 +
9429 + "sete %1\n"
9430 : "=m" (v->counter), "=qm" (c)
9431 : "m" (v->counter) : "memory");
9432 return c != 0;
9433 @@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9434 {
9435 unsigned char c;
9436
9437 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
9438 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
9439 +
9440 +#ifdef CONFIG_PAX_REFCOUNT
9441 + "jno 0f\n"
9442 + LOCK_PREFIX "subq %2,%0\n"
9443 + "int $4\n0:\n"
9444 + _ASM_EXTABLE(0b, 0b)
9445 +#endif
9446 +
9447 + "sets %1\n"
9448 : "=m" (v->counter), "=qm" (c)
9449 : "er" (i), "m" (v->counter) : "memory");
9450 return c;
9451 @@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9452 */
9453 static inline long atomic64_add_return(long i, atomic64_t *v)
9454 {
9455 + return i + xadd_check_overflow(&v->counter, i);
9456 +}
9457 +
9458 +/**
9459 + * atomic64_add_return_unchecked - add and return
9460 + * @i: integer value to add
9461 + * @v: pointer to type atomic64_unchecked_t
9462 + *
9463 + * Atomically adds @i to @v and returns @i + @v
9464 + */
9465 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
9466 +{
9467 return i + xadd(&v->counter, i);
9468 }
9469
9470 @@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
9471 }
9472
9473 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
9474 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9475 +{
9476 + return atomic64_add_return_unchecked(1, v);
9477 +}
9478 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
9479
9480 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
9481 @@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
9482 return cmpxchg(&v->counter, old, new);
9483 }
9484
9485 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
9486 +{
9487 + return cmpxchg(&v->counter, old, new);
9488 +}
9489 +
9490 static inline long atomic64_xchg(atomic64_t *v, long new)
9491 {
9492 return xchg(&v->counter, new);
9493 @@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
9494 */
9495 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
9496 {
9497 - long c, old;
9498 + long c, old, new;
9499 c = atomic64_read(v);
9500 for (;;) {
9501 - if (unlikely(c == (u)))
9502 + if (unlikely(c == u))
9503 break;
9504 - old = atomic64_cmpxchg((v), c, c + (a));
9505 +
9506 + asm volatile("add %2,%0\n"
9507 +
9508 +#ifdef CONFIG_PAX_REFCOUNT
9509 + "jno 0f\n"
9510 + "sub %2,%0\n"
9511 + "int $4\n0:\n"
9512 + _ASM_EXTABLE(0b, 0b)
9513 +#endif
9514 +
9515 + : "=r" (new)
9516 + : "0" (c), "ir" (a));
9517 +
9518 + old = atomic64_cmpxchg(v, c, new);
9519 if (likely(old == c))
9520 break;
9521 c = old;
9522 }
9523 - return c != (u);
9524 + return c != u;
9525 }
9526
9527 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
9528 diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
9529 index b97596e..9bd48b06 100644
9530 --- a/arch/x86/include/asm/bitops.h
9531 +++ b/arch/x86/include/asm/bitops.h
9532 @@ -38,7 +38,7 @@
9533 * a mask operation on a byte.
9534 */
9535 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
9536 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
9537 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
9538 #define CONST_MASK(nr) (1 << ((nr) & 7))
9539
9540 /**
9541 diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
9542 index 5e1a2ee..c9f9533 100644
9543 --- a/arch/x86/include/asm/boot.h
9544 +++ b/arch/x86/include/asm/boot.h
9545 @@ -11,10 +11,15 @@
9546 #include <asm/pgtable_types.h>
9547
9548 /* Physical address where kernel should be loaded. */
9549 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
9550 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
9551 + (CONFIG_PHYSICAL_ALIGN - 1)) \
9552 & ~(CONFIG_PHYSICAL_ALIGN - 1))
9553
9554 +#ifndef __ASSEMBLY__
9555 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
9556 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
9557 +#endif
9558 +
9559 /* Minimum kernel alignment, as a power of two */
9560 #ifdef CONFIG_X86_64
9561 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
9562 diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
9563 index 48f99f1..d78ebf9 100644
9564 --- a/arch/x86/include/asm/cache.h
9565 +++ b/arch/x86/include/asm/cache.h
9566 @@ -5,12 +5,13 @@
9567
9568 /* L1 cache line size */
9569 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
9570 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9571 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9572
9573 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
9574 +#define __read_only __attribute__((__section__(".data..read_only")))
9575
9576 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
9577 -#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
9578 +#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
9579
9580 #ifdef CONFIG_X86_VSMP
9581 #ifdef CONFIG_SMP
9582 diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
9583 index 4e12668..501d239 100644
9584 --- a/arch/x86/include/asm/cacheflush.h
9585 +++ b/arch/x86/include/asm/cacheflush.h
9586 @@ -26,7 +26,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
9587 unsigned long pg_flags = pg->flags & _PGMT_MASK;
9588
9589 if (pg_flags == _PGMT_DEFAULT)
9590 - return -1;
9591 + return ~0UL;
9592 else if (pg_flags == _PGMT_WC)
9593 return _PAGE_CACHE_WC;
9594 else if (pg_flags == _PGMT_UC_MINUS)
9595 diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
9596 index 46fc474..b02b0f9 100644
9597 --- a/arch/x86/include/asm/checksum_32.h
9598 +++ b/arch/x86/include/asm/checksum_32.h
9599 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
9600 int len, __wsum sum,
9601 int *src_err_ptr, int *dst_err_ptr);
9602
9603 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
9604 + int len, __wsum sum,
9605 + int *src_err_ptr, int *dst_err_ptr);
9606 +
9607 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
9608 + int len, __wsum sum,
9609 + int *src_err_ptr, int *dst_err_ptr);
9610 +
9611 /*
9612 * Note: when you get a NULL pointer exception here this means someone
9613 * passed in an incorrect kernel address to one of these functions.
9614 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
9615 int *err_ptr)
9616 {
9617 might_sleep();
9618 - return csum_partial_copy_generic((__force void *)src, dst,
9619 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
9620 len, sum, err_ptr, NULL);
9621 }
9622
9623 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
9624 {
9625 might_sleep();
9626 if (access_ok(VERIFY_WRITE, dst, len))
9627 - return csum_partial_copy_generic(src, (__force void *)dst,
9628 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
9629 len, sum, NULL, err_ptr);
9630
9631 if (len)
9632 diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
9633 index b3b7332..3935f40 100644
9634 --- a/arch/x86/include/asm/cmpxchg.h
9635 +++ b/arch/x86/include/asm/cmpxchg.h
9636 @@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void)
9637 __compiletime_error("Bad argument size for cmpxchg");
9638 extern void __xadd_wrong_size(void)
9639 __compiletime_error("Bad argument size for xadd");
9640 +extern void __xadd_check_overflow_wrong_size(void)
9641 + __compiletime_error("Bad argument size for xadd_check_overflow");
9642 extern void __add_wrong_size(void)
9643 __compiletime_error("Bad argument size for add");
9644 +extern void __add_check_overflow_wrong_size(void)
9645 + __compiletime_error("Bad argument size for add_check_overflow");
9646
9647 /*
9648 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
9649 @@ -67,6 +71,34 @@ extern void __add_wrong_size(void)
9650 __ret; \
9651 })
9652
9653 +#define __xchg_op_check_overflow(ptr, arg, op, lock) \
9654 + ({ \
9655 + __typeof__ (*(ptr)) __ret = (arg); \
9656 + switch (sizeof(*(ptr))) { \
9657 + case __X86_CASE_L: \
9658 + asm volatile (lock #op "l %0, %1\n" \
9659 + "jno 0f\n" \
9660 + "mov %0,%1\n" \
9661 + "int $4\n0:\n" \
9662 + _ASM_EXTABLE(0b, 0b) \
9663 + : "+r" (__ret), "+m" (*(ptr)) \
9664 + : : "memory", "cc"); \
9665 + break; \
9666 + case __X86_CASE_Q: \
9667 + asm volatile (lock #op "q %q0, %1\n" \
9668 + "jno 0f\n" \
9669 + "mov %0,%1\n" \
9670 + "int $4\n0:\n" \
9671 + _ASM_EXTABLE(0b, 0b) \
9672 + : "+r" (__ret), "+m" (*(ptr)) \
9673 + : : "memory", "cc"); \
9674 + break; \
9675 + default: \
9676 + __ ## op ## _check_overflow_wrong_size(); \
9677 + } \
9678 + __ret; \
9679 + })
9680 +
9681 /*
9682 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
9683 * Since this is generally used to protect other memory information, we
9684 @@ -167,6 +199,9 @@ extern void __add_wrong_size(void)
9685 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
9686 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
9687
9688 +#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
9689 +#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
9690 +
9691 #define __add(ptr, inc, lock) \
9692 ({ \
9693 __typeof__ (*(ptr)) __ret = (inc); \
9694 diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
9695 index 8d67d42..183d0eb 100644
9696 --- a/arch/x86/include/asm/cpufeature.h
9697 +++ b/arch/x86/include/asm/cpufeature.h
9698 @@ -367,7 +367,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
9699 ".section .discard,\"aw\",@progbits\n"
9700 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
9701 ".previous\n"
9702 - ".section .altinstr_replacement,\"ax\"\n"
9703 + ".section .altinstr_replacement,\"a\"\n"
9704 "3: movb $1,%0\n"
9705 "4:\n"
9706 ".previous\n"
9707 diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
9708 index e95822d..a90010e 100644
9709 --- a/arch/x86/include/asm/desc.h
9710 +++ b/arch/x86/include/asm/desc.h
9711 @@ -4,6 +4,7 @@
9712 #include <asm/desc_defs.h>
9713 #include <asm/ldt.h>
9714 #include <asm/mmu.h>
9715 +#include <asm/pgtable.h>
9716
9717 #include <linux/smp.h>
9718
9719 @@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
9720
9721 desc->type = (info->read_exec_only ^ 1) << 1;
9722 desc->type |= info->contents << 2;
9723 + desc->type |= info->seg_not_present ^ 1;
9724
9725 desc->s = 1;
9726 desc->dpl = 0x3;
9727 @@ -34,19 +36,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
9728 }
9729
9730 extern struct desc_ptr idt_descr;
9731 -extern gate_desc idt_table[];
9732 extern struct desc_ptr nmi_idt_descr;
9733 -extern gate_desc nmi_idt_table[];
9734 -
9735 -struct gdt_page {
9736 - struct desc_struct gdt[GDT_ENTRIES];
9737 -} __attribute__((aligned(PAGE_SIZE)));
9738 -
9739 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
9740 +extern gate_desc idt_table[256];
9741 +extern gate_desc nmi_idt_table[256];
9742
9743 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
9744 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
9745 {
9746 - return per_cpu(gdt_page, cpu).gdt;
9747 + return cpu_gdt_table[cpu];
9748 }
9749
9750 #ifdef CONFIG_X86_64
9751 @@ -71,8 +68,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
9752 unsigned long base, unsigned dpl, unsigned flags,
9753 unsigned short seg)
9754 {
9755 - gate->a = (seg << 16) | (base & 0xffff);
9756 - gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
9757 + gate->gate.offset_low = base;
9758 + gate->gate.seg = seg;
9759 + gate->gate.reserved = 0;
9760 + gate->gate.type = type;
9761 + gate->gate.s = 0;
9762 + gate->gate.dpl = dpl;
9763 + gate->gate.p = 1;
9764 + gate->gate.offset_high = base >> 16;
9765 }
9766
9767 #endif
9768 @@ -117,12 +120,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
9769
9770 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
9771 {
9772 + pax_open_kernel();
9773 memcpy(&idt[entry], gate, sizeof(*gate));
9774 + pax_close_kernel();
9775 }
9776
9777 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
9778 {
9779 + pax_open_kernel();
9780 memcpy(&ldt[entry], desc, 8);
9781 + pax_close_kernel();
9782 }
9783
9784 static inline void
9785 @@ -136,7 +143,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
9786 default: size = sizeof(*gdt); break;
9787 }
9788
9789 + pax_open_kernel();
9790 memcpy(&gdt[entry], desc, size);
9791 + pax_close_kernel();
9792 }
9793
9794 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
9795 @@ -209,7 +218,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
9796
9797 static inline void native_load_tr_desc(void)
9798 {
9799 + pax_open_kernel();
9800 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
9801 + pax_close_kernel();
9802 }
9803
9804 static inline void native_load_gdt(const struct desc_ptr *dtr)
9805 @@ -246,8 +257,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
9806 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
9807 unsigned int i;
9808
9809 + pax_open_kernel();
9810 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
9811 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
9812 + pax_close_kernel();
9813 }
9814
9815 #define _LDT_empty(info) \
9816 @@ -310,7 +323,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
9817 }
9818
9819 #ifdef CONFIG_X86_64
9820 -static inline void set_nmi_gate(int gate, void *addr)
9821 +static inline void set_nmi_gate(int gate, const void *addr)
9822 {
9823 gate_desc s;
9824
9825 @@ -319,7 +332,7 @@ static inline void set_nmi_gate(int gate, void *addr)
9826 }
9827 #endif
9828
9829 -static inline void _set_gate(int gate, unsigned type, void *addr,
9830 +static inline void _set_gate(int gate, unsigned type, const void *addr,
9831 unsigned dpl, unsigned ist, unsigned seg)
9832 {
9833 gate_desc s;
9834 @@ -338,7 +351,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
9835 * Pentium F0 0F bugfix can have resulted in the mapped
9836 * IDT being write-protected.
9837 */
9838 -static inline void set_intr_gate(unsigned int n, void *addr)
9839 +static inline void set_intr_gate(unsigned int n, const void *addr)
9840 {
9841 BUG_ON((unsigned)n > 0xFF);
9842 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
9843 @@ -368,19 +381,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
9844 /*
9845 * This routine sets up an interrupt gate at directory privilege level 3.
9846 */
9847 -static inline void set_system_intr_gate(unsigned int n, void *addr)
9848 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
9849 {
9850 BUG_ON((unsigned)n > 0xFF);
9851 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
9852 }
9853
9854 -static inline void set_system_trap_gate(unsigned int n, void *addr)
9855 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
9856 {
9857 BUG_ON((unsigned)n > 0xFF);
9858 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
9859 }
9860
9861 -static inline void set_trap_gate(unsigned int n, void *addr)
9862 +static inline void set_trap_gate(unsigned int n, const void *addr)
9863 {
9864 BUG_ON((unsigned)n > 0xFF);
9865 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
9866 @@ -389,19 +402,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
9867 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
9868 {
9869 BUG_ON((unsigned)n > 0xFF);
9870 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
9871 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
9872 }
9873
9874 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
9875 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
9876 {
9877 BUG_ON((unsigned)n > 0xFF);
9878 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
9879 }
9880
9881 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
9882 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
9883 {
9884 BUG_ON((unsigned)n > 0xFF);
9885 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
9886 }
9887
9888 +#ifdef CONFIG_X86_32
9889 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
9890 +{
9891 + struct desc_struct d;
9892 +
9893 + if (likely(limit))
9894 + limit = (limit - 1UL) >> PAGE_SHIFT;
9895 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
9896 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
9897 +}
9898 +#endif
9899 +
9900 #endif /* _ASM_X86_DESC_H */
9901 diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
9902 index 278441f..b95a174 100644
9903 --- a/arch/x86/include/asm/desc_defs.h
9904 +++ b/arch/x86/include/asm/desc_defs.h
9905 @@ -31,6 +31,12 @@ struct desc_struct {
9906 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
9907 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
9908 };
9909 + struct {
9910 + u16 offset_low;
9911 + u16 seg;
9912 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
9913 + unsigned offset_high: 16;
9914 + } gate;
9915 };
9916 } __attribute__((packed));
9917
9918 diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
9919 index 3778256..c5d4fce 100644
9920 --- a/arch/x86/include/asm/e820.h
9921 +++ b/arch/x86/include/asm/e820.h
9922 @@ -69,7 +69,7 @@ struct e820map {
9923 #define ISA_START_ADDRESS 0xa0000
9924 #define ISA_END_ADDRESS 0x100000
9925
9926 -#define BIOS_BEGIN 0x000a0000
9927 +#define BIOS_BEGIN 0x000c0000
9928 #define BIOS_END 0x00100000
9929
9930 #define BIOS_ROM_BASE 0xffe00000
9931 diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
9932 index 5f962df..7289f09 100644
9933 --- a/arch/x86/include/asm/elf.h
9934 +++ b/arch/x86/include/asm/elf.h
9935 @@ -238,7 +238,25 @@ extern int force_personality32;
9936 the loader. We need to make sure that it is out of the way of the program
9937 that it will "exec", and that there is sufficient room for the brk. */
9938
9939 +#ifdef CONFIG_PAX_SEGMEXEC
9940 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
9941 +#else
9942 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
9943 +#endif
9944 +
9945 +#ifdef CONFIG_PAX_ASLR
9946 +#ifdef CONFIG_X86_32
9947 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
9948 +
9949 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
9950 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
9951 +#else
9952 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
9953 +
9954 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
9955 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
9956 +#endif
9957 +#endif
9958
9959 /* This yields a mask that user programs can use to figure out what
9960 instruction set this CPU supports. This could be done in user space,
9961 @@ -291,9 +309,7 @@ do { \
9962
9963 #define ARCH_DLINFO \
9964 do { \
9965 - if (vdso_enabled) \
9966 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
9967 - (unsigned long)current->mm->context.vdso); \
9968 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
9969 } while (0)
9970
9971 #define AT_SYSINFO 32
9972 @@ -304,7 +320,7 @@ do { \
9973
9974 #endif /* !CONFIG_X86_32 */
9975
9976 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
9977 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
9978
9979 #define VDSO_ENTRY \
9980 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
9981 @@ -318,9 +334,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
9982 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
9983 #define compat_arch_setup_additional_pages syscall32_setup_pages
9984
9985 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
9986 -#define arch_randomize_brk arch_randomize_brk
9987 -
9988 /*
9989 * True on X86_32 or when emulating IA32 on X86_64
9990 */
9991 diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
9992 index cc70c1c..d96d011 100644
9993 --- a/arch/x86/include/asm/emergency-restart.h
9994 +++ b/arch/x86/include/asm/emergency-restart.h
9995 @@ -15,6 +15,6 @@ enum reboot_type {
9996
9997 extern enum reboot_type reboot_type;
9998
9999 -extern void machine_emergency_restart(void);
10000 +extern void machine_emergency_restart(void) __noreturn;
10001
10002 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
10003 diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
10004 index d09bb03..4ea4194 100644
10005 --- a/arch/x86/include/asm/futex.h
10006 +++ b/arch/x86/include/asm/futex.h
10007 @@ -12,16 +12,18 @@
10008 #include <asm/system.h>
10009
10010 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
10011 + typecheck(u32 __user *, uaddr); \
10012 asm volatile("1:\t" insn "\n" \
10013 "2:\t.section .fixup,\"ax\"\n" \
10014 "3:\tmov\t%3, %1\n" \
10015 "\tjmp\t2b\n" \
10016 "\t.previous\n" \
10017 _ASM_EXTABLE(1b, 3b) \
10018 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
10019 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
10020 : "i" (-EFAULT), "0" (oparg), "1" (0))
10021
10022 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
10023 + typecheck(u32 __user *, uaddr); \
10024 asm volatile("1:\tmovl %2, %0\n" \
10025 "\tmovl\t%0, %3\n" \
10026 "\t" insn "\n" \
10027 @@ -34,7 +36,7 @@
10028 _ASM_EXTABLE(1b, 4b) \
10029 _ASM_EXTABLE(2b, 4b) \
10030 : "=&a" (oldval), "=&r" (ret), \
10031 - "+m" (*uaddr), "=&r" (tem) \
10032 + "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
10033 : "r" (oparg), "i" (-EFAULT), "1" (0))
10034
10035 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10036 @@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10037
10038 switch (op) {
10039 case FUTEX_OP_SET:
10040 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
10041 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
10042 break;
10043 case FUTEX_OP_ADD:
10044 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
10045 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
10046 uaddr, oparg);
10047 break;
10048 case FUTEX_OP_OR:
10049 @@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
10050 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
10051 return -EFAULT;
10052
10053 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
10054 + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
10055 "2:\t.section .fixup, \"ax\"\n"
10056 "3:\tmov %3, %0\n"
10057 "\tjmp 2b\n"
10058 "\t.previous\n"
10059 _ASM_EXTABLE(1b, 3b)
10060 - : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
10061 + : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
10062 : "i" (-EFAULT), "r" (newval), "1" (oldval)
10063 : "memory"
10064 );
10065 diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
10066 index eb92a6e..b98b2f4 100644
10067 --- a/arch/x86/include/asm/hw_irq.h
10068 +++ b/arch/x86/include/asm/hw_irq.h
10069 @@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
10070 extern void enable_IO_APIC(void);
10071
10072 /* Statistics */
10073 -extern atomic_t irq_err_count;
10074 -extern atomic_t irq_mis_count;
10075 +extern atomic_unchecked_t irq_err_count;
10076 +extern atomic_unchecked_t irq_mis_count;
10077
10078 /* EISA */
10079 extern void eisa_set_level_irq(unsigned int irq);
10080 diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
10081 index 2479049..3fb9795 100644
10082 --- a/arch/x86/include/asm/i387.h
10083 +++ b/arch/x86/include/asm/i387.h
10084 @@ -93,6 +93,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
10085 {
10086 int err;
10087
10088 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10089 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10090 + fx = (struct i387_fxsave_struct __user *)((void *)fx + PAX_USER_SHADOW_BASE);
10091 +#endif
10092 +
10093 /* See comment in fxsave() below. */
10094 #ifdef CONFIG_AS_FXSAVEQ
10095 asm volatile("1: fxrstorq %[fx]\n\t"
10096 @@ -122,6 +127,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
10097 {
10098 int err;
10099
10100 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10101 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10102 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
10103 +#endif
10104 +
10105 /*
10106 * Clear the bytes not touched by the fxsave and reserved
10107 * for the SW usage.
10108 @@ -278,7 +288,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
10109 "emms\n\t" /* clear stack tags */
10110 "fildl %P[addr]", /* set F?P to defined value */
10111 X86_FEATURE_FXSAVE_LEAK,
10112 - [addr] "m" (tsk->thread.fpu.has_fpu));
10113 + [addr] "m" (init_tss[smp_processor_id()].x86_tss.sp0));
10114
10115 return fpu_restore_checking(&tsk->thread.fpu);
10116 }
10117 @@ -445,7 +455,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
10118 static inline bool interrupted_user_mode(void)
10119 {
10120 struct pt_regs *regs = get_irq_regs();
10121 - return regs && user_mode_vm(regs);
10122 + return regs && user_mode(regs);
10123 }
10124
10125 /*
10126 diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
10127 index d8e8eef..99f81ae 100644
10128 --- a/arch/x86/include/asm/io.h
10129 +++ b/arch/x86/include/asm/io.h
10130 @@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
10131
10132 #include <linux/vmalloc.h>
10133
10134 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
10135 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
10136 +{
10137 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10138 +}
10139 +
10140 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
10141 +{
10142 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10143 +}
10144 +
10145 /*
10146 * Convert a virtual cached pointer to an uncached pointer
10147 */
10148 diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
10149 index bba3cf8..06bc8da 100644
10150 --- a/arch/x86/include/asm/irqflags.h
10151 +++ b/arch/x86/include/asm/irqflags.h
10152 @@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
10153 sti; \
10154 sysexit
10155
10156 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
10157 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
10158 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
10159 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
10160 +
10161 #else
10162 #define INTERRUPT_RETURN iret
10163 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
10164 diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
10165 index 5478825..839e88c 100644
10166 --- a/arch/x86/include/asm/kprobes.h
10167 +++ b/arch/x86/include/asm/kprobes.h
10168 @@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
10169 #define RELATIVEJUMP_SIZE 5
10170 #define RELATIVECALL_OPCODE 0xe8
10171 #define RELATIVE_ADDR_SIZE 4
10172 -#define MAX_STACK_SIZE 64
10173 -#define MIN_STACK_SIZE(ADDR) \
10174 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
10175 - THREAD_SIZE - (unsigned long)(ADDR))) \
10176 - ? (MAX_STACK_SIZE) \
10177 - : (((unsigned long)current_thread_info()) + \
10178 - THREAD_SIZE - (unsigned long)(ADDR)))
10179 +#define MAX_STACK_SIZE 64UL
10180 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
10181
10182 #define flush_insn_slot(p) do { } while (0)
10183
10184 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
10185 index 52d6640..a013b87 100644
10186 --- a/arch/x86/include/asm/kvm_host.h
10187 +++ b/arch/x86/include/asm/kvm_host.h
10188 @@ -663,7 +663,7 @@ struct kvm_x86_ops {
10189 int (*check_intercept)(struct kvm_vcpu *vcpu,
10190 struct x86_instruction_info *info,
10191 enum x86_intercept_stage stage);
10192 -};
10193 +} __do_const;
10194
10195 struct kvm_arch_async_pf {
10196 u32 token;
10197 diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
10198 index 9cdae5d..300d20f 100644
10199 --- a/arch/x86/include/asm/local.h
10200 +++ b/arch/x86/include/asm/local.h
10201 @@ -18,26 +18,58 @@ typedef struct {
10202
10203 static inline void local_inc(local_t *l)
10204 {
10205 - asm volatile(_ASM_INC "%0"
10206 + asm volatile(_ASM_INC "%0\n"
10207 +
10208 +#ifdef CONFIG_PAX_REFCOUNT
10209 + "jno 0f\n"
10210 + _ASM_DEC "%0\n"
10211 + "int $4\n0:\n"
10212 + _ASM_EXTABLE(0b, 0b)
10213 +#endif
10214 +
10215 : "+m" (l->a.counter));
10216 }
10217
10218 static inline void local_dec(local_t *l)
10219 {
10220 - asm volatile(_ASM_DEC "%0"
10221 + asm volatile(_ASM_DEC "%0\n"
10222 +
10223 +#ifdef CONFIG_PAX_REFCOUNT
10224 + "jno 0f\n"
10225 + _ASM_INC "%0\n"
10226 + "int $4\n0:\n"
10227 + _ASM_EXTABLE(0b, 0b)
10228 +#endif
10229 +
10230 : "+m" (l->a.counter));
10231 }
10232
10233 static inline void local_add(long i, local_t *l)
10234 {
10235 - asm volatile(_ASM_ADD "%1,%0"
10236 + asm volatile(_ASM_ADD "%1,%0\n"
10237 +
10238 +#ifdef CONFIG_PAX_REFCOUNT
10239 + "jno 0f\n"
10240 + _ASM_SUB "%1,%0\n"
10241 + "int $4\n0:\n"
10242 + _ASM_EXTABLE(0b, 0b)
10243 +#endif
10244 +
10245 : "+m" (l->a.counter)
10246 : "ir" (i));
10247 }
10248
10249 static inline void local_sub(long i, local_t *l)
10250 {
10251 - asm volatile(_ASM_SUB "%1,%0"
10252 + asm volatile(_ASM_SUB "%1,%0\n"
10253 +
10254 +#ifdef CONFIG_PAX_REFCOUNT
10255 + "jno 0f\n"
10256 + _ASM_ADD "%1,%0\n"
10257 + "int $4\n0:\n"
10258 + _ASM_EXTABLE(0b, 0b)
10259 +#endif
10260 +
10261 : "+m" (l->a.counter)
10262 : "ir" (i));
10263 }
10264 @@ -55,7 +87,16 @@ static inline int local_sub_and_test(long i, local_t *l)
10265 {
10266 unsigned char c;
10267
10268 - asm volatile(_ASM_SUB "%2,%0; sete %1"
10269 + asm volatile(_ASM_SUB "%2,%0\n"
10270 +
10271 +#ifdef CONFIG_PAX_REFCOUNT
10272 + "jno 0f\n"
10273 + _ASM_ADD "%2,%0\n"
10274 + "int $4\n0:\n"
10275 + _ASM_EXTABLE(0b, 0b)
10276 +#endif
10277 +
10278 + "sete %1\n"
10279 : "+m" (l->a.counter), "=qm" (c)
10280 : "ir" (i) : "memory");
10281 return c;
10282 @@ -73,7 +114,16 @@ static inline int local_dec_and_test(local_t *l)
10283 {
10284 unsigned char c;
10285
10286 - asm volatile(_ASM_DEC "%0; sete %1"
10287 + asm volatile(_ASM_DEC "%0\n"
10288 +
10289 +#ifdef CONFIG_PAX_REFCOUNT
10290 + "jno 0f\n"
10291 + _ASM_INC "%0\n"
10292 + "int $4\n0:\n"
10293 + _ASM_EXTABLE(0b, 0b)
10294 +#endif
10295 +
10296 + "sete %1\n"
10297 : "+m" (l->a.counter), "=qm" (c)
10298 : : "memory");
10299 return c != 0;
10300 @@ -91,7 +141,16 @@ static inline int local_inc_and_test(local_t *l)
10301 {
10302 unsigned char c;
10303
10304 - asm volatile(_ASM_INC "%0; sete %1"
10305 + asm volatile(_ASM_INC "%0\n"
10306 +
10307 +#ifdef CONFIG_PAX_REFCOUNT
10308 + "jno 0f\n"
10309 + _ASM_DEC "%0\n"
10310 + "int $4\n0:\n"
10311 + _ASM_EXTABLE(0b, 0b)
10312 +#endif
10313 +
10314 + "sete %1\n"
10315 : "+m" (l->a.counter), "=qm" (c)
10316 : : "memory");
10317 return c != 0;
10318 @@ -110,7 +169,16 @@ static inline int local_add_negative(long i, local_t *l)
10319 {
10320 unsigned char c;
10321
10322 - asm volatile(_ASM_ADD "%2,%0; sets %1"
10323 + asm volatile(_ASM_ADD "%2,%0\n"
10324 +
10325 +#ifdef CONFIG_PAX_REFCOUNT
10326 + "jno 0f\n"
10327 + _ASM_SUB "%2,%0\n"
10328 + "int $4\n0:\n"
10329 + _ASM_EXTABLE(0b, 0b)
10330 +#endif
10331 +
10332 + "sets %1\n"
10333 : "+m" (l->a.counter), "=qm" (c)
10334 : "ir" (i) : "memory");
10335 return c;
10336 @@ -133,7 +201,15 @@ static inline long local_add_return(long i, local_t *l)
10337 #endif
10338 /* Modern 486+ processor */
10339 __i = i;
10340 - asm volatile(_ASM_XADD "%0, %1;"
10341 + asm volatile(_ASM_XADD "%0, %1\n"
10342 +
10343 +#ifdef CONFIG_PAX_REFCOUNT
10344 + "jno 0f\n"
10345 + _ASM_MOV "%0,%1\n"
10346 + "int $4\n0:\n"
10347 + _ASM_EXTABLE(0b, 0b)
10348 +#endif
10349 +
10350 : "+r" (i), "+m" (l->a.counter)
10351 : : "memory");
10352 return i + __i;
10353 diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
10354 index 593e51d..fa69c9a 100644
10355 --- a/arch/x86/include/asm/mman.h
10356 +++ b/arch/x86/include/asm/mman.h
10357 @@ -5,4 +5,14 @@
10358
10359 #include <asm-generic/mman.h>
10360
10361 +#ifdef __KERNEL__
10362 +#ifndef __ASSEMBLY__
10363 +#ifdef CONFIG_X86_32
10364 +#define arch_mmap_check i386_mmap_check
10365 +int i386_mmap_check(unsigned long addr, unsigned long len,
10366 + unsigned long flags);
10367 +#endif
10368 +#endif
10369 +#endif
10370 +
10371 #endif /* _ASM_X86_MMAN_H */
10372 diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
10373 index 5f55e69..e20bfb1 100644
10374 --- a/arch/x86/include/asm/mmu.h
10375 +++ b/arch/x86/include/asm/mmu.h
10376 @@ -9,7 +9,7 @@
10377 * we put the segment information here.
10378 */
10379 typedef struct {
10380 - void *ldt;
10381 + struct desc_struct *ldt;
10382 int size;
10383
10384 #ifdef CONFIG_X86_64
10385 @@ -18,7 +18,19 @@ typedef struct {
10386 #endif
10387
10388 struct mutex lock;
10389 - void *vdso;
10390 + unsigned long vdso;
10391 +
10392 +#ifdef CONFIG_X86_32
10393 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
10394 + unsigned long user_cs_base;
10395 + unsigned long user_cs_limit;
10396 +
10397 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10398 + cpumask_t cpu_user_cs_mask;
10399 +#endif
10400 +
10401 +#endif
10402 +#endif
10403 } mm_context_t;
10404
10405 #ifdef CONFIG_SMP
10406 diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
10407 index 6902152..399f3a2 100644
10408 --- a/arch/x86/include/asm/mmu_context.h
10409 +++ b/arch/x86/include/asm/mmu_context.h
10410 @@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
10411
10412 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
10413 {
10414 +
10415 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10416 + unsigned int i;
10417 + pgd_t *pgd;
10418 +
10419 + pax_open_kernel();
10420 + pgd = get_cpu_pgd(smp_processor_id());
10421 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
10422 + set_pgd_batched(pgd+i, native_make_pgd(0));
10423 + pax_close_kernel();
10424 +#endif
10425 +
10426 #ifdef CONFIG_SMP
10427 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
10428 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
10429 @@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10430 struct task_struct *tsk)
10431 {
10432 unsigned cpu = smp_processor_id();
10433 +#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10434 + int tlbstate = TLBSTATE_OK;
10435 +#endif
10436
10437 if (likely(prev != next)) {
10438 #ifdef CONFIG_SMP
10439 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10440 + tlbstate = percpu_read(cpu_tlbstate.state);
10441 +#endif
10442 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10443 percpu_write(cpu_tlbstate.active_mm, next);
10444 #endif
10445 cpumask_set_cpu(cpu, mm_cpumask(next));
10446
10447 /* Re-load page tables */
10448 +#ifdef CONFIG_PAX_PER_CPU_PGD
10449 + pax_open_kernel();
10450 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
10451 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
10452 + pax_close_kernel();
10453 + load_cr3(get_cpu_pgd(cpu));
10454 +#else
10455 load_cr3(next->pgd);
10456 +#endif
10457
10458 /* stop flush ipis for the previous mm */
10459 cpumask_clear_cpu(cpu, mm_cpumask(prev));
10460 @@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10461 */
10462 if (unlikely(prev->context.ldt != next->context.ldt))
10463 load_LDT_nolock(&next->context);
10464 - }
10465 +
10466 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10467 + if (!(__supported_pte_mask & _PAGE_NX)) {
10468 + smp_mb__before_clear_bit();
10469 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
10470 + smp_mb__after_clear_bit();
10471 + cpu_set(cpu, next->context.cpu_user_cs_mask);
10472 + }
10473 +#endif
10474 +
10475 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10476 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
10477 + prev->context.user_cs_limit != next->context.user_cs_limit))
10478 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10479 #ifdef CONFIG_SMP
10480 + else if (unlikely(tlbstate != TLBSTATE_OK))
10481 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10482 +#endif
10483 +#endif
10484 +
10485 + }
10486 else {
10487 +
10488 +#ifdef CONFIG_PAX_PER_CPU_PGD
10489 + pax_open_kernel();
10490 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
10491 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
10492 + pax_close_kernel();
10493 + load_cr3(get_cpu_pgd(cpu));
10494 +#endif
10495 +
10496 +#ifdef CONFIG_SMP
10497 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10498 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
10499
10500 @@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10501 * tlb flush IPI delivery. We must reload CR3
10502 * to make sure to use no freed page tables.
10503 */
10504 +
10505 +#ifndef CONFIG_PAX_PER_CPU_PGD
10506 load_cr3(next->pgd);
10507 +#endif
10508 +
10509 load_LDT_nolock(&next->context);
10510 +
10511 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
10512 + if (!(__supported_pte_mask & _PAGE_NX))
10513 + cpu_set(cpu, next->context.cpu_user_cs_mask);
10514 +#endif
10515 +
10516 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10517 +#ifdef CONFIG_PAX_PAGEEXEC
10518 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
10519 +#endif
10520 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10521 +#endif
10522 +
10523 }
10524 +#endif
10525 }
10526 -#endif
10527 }
10528
10529 #define activate_mm(prev, next) \
10530 diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
10531 index 9eae775..c914fea 100644
10532 --- a/arch/x86/include/asm/module.h
10533 +++ b/arch/x86/include/asm/module.h
10534 @@ -5,6 +5,7 @@
10535
10536 #ifdef CONFIG_X86_64
10537 /* X86_64 does not define MODULE_PROC_FAMILY */
10538 +#define MODULE_PROC_FAMILY ""
10539 #elif defined CONFIG_M386
10540 #define MODULE_PROC_FAMILY "386 "
10541 #elif defined CONFIG_M486
10542 @@ -59,8 +60,20 @@
10543 #error unknown processor family
10544 #endif
10545
10546 -#ifdef CONFIG_X86_32
10547 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
10548 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
10549 +#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
10550 +#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
10551 +#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
10552 +#else
10553 +#define MODULE_PAX_KERNEXEC ""
10554 #endif
10555
10556 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10557 +#define MODULE_PAX_UDEREF "UDEREF "
10558 +#else
10559 +#define MODULE_PAX_UDEREF ""
10560 +#endif
10561 +
10562 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
10563 +
10564 #endif /* _ASM_X86_MODULE_H */
10565 diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
10566 index 7639dbf..e08a58c 100644
10567 --- a/arch/x86/include/asm/page_64_types.h
10568 +++ b/arch/x86/include/asm/page_64_types.h
10569 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
10570
10571 /* duplicated to the one in bootmem.h */
10572 extern unsigned long max_pfn;
10573 -extern unsigned long phys_base;
10574 +extern const unsigned long phys_base;
10575
10576 extern unsigned long __phys_addr(unsigned long);
10577 #define __phys_reloc_hide(x) (x)
10578 diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
10579 index a7d2db9..edb023e 100644
10580 --- a/arch/x86/include/asm/paravirt.h
10581 +++ b/arch/x86/include/asm/paravirt.h
10582 @@ -667,6 +667,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
10583 val);
10584 }
10585
10586 +static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
10587 +{
10588 + pgdval_t val = native_pgd_val(pgd);
10589 +
10590 + if (sizeof(pgdval_t) > sizeof(long))
10591 + PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
10592 + val, (u64)val >> 32);
10593 + else
10594 + PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
10595 + val);
10596 +}
10597 +
10598 static inline void pgd_clear(pgd_t *pgdp)
10599 {
10600 set_pgd(pgdp, __pgd(0));
10601 @@ -748,6 +760,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
10602 pv_mmu_ops.set_fixmap(idx, phys, flags);
10603 }
10604
10605 +#ifdef CONFIG_PAX_KERNEXEC
10606 +static inline unsigned long pax_open_kernel(void)
10607 +{
10608 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
10609 +}
10610 +
10611 +static inline unsigned long pax_close_kernel(void)
10612 +{
10613 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
10614 +}
10615 +#else
10616 +static inline unsigned long pax_open_kernel(void) { return 0; }
10617 +static inline unsigned long pax_close_kernel(void) { return 0; }
10618 +#endif
10619 +
10620 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
10621
10622 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
10623 @@ -964,7 +991,7 @@ extern void default_banner(void);
10624
10625 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
10626 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
10627 -#define PARA_INDIRECT(addr) *%cs:addr
10628 +#define PARA_INDIRECT(addr) *%ss:addr
10629 #endif
10630
10631 #define INTERRUPT_RETURN \
10632 @@ -1041,6 +1068,21 @@ extern void default_banner(void);
10633 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
10634 CLBR_NONE, \
10635 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
10636 +
10637 +#define GET_CR0_INTO_RDI \
10638 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
10639 + mov %rax,%rdi
10640 +
10641 +#define SET_RDI_INTO_CR0 \
10642 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
10643 +
10644 +#define GET_CR3_INTO_RDI \
10645 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
10646 + mov %rax,%rdi
10647 +
10648 +#define SET_RDI_INTO_CR3 \
10649 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
10650 +
10651 #endif /* CONFIG_X86_32 */
10652
10653 #endif /* __ASSEMBLY__ */
10654 diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
10655 index 8e8b9a4..f07d725 100644
10656 --- a/arch/x86/include/asm/paravirt_types.h
10657 +++ b/arch/x86/include/asm/paravirt_types.h
10658 @@ -84,20 +84,20 @@ struct pv_init_ops {
10659 */
10660 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
10661 unsigned long addr, unsigned len);
10662 -};
10663 +} __no_const;
10664
10665
10666 struct pv_lazy_ops {
10667 /* Set deferred update mode, used for batching operations. */
10668 void (*enter)(void);
10669 void (*leave)(void);
10670 -};
10671 +} __no_const;
10672
10673 struct pv_time_ops {
10674 unsigned long long (*sched_clock)(void);
10675 unsigned long long (*steal_clock)(int cpu);
10676 unsigned long (*get_tsc_khz)(void);
10677 -};
10678 +} __no_const;
10679
10680 struct pv_cpu_ops {
10681 /* hooks for various privileged instructions */
10682 @@ -193,7 +193,7 @@ struct pv_cpu_ops {
10683
10684 void (*start_context_switch)(struct task_struct *prev);
10685 void (*end_context_switch)(struct task_struct *next);
10686 -};
10687 +} __no_const;
10688
10689 struct pv_irq_ops {
10690 /*
10691 @@ -224,7 +224,7 @@ struct pv_apic_ops {
10692 unsigned long start_eip,
10693 unsigned long start_esp);
10694 #endif
10695 -};
10696 +} __no_const;
10697
10698 struct pv_mmu_ops {
10699 unsigned long (*read_cr2)(void);
10700 @@ -313,6 +313,7 @@ struct pv_mmu_ops {
10701 struct paravirt_callee_save make_pud;
10702
10703 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
10704 + void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
10705 #endif /* PAGETABLE_LEVELS == 4 */
10706 #endif /* PAGETABLE_LEVELS >= 3 */
10707
10708 @@ -324,6 +325,12 @@ struct pv_mmu_ops {
10709 an mfn. We can tell which is which from the index. */
10710 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
10711 phys_addr_t phys, pgprot_t flags);
10712 +
10713 +#ifdef CONFIG_PAX_KERNEXEC
10714 + unsigned long (*pax_open_kernel)(void);
10715 + unsigned long (*pax_close_kernel)(void);
10716 +#endif
10717 +
10718 };
10719
10720 struct arch_spinlock;
10721 @@ -334,7 +341,7 @@ struct pv_lock_ops {
10722 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
10723 int (*spin_trylock)(struct arch_spinlock *lock);
10724 void (*spin_unlock)(struct arch_spinlock *lock);
10725 -};
10726 +} __no_const;
10727
10728 /* This contains all the paravirt structures: we get a convenient
10729 * number for each function using the offset which we use to indicate
10730 diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
10731 index b4389a4..b7ff22c 100644
10732 --- a/arch/x86/include/asm/pgalloc.h
10733 +++ b/arch/x86/include/asm/pgalloc.h
10734 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
10735 pmd_t *pmd, pte_t *pte)
10736 {
10737 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
10738 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
10739 +}
10740 +
10741 +static inline void pmd_populate_user(struct mm_struct *mm,
10742 + pmd_t *pmd, pte_t *pte)
10743 +{
10744 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
10745 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
10746 }
10747
10748 diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
10749 index 98391db..8f6984e 100644
10750 --- a/arch/x86/include/asm/pgtable-2level.h
10751 +++ b/arch/x86/include/asm/pgtable-2level.h
10752 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
10753
10754 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
10755 {
10756 + pax_open_kernel();
10757 *pmdp = pmd;
10758 + pax_close_kernel();
10759 }
10760
10761 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
10762 diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
10763 index effff47..f9e4035 100644
10764 --- a/arch/x86/include/asm/pgtable-3level.h
10765 +++ b/arch/x86/include/asm/pgtable-3level.h
10766 @@ -38,12 +38,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
10767
10768 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
10769 {
10770 + pax_open_kernel();
10771 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
10772 + pax_close_kernel();
10773 }
10774
10775 static inline void native_set_pud(pud_t *pudp, pud_t pud)
10776 {
10777 + pax_open_kernel();
10778 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
10779 + pax_close_kernel();
10780 }
10781
10782 /*
10783 diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
10784 index 49afb3f..ed14d07 100644
10785 --- a/arch/x86/include/asm/pgtable.h
10786 +++ b/arch/x86/include/asm/pgtable.h
10787 @@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
10788
10789 #ifndef __PAGETABLE_PUD_FOLDED
10790 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
10791 +#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
10792 #define pgd_clear(pgd) native_pgd_clear(pgd)
10793 #endif
10794
10795 @@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
10796
10797 #define arch_end_context_switch(prev) do {} while(0)
10798
10799 +#define pax_open_kernel() native_pax_open_kernel()
10800 +#define pax_close_kernel() native_pax_close_kernel()
10801 #endif /* CONFIG_PARAVIRT */
10802
10803 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
10804 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
10805 +
10806 +#ifdef CONFIG_PAX_KERNEXEC
10807 +static inline unsigned long native_pax_open_kernel(void)
10808 +{
10809 + unsigned long cr0;
10810 +
10811 + preempt_disable();
10812 + barrier();
10813 + cr0 = read_cr0() ^ X86_CR0_WP;
10814 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
10815 + write_cr0(cr0);
10816 + return cr0 ^ X86_CR0_WP;
10817 +}
10818 +
10819 +static inline unsigned long native_pax_close_kernel(void)
10820 +{
10821 + unsigned long cr0;
10822 +
10823 + cr0 = read_cr0() ^ X86_CR0_WP;
10824 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
10825 + write_cr0(cr0);
10826 + barrier();
10827 + preempt_enable_no_resched();
10828 + return cr0 ^ X86_CR0_WP;
10829 +}
10830 +#else
10831 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
10832 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
10833 +#endif
10834 +
10835 /*
10836 * The following only work if pte_present() is true.
10837 * Undefined behaviour if not..
10838 */
10839 +static inline int pte_user(pte_t pte)
10840 +{
10841 + return pte_val(pte) & _PAGE_USER;
10842 +}
10843 +
10844 static inline int pte_dirty(pte_t pte)
10845 {
10846 return pte_flags(pte) & _PAGE_DIRTY;
10847 @@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
10848 return pte_clear_flags(pte, _PAGE_RW);
10849 }
10850
10851 +static inline pte_t pte_mkread(pte_t pte)
10852 +{
10853 + return __pte(pte_val(pte) | _PAGE_USER);
10854 +}
10855 +
10856 static inline pte_t pte_mkexec(pte_t pte)
10857 {
10858 - return pte_clear_flags(pte, _PAGE_NX);
10859 +#ifdef CONFIG_X86_PAE
10860 + if (__supported_pte_mask & _PAGE_NX)
10861 + return pte_clear_flags(pte, _PAGE_NX);
10862 + else
10863 +#endif
10864 + return pte_set_flags(pte, _PAGE_USER);
10865 +}
10866 +
10867 +static inline pte_t pte_exprotect(pte_t pte)
10868 +{
10869 +#ifdef CONFIG_X86_PAE
10870 + if (__supported_pte_mask & _PAGE_NX)
10871 + return pte_set_flags(pte, _PAGE_NX);
10872 + else
10873 +#endif
10874 + return pte_clear_flags(pte, _PAGE_USER);
10875 }
10876
10877 static inline pte_t pte_mkdirty(pte_t pte)
10878 @@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
10879 #endif
10880
10881 #ifndef __ASSEMBLY__
10882 +
10883 +#ifdef CONFIG_PAX_PER_CPU_PGD
10884 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
10885 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
10886 +{
10887 + return cpu_pgd[cpu];
10888 +}
10889 +#endif
10890 +
10891 #include <linux/mm_types.h>
10892
10893 static inline int pte_none(pte_t pte)
10894 @@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
10895
10896 static inline int pgd_bad(pgd_t pgd)
10897 {
10898 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
10899 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
10900 }
10901
10902 static inline int pgd_none(pgd_t pgd)
10903 @@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
10904 * pgd_offset() returns a (pgd_t *)
10905 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
10906 */
10907 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
10908 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
10909 +
10910 +#ifdef CONFIG_PAX_PER_CPU_PGD
10911 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
10912 +#endif
10913 +
10914 /*
10915 * a shortcut which implies the use of the kernel's pgd, instead
10916 * of a process's
10917 @@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
10918 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
10919 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
10920
10921 +#ifdef CONFIG_X86_32
10922 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
10923 +#else
10924 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
10925 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
10926 +
10927 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10928 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
10929 +#else
10930 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
10931 +#endif
10932 +
10933 +#endif
10934 +
10935 #ifndef __ASSEMBLY__
10936
10937 extern int direct_gbpages;
10938 @@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
10939 * dst and src can be on the same page, but the range must not overlap,
10940 * and must not cross a page boundary.
10941 */
10942 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
10943 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
10944 {
10945 - memcpy(dst, src, count * sizeof(pgd_t));
10946 + pax_open_kernel();
10947 + while (count--)
10948 + *dst++ = *src++;
10949 + pax_close_kernel();
10950 }
10951
10952 +#ifdef CONFIG_PAX_PER_CPU_PGD
10953 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
10954 +#endif
10955 +
10956 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10957 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
10958 +#else
10959 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
10960 +#endif
10961
10962 #include <asm-generic/pgtable.h>
10963 #endif /* __ASSEMBLY__ */
10964 diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
10965 index 0c92113..34a77c6 100644
10966 --- a/arch/x86/include/asm/pgtable_32.h
10967 +++ b/arch/x86/include/asm/pgtable_32.h
10968 @@ -25,9 +25,6 @@
10969 struct mm_struct;
10970 struct vm_area_struct;
10971
10972 -extern pgd_t swapper_pg_dir[1024];
10973 -extern pgd_t initial_page_table[1024];
10974 -
10975 static inline void pgtable_cache_init(void) { }
10976 static inline void check_pgt_cache(void) { }
10977 void paging_init(void);
10978 @@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
10979 # include <asm/pgtable-2level.h>
10980 #endif
10981
10982 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
10983 +extern pgd_t initial_page_table[PTRS_PER_PGD];
10984 +#ifdef CONFIG_X86_PAE
10985 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
10986 +#endif
10987 +
10988 #if defined(CONFIG_HIGHPTE)
10989 #define pte_offset_map(dir, address) \
10990 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
10991 @@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
10992 /* Clear a kernel PTE and flush it from the TLB */
10993 #define kpte_clear_flush(ptep, vaddr) \
10994 do { \
10995 + pax_open_kernel(); \
10996 pte_clear(&init_mm, (vaddr), (ptep)); \
10997 + pax_close_kernel(); \
10998 __flush_tlb_one((vaddr)); \
10999 } while (0)
11000
11001 @@ -74,6 +79,9 @@ do { \
11002
11003 #endif /* !__ASSEMBLY__ */
11004
11005 +#define HAVE_ARCH_UNMAPPED_AREA
11006 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
11007 +
11008 /*
11009 * kern_addr_valid() is (1) for FLATMEM and (0) for
11010 * SPARSEMEM and DISCONTIGMEM
11011 diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
11012 index ed5903b..c7fe163 100644
11013 --- a/arch/x86/include/asm/pgtable_32_types.h
11014 +++ b/arch/x86/include/asm/pgtable_32_types.h
11015 @@ -8,7 +8,7 @@
11016 */
11017 #ifdef CONFIG_X86_PAE
11018 # include <asm/pgtable-3level_types.h>
11019 -# define PMD_SIZE (1UL << PMD_SHIFT)
11020 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
11021 # define PMD_MASK (~(PMD_SIZE - 1))
11022 #else
11023 # include <asm/pgtable-2level_types.h>
11024 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
11025 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
11026 #endif
11027
11028 +#ifdef CONFIG_PAX_KERNEXEC
11029 +#ifndef __ASSEMBLY__
11030 +extern unsigned char MODULES_EXEC_VADDR[];
11031 +extern unsigned char MODULES_EXEC_END[];
11032 +#endif
11033 +#include <asm/boot.h>
11034 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
11035 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
11036 +#else
11037 +#define ktla_ktva(addr) (addr)
11038 +#define ktva_ktla(addr) (addr)
11039 +#endif
11040 +
11041 #define MODULES_VADDR VMALLOC_START
11042 #define MODULES_END VMALLOC_END
11043 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
11044 diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
11045 index 975f709..107976d 100644
11046 --- a/arch/x86/include/asm/pgtable_64.h
11047 +++ b/arch/x86/include/asm/pgtable_64.h
11048 @@ -16,10 +16,14 @@
11049
11050 extern pud_t level3_kernel_pgt[512];
11051 extern pud_t level3_ident_pgt[512];
11052 +extern pud_t level3_vmalloc_start_pgt[512];
11053 +extern pud_t level3_vmalloc_end_pgt[512];
11054 +extern pud_t level3_vmemmap_pgt[512];
11055 +extern pud_t level2_vmemmap_pgt[512];
11056 extern pmd_t level2_kernel_pgt[512];
11057 extern pmd_t level2_fixmap_pgt[512];
11058 -extern pmd_t level2_ident_pgt[512];
11059 -extern pgd_t init_level4_pgt[];
11060 +extern pmd_t level2_ident_pgt[512*2];
11061 +extern pgd_t init_level4_pgt[512];
11062
11063 #define swapper_pg_dir init_level4_pgt
11064
11065 @@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11066
11067 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11068 {
11069 + pax_open_kernel();
11070 *pmdp = pmd;
11071 + pax_close_kernel();
11072 }
11073
11074 static inline void native_pmd_clear(pmd_t *pmd)
11075 @@ -107,6 +113,13 @@ static inline void native_pud_clear(pud_t *pud)
11076
11077 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
11078 {
11079 + pax_open_kernel();
11080 + *pgdp = pgd;
11081 + pax_close_kernel();
11082 +}
11083 +
11084 +static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
11085 +{
11086 *pgdp = pgd;
11087 }
11088
11089 diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
11090 index 766ea16..5b96cb3 100644
11091 --- a/arch/x86/include/asm/pgtable_64_types.h
11092 +++ b/arch/x86/include/asm/pgtable_64_types.h
11093 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
11094 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
11095 #define MODULES_END _AC(0xffffffffff000000, UL)
11096 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
11097 +#define MODULES_EXEC_VADDR MODULES_VADDR
11098 +#define MODULES_EXEC_END MODULES_END
11099 +
11100 +#define ktla_ktva(addr) (addr)
11101 +#define ktva_ktla(addr) (addr)
11102
11103 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
11104 diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
11105 index 013286a..8b42f4f 100644
11106 --- a/arch/x86/include/asm/pgtable_types.h
11107 +++ b/arch/x86/include/asm/pgtable_types.h
11108 @@ -16,13 +16,12 @@
11109 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
11110 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
11111 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
11112 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
11113 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
11114 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
11115 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
11116 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
11117 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
11118 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
11119 -#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
11120 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
11121 +#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
11122 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
11123
11124 /* If _PAGE_BIT_PRESENT is clear, we use these: */
11125 @@ -40,7 +39,6 @@
11126 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
11127 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
11128 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
11129 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
11130 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
11131 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
11132 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
11133 @@ -57,8 +55,10 @@
11134
11135 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
11136 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
11137 -#else
11138 +#elif defined(CONFIG_KMEMCHECK)
11139 #define _PAGE_NX (_AT(pteval_t, 0))
11140 +#else
11141 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
11142 #endif
11143
11144 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
11145 @@ -96,6 +96,9 @@
11146 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
11147 _PAGE_ACCESSED)
11148
11149 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
11150 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
11151 +
11152 #define __PAGE_KERNEL_EXEC \
11153 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
11154 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
11155 @@ -106,7 +109,7 @@
11156 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
11157 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
11158 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
11159 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
11160 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
11161 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
11162 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
11163 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
11164 @@ -168,8 +171,8 @@
11165 * bits are combined, this will alow user to access the high address mapped
11166 * VDSO in the presence of CONFIG_COMPAT_VDSO
11167 */
11168 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
11169 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
11170 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11171 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11172 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
11173 #endif
11174
11175 @@ -207,7 +210,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
11176 {
11177 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
11178 }
11179 +#endif
11180
11181 +#if PAGETABLE_LEVELS == 3
11182 +#include <asm-generic/pgtable-nopud.h>
11183 +#endif
11184 +
11185 +#if PAGETABLE_LEVELS == 2
11186 +#include <asm-generic/pgtable-nopmd.h>
11187 +#endif
11188 +
11189 +#ifndef __ASSEMBLY__
11190 #if PAGETABLE_LEVELS > 3
11191 typedef struct { pudval_t pud; } pud_t;
11192
11193 @@ -221,8 +234,6 @@ static inline pudval_t native_pud_val(pud_t pud)
11194 return pud.pud;
11195 }
11196 #else
11197 -#include <asm-generic/pgtable-nopud.h>
11198 -
11199 static inline pudval_t native_pud_val(pud_t pud)
11200 {
11201 return native_pgd_val(pud.pgd);
11202 @@ -242,8 +253,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
11203 return pmd.pmd;
11204 }
11205 #else
11206 -#include <asm-generic/pgtable-nopmd.h>
11207 -
11208 static inline pmdval_t native_pmd_val(pmd_t pmd)
11209 {
11210 return native_pgd_val(pmd.pud.pgd);
11211 @@ -283,7 +292,6 @@ typedef struct page *pgtable_t;
11212
11213 extern pteval_t __supported_pte_mask;
11214 extern void set_nx(void);
11215 -extern int nx_enabled;
11216
11217 #define pgprot_writecombine pgprot_writecombine
11218 extern pgprot_t pgprot_writecombine(pgprot_t prot);
11219 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
11220 index 58545c9..fe6fc38e 100644
11221 --- a/arch/x86/include/asm/processor.h
11222 +++ b/arch/x86/include/asm/processor.h
11223 @@ -266,7 +266,7 @@ struct tss_struct {
11224
11225 } ____cacheline_aligned;
11226
11227 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
11228 +extern struct tss_struct init_tss[NR_CPUS];
11229
11230 /*
11231 * Save the original ist values for checking stack pointers during debugging
11232 @@ -860,11 +860,18 @@ static inline void spin_lock_prefetch(const void *x)
11233 */
11234 #define TASK_SIZE PAGE_OFFSET
11235 #define TASK_SIZE_MAX TASK_SIZE
11236 +
11237 +#ifdef CONFIG_PAX_SEGMEXEC
11238 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
11239 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
11240 +#else
11241 #define STACK_TOP TASK_SIZE
11242 -#define STACK_TOP_MAX STACK_TOP
11243 +#endif
11244 +
11245 +#define STACK_TOP_MAX TASK_SIZE
11246
11247 #define INIT_THREAD { \
11248 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
11249 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11250 .vm86_info = NULL, \
11251 .sysenter_cs = __KERNEL_CS, \
11252 .io_bitmap_ptr = NULL, \
11253 @@ -878,7 +885,7 @@ static inline void spin_lock_prefetch(const void *x)
11254 */
11255 #define INIT_TSS { \
11256 .x86_tss = { \
11257 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
11258 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11259 .ss0 = __KERNEL_DS, \
11260 .ss1 = __KERNEL_CS, \
11261 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
11262 @@ -889,11 +896,7 @@ static inline void spin_lock_prefetch(const void *x)
11263 extern unsigned long thread_saved_pc(struct task_struct *tsk);
11264
11265 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
11266 -#define KSTK_TOP(info) \
11267 -({ \
11268 - unsigned long *__ptr = (unsigned long *)(info); \
11269 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
11270 -})
11271 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
11272
11273 /*
11274 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
11275 @@ -908,7 +911,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11276 #define task_pt_regs(task) \
11277 ({ \
11278 struct pt_regs *__regs__; \
11279 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
11280 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
11281 __regs__ - 1; \
11282 })
11283
11284 @@ -918,13 +921,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11285 /*
11286 * User space process size. 47bits minus one guard page.
11287 */
11288 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
11289 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
11290
11291 /* This decides where the kernel will search for a free chunk of vm
11292 * space during mmap's.
11293 */
11294 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
11295 - 0xc0000000 : 0xFFFFe000)
11296 + 0xc0000000 : 0xFFFFf000)
11297
11298 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
11299 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
11300 @@ -935,11 +938,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11301 #define STACK_TOP_MAX TASK_SIZE_MAX
11302
11303 #define INIT_THREAD { \
11304 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11305 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11306 }
11307
11308 #define INIT_TSS { \
11309 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11310 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11311 }
11312
11313 /*
11314 @@ -961,6 +964,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
11315 */
11316 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
11317
11318 +#ifdef CONFIG_PAX_SEGMEXEC
11319 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
11320 +#endif
11321 +
11322 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
11323
11324 /* Get/set a process' ability to use the timestamp counter instruction */
11325 diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
11326 index 3566454..4bdfb8c 100644
11327 --- a/arch/x86/include/asm/ptrace.h
11328 +++ b/arch/x86/include/asm/ptrace.h
11329 @@ -156,28 +156,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
11330 }
11331
11332 /*
11333 - * user_mode_vm(regs) determines whether a register set came from user mode.
11334 + * user_mode(regs) determines whether a register set came from user mode.
11335 * This is true if V8086 mode was enabled OR if the register set was from
11336 * protected mode with RPL-3 CS value. This tricky test checks that with
11337 * one comparison. Many places in the kernel can bypass this full check
11338 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
11339 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
11340 + * be used.
11341 */
11342 -static inline int user_mode(struct pt_regs *regs)
11343 +static inline int user_mode_novm(struct pt_regs *regs)
11344 {
11345 #ifdef CONFIG_X86_32
11346 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
11347 #else
11348 - return !!(regs->cs & 3);
11349 + return !!(regs->cs & SEGMENT_RPL_MASK);
11350 #endif
11351 }
11352
11353 -static inline int user_mode_vm(struct pt_regs *regs)
11354 +static inline int user_mode(struct pt_regs *regs)
11355 {
11356 #ifdef CONFIG_X86_32
11357 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
11358 USER_RPL;
11359 #else
11360 - return user_mode(regs);
11361 + return user_mode_novm(regs);
11362 #endif
11363 }
11364
11365 @@ -193,15 +194,16 @@ static inline int v8086_mode(struct pt_regs *regs)
11366 #ifdef CONFIG_X86_64
11367 static inline bool user_64bit_mode(struct pt_regs *regs)
11368 {
11369 + unsigned long cs = regs->cs & 0xffff;
11370 #ifndef CONFIG_PARAVIRT
11371 /*
11372 * On non-paravirt systems, this is the only long mode CPL 3
11373 * selector. We do not allow long mode selectors in the LDT.
11374 */
11375 - return regs->cs == __USER_CS;
11376 + return cs == __USER_CS;
11377 #else
11378 /* Headers are too twisted for this to go in paravirt.h. */
11379 - return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
11380 + return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
11381 #endif
11382 }
11383 #endif
11384 diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
11385 index 92f29706..a79cbbb 100644
11386 --- a/arch/x86/include/asm/reboot.h
11387 +++ b/arch/x86/include/asm/reboot.h
11388 @@ -6,19 +6,19 @@
11389 struct pt_regs;
11390
11391 struct machine_ops {
11392 - void (*restart)(char *cmd);
11393 - void (*halt)(void);
11394 - void (*power_off)(void);
11395 + void (* __noreturn restart)(char *cmd);
11396 + void (* __noreturn halt)(void);
11397 + void (* __noreturn power_off)(void);
11398 void (*shutdown)(void);
11399 void (*crash_shutdown)(struct pt_regs *);
11400 - void (*emergency_restart)(void);
11401 -};
11402 + void (* __noreturn emergency_restart)(void);
11403 +} __no_const;
11404
11405 extern struct machine_ops machine_ops;
11406
11407 void native_machine_crash_shutdown(struct pt_regs *regs);
11408 void native_machine_shutdown(void);
11409 -void machine_real_restart(unsigned int type);
11410 +void machine_real_restart(unsigned int type) __noreturn;
11411 /* These must match dispatch_table in reboot_32.S */
11412 #define MRR_BIOS 0
11413 #define MRR_APM 1
11414 diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
11415 index 2dbe4a7..ce1db00 100644
11416 --- a/arch/x86/include/asm/rwsem.h
11417 +++ b/arch/x86/include/asm/rwsem.h
11418 @@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
11419 {
11420 asm volatile("# beginning down_read\n\t"
11421 LOCK_PREFIX _ASM_INC "(%1)\n\t"
11422 +
11423 +#ifdef CONFIG_PAX_REFCOUNT
11424 + "jno 0f\n"
11425 + LOCK_PREFIX _ASM_DEC "(%1)\n"
11426 + "int $4\n0:\n"
11427 + _ASM_EXTABLE(0b, 0b)
11428 +#endif
11429 +
11430 /* adds 0x00000001 */
11431 " jns 1f\n"
11432 " call call_rwsem_down_read_failed\n"
11433 @@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
11434 "1:\n\t"
11435 " mov %1,%2\n\t"
11436 " add %3,%2\n\t"
11437 +
11438 +#ifdef CONFIG_PAX_REFCOUNT
11439 + "jno 0f\n"
11440 + "sub %3,%2\n"
11441 + "int $4\n0:\n"
11442 + _ASM_EXTABLE(0b, 0b)
11443 +#endif
11444 +
11445 " jle 2f\n\t"
11446 LOCK_PREFIX " cmpxchg %2,%0\n\t"
11447 " jnz 1b\n\t"
11448 @@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
11449 long tmp;
11450 asm volatile("# beginning down_write\n\t"
11451 LOCK_PREFIX " xadd %1,(%2)\n\t"
11452 +
11453 +#ifdef CONFIG_PAX_REFCOUNT
11454 + "jno 0f\n"
11455 + "mov %1,(%2)\n"
11456 + "int $4\n0:\n"
11457 + _ASM_EXTABLE(0b, 0b)
11458 +#endif
11459 +
11460 /* adds 0xffff0001, returns the old value */
11461 " test %1,%1\n\t"
11462 /* was the count 0 before? */
11463 @@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem)
11464 long tmp;
11465 asm volatile("# beginning __up_read\n\t"
11466 LOCK_PREFIX " xadd %1,(%2)\n\t"
11467 +
11468 +#ifdef CONFIG_PAX_REFCOUNT
11469 + "jno 0f\n"
11470 + "mov %1,(%2)\n"
11471 + "int $4\n0:\n"
11472 + _ASM_EXTABLE(0b, 0b)
11473 +#endif
11474 +
11475 /* subtracts 1, returns the old value */
11476 " jns 1f\n\t"
11477 " call call_rwsem_wake\n" /* expects old value in %edx */
11478 @@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem)
11479 long tmp;
11480 asm volatile("# beginning __up_write\n\t"
11481 LOCK_PREFIX " xadd %1,(%2)\n\t"
11482 +
11483 +#ifdef CONFIG_PAX_REFCOUNT
11484 + "jno 0f\n"
11485 + "mov %1,(%2)\n"
11486 + "int $4\n0:\n"
11487 + _ASM_EXTABLE(0b, 0b)
11488 +#endif
11489 +
11490 /* subtracts 0xffff0001, returns the old value */
11491 " jns 1f\n\t"
11492 " call call_rwsem_wake\n" /* expects old value in %edx */
11493 @@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
11494 {
11495 asm volatile("# beginning __downgrade_write\n\t"
11496 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
11497 +
11498 +#ifdef CONFIG_PAX_REFCOUNT
11499 + "jno 0f\n"
11500 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
11501 + "int $4\n0:\n"
11502 + _ASM_EXTABLE(0b, 0b)
11503 +#endif
11504 +
11505 /*
11506 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
11507 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
11508 @@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
11509 */
11510 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
11511 {
11512 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
11513 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
11514 +
11515 +#ifdef CONFIG_PAX_REFCOUNT
11516 + "jno 0f\n"
11517 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
11518 + "int $4\n0:\n"
11519 + _ASM_EXTABLE(0b, 0b)
11520 +#endif
11521 +
11522 : "+m" (sem->count)
11523 : "er" (delta));
11524 }
11525 @@ -204,7 +260,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
11526 */
11527 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
11528 {
11529 - return delta + xadd(&sem->count, delta);
11530 + return delta + xadd_check_overflow(&sem->count, delta);
11531 }
11532
11533 #endif /* __KERNEL__ */
11534 diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
11535 index 5e64171..f58957e 100644
11536 --- a/arch/x86/include/asm/segment.h
11537 +++ b/arch/x86/include/asm/segment.h
11538 @@ -64,10 +64,15 @@
11539 * 26 - ESPFIX small SS
11540 * 27 - per-cpu [ offset to per-cpu data area ]
11541 * 28 - stack_canary-20 [ for stack protector ]
11542 - * 29 - unused
11543 - * 30 - unused
11544 + * 29 - PCI BIOS CS
11545 + * 30 - PCI BIOS DS
11546 * 31 - TSS for double fault handler
11547 */
11548 +#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
11549 +#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
11550 +#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
11551 +#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
11552 +
11553 #define GDT_ENTRY_TLS_MIN 6
11554 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
11555
11556 @@ -79,6 +84,8 @@
11557
11558 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
11559
11560 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
11561 +
11562 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
11563
11564 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
11565 @@ -104,6 +111,12 @@
11566 #define __KERNEL_STACK_CANARY 0
11567 #endif
11568
11569 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
11570 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
11571 +
11572 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
11573 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
11574 +
11575 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
11576
11577 /*
11578 @@ -141,7 +154,7 @@
11579 */
11580
11581 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
11582 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
11583 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
11584
11585
11586 #else
11587 @@ -165,6 +178,8 @@
11588 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
11589 #define __USER32_DS __USER_DS
11590
11591 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
11592 +
11593 #define GDT_ENTRY_TSS 8 /* needs two entries */
11594 #define GDT_ENTRY_LDT 10 /* needs two entries */
11595 #define GDT_ENTRY_TLS_MIN 12
11596 @@ -185,6 +200,7 @@
11597 #endif
11598
11599 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
11600 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
11601 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
11602 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
11603 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
11604 diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
11605 index 0434c40..1714bf0 100644
11606 --- a/arch/x86/include/asm/smp.h
11607 +++ b/arch/x86/include/asm/smp.h
11608 @@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
11609 /* cpus sharing the last level cache: */
11610 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
11611 DECLARE_PER_CPU(u16, cpu_llc_id);
11612 -DECLARE_PER_CPU(int, cpu_number);
11613 +DECLARE_PER_CPU(unsigned int, cpu_number);
11614
11615 static inline struct cpumask *cpu_sibling_mask(int cpu)
11616 {
11617 @@ -77,7 +77,7 @@ struct smp_ops {
11618
11619 void (*send_call_func_ipi)(const struct cpumask *mask);
11620 void (*send_call_func_single_ipi)(int cpu);
11621 -};
11622 +} __no_const;
11623
11624 /* Globals due to paravirt */
11625 extern void set_cpu_sibling_map(int cpu);
11626 @@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitdata;
11627 extern int safe_smp_processor_id(void);
11628
11629 #elif defined(CONFIG_X86_64_SMP)
11630 -#define raw_smp_processor_id() (percpu_read(cpu_number))
11631 -
11632 -#define stack_smp_processor_id() \
11633 -({ \
11634 - struct thread_info *ti; \
11635 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
11636 - ti->cpu; \
11637 -})
11638 +#define raw_smp_processor_id() (percpu_read(cpu_number))
11639 +#define stack_smp_processor_id() raw_smp_processor_id()
11640 #define safe_smp_processor_id() smp_processor_id()
11641
11642 #endif
11643 diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
11644 index a82c2bf..2198f61 100644
11645 --- a/arch/x86/include/asm/spinlock.h
11646 +++ b/arch/x86/include/asm/spinlock.h
11647 @@ -175,6 +175,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
11648 static inline void arch_read_lock(arch_rwlock_t *rw)
11649 {
11650 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
11651 +
11652 +#ifdef CONFIG_PAX_REFCOUNT
11653 + "jno 0f\n"
11654 + LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
11655 + "int $4\n0:\n"
11656 + _ASM_EXTABLE(0b, 0b)
11657 +#endif
11658 +
11659 "jns 1f\n"
11660 "call __read_lock_failed\n\t"
11661 "1:\n"
11662 @@ -184,6 +192,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
11663 static inline void arch_write_lock(arch_rwlock_t *rw)
11664 {
11665 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
11666 +
11667 +#ifdef CONFIG_PAX_REFCOUNT
11668 + "jno 0f\n"
11669 + LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
11670 + "int $4\n0:\n"
11671 + _ASM_EXTABLE(0b, 0b)
11672 +#endif
11673 +
11674 "jz 1f\n"
11675 "call __write_lock_failed\n\t"
11676 "1:\n"
11677 @@ -213,13 +229,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
11678
11679 static inline void arch_read_unlock(arch_rwlock_t *rw)
11680 {
11681 - asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
11682 + asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
11683 +
11684 +#ifdef CONFIG_PAX_REFCOUNT
11685 + "jno 0f\n"
11686 + LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
11687 + "int $4\n0:\n"
11688 + _ASM_EXTABLE(0b, 0b)
11689 +#endif
11690 +
11691 :"+m" (rw->lock) : : "memory");
11692 }
11693
11694 static inline void arch_write_unlock(arch_rwlock_t *rw)
11695 {
11696 - asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
11697 + asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
11698 +
11699 +#ifdef CONFIG_PAX_REFCOUNT
11700 + "jno 0f\n"
11701 + LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
11702 + "int $4\n0:\n"
11703 + _ASM_EXTABLE(0b, 0b)
11704 +#endif
11705 +
11706 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
11707 }
11708
11709 diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
11710 index 1575177..cb23f52 100644
11711 --- a/arch/x86/include/asm/stackprotector.h
11712 +++ b/arch/x86/include/asm/stackprotector.h
11713 @@ -48,7 +48,7 @@
11714 * head_32 for boot CPU and setup_per_cpu_areas() for others.
11715 */
11716 #define GDT_STACK_CANARY_INIT \
11717 - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
11718 + [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
11719
11720 /*
11721 * Initialize the stackprotector canary value.
11722 @@ -113,7 +113,7 @@ static inline void setup_stack_canary_segment(int cpu)
11723
11724 static inline void load_stack_canary_segment(void)
11725 {
11726 -#ifdef CONFIG_X86_32
11727 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
11728 asm volatile ("mov %0, %%gs" : : "r" (0));
11729 #endif
11730 }
11731 diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
11732 index 70bbe39..4ae2bd4 100644
11733 --- a/arch/x86/include/asm/stacktrace.h
11734 +++ b/arch/x86/include/asm/stacktrace.h
11735 @@ -11,28 +11,20 @@
11736
11737 extern int kstack_depth_to_print;
11738
11739 -struct thread_info;
11740 +struct task_struct;
11741 struct stacktrace_ops;
11742
11743 -typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
11744 - unsigned long *stack,
11745 - unsigned long bp,
11746 - const struct stacktrace_ops *ops,
11747 - void *data,
11748 - unsigned long *end,
11749 - int *graph);
11750 +typedef unsigned long walk_stack_t(struct task_struct *task,
11751 + void *stack_start,
11752 + unsigned long *stack,
11753 + unsigned long bp,
11754 + const struct stacktrace_ops *ops,
11755 + void *data,
11756 + unsigned long *end,
11757 + int *graph);
11758
11759 -extern unsigned long
11760 -print_context_stack(struct thread_info *tinfo,
11761 - unsigned long *stack, unsigned long bp,
11762 - const struct stacktrace_ops *ops, void *data,
11763 - unsigned long *end, int *graph);
11764 -
11765 -extern unsigned long
11766 -print_context_stack_bp(struct thread_info *tinfo,
11767 - unsigned long *stack, unsigned long bp,
11768 - const struct stacktrace_ops *ops, void *data,
11769 - unsigned long *end, int *graph);
11770 +extern walk_stack_t print_context_stack;
11771 +extern walk_stack_t print_context_stack_bp;
11772
11773 /* Generic stack tracer with callbacks */
11774
11775 @@ -40,7 +32,7 @@ struct stacktrace_ops {
11776 void (*address)(void *data, unsigned long address, int reliable);
11777 /* On negative return stop dumping */
11778 int (*stack)(void *data, char *name);
11779 - walk_stack_t walk_stack;
11780 + walk_stack_t *walk_stack;
11781 };
11782
11783 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
11784 diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
11785 index cb23852..2dde194 100644
11786 --- a/arch/x86/include/asm/sys_ia32.h
11787 +++ b/arch/x86/include/asm/sys_ia32.h
11788 @@ -40,7 +40,7 @@ asmlinkage long sys32_rt_sigprocmask(int, compat_sigset_t __user *,
11789 compat_sigset_t __user *, unsigned int);
11790 asmlinkage long sys32_alarm(unsigned int);
11791
11792 -asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
11793 +asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
11794 asmlinkage long sys32_sysfs(int, u32, u32);
11795
11796 asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
11797 diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
11798 index 2d2f01c..f985723 100644
11799 --- a/arch/x86/include/asm/system.h
11800 +++ b/arch/x86/include/asm/system.h
11801 @@ -129,7 +129,7 @@ do { \
11802 "call __switch_to\n\t" \
11803 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
11804 __switch_canary \
11805 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
11806 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
11807 "movq %%rax,%%rdi\n\t" \
11808 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
11809 "jnz ret_from_fork\n\t" \
11810 @@ -140,7 +140,7 @@ do { \
11811 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
11812 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
11813 [_tif_fork] "i" (_TIF_FORK), \
11814 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
11815 + [thread_info] "m" (current_tinfo), \
11816 [current_task] "m" (current_task) \
11817 __switch_canary_iparam \
11818 : "memory", "cc" __EXTRA_CLOBBER)
11819 @@ -200,7 +200,7 @@ static inline unsigned long get_limit(unsigned long segment)
11820 {
11821 unsigned long __limit;
11822 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
11823 - return __limit + 1;
11824 + return __limit;
11825 }
11826
11827 static inline void native_clts(void)
11828 @@ -397,13 +397,13 @@ void enable_hlt(void);
11829
11830 void cpu_idle_wait(void);
11831
11832 -extern unsigned long arch_align_stack(unsigned long sp);
11833 +#define arch_align_stack(x) ((x) & ~0xfUL)
11834 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
11835
11836 void default_idle(void);
11837 bool set_pm_idle_to_default(void);
11838
11839 -void stop_this_cpu(void *dummy);
11840 +void stop_this_cpu(void *dummy) __noreturn;
11841
11842 /*
11843 * Force strict CPU ordering.
11844 diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
11845 index cfd8144..1b1127d 100644
11846 --- a/arch/x86/include/asm/thread_info.h
11847 +++ b/arch/x86/include/asm/thread_info.h
11848 @@ -10,6 +10,7 @@
11849 #include <linux/compiler.h>
11850 #include <asm/page.h>
11851 #include <asm/types.h>
11852 +#include <asm/percpu.h>
11853
11854 /*
11855 * low level task data that entry.S needs immediate access to
11856 @@ -24,7 +25,6 @@ struct exec_domain;
11857 #include <linux/atomic.h>
11858
11859 struct thread_info {
11860 - struct task_struct *task; /* main task structure */
11861 struct exec_domain *exec_domain; /* execution domain */
11862 __u32 flags; /* low level flags */
11863 __u32 status; /* thread synchronous flags */
11864 @@ -34,19 +34,13 @@ struct thread_info {
11865 mm_segment_t addr_limit;
11866 struct restart_block restart_block;
11867 void __user *sysenter_return;
11868 -#ifdef CONFIG_X86_32
11869 - unsigned long previous_esp; /* ESP of the previous stack in
11870 - case of nested (IRQ) stacks
11871 - */
11872 - __u8 supervisor_stack[0];
11873 -#endif
11874 + unsigned long lowest_stack;
11875 unsigned int sig_on_uaccess_error:1;
11876 unsigned int uaccess_err:1; /* uaccess failed */
11877 };
11878
11879 -#define INIT_THREAD_INFO(tsk) \
11880 +#define INIT_THREAD_INFO \
11881 { \
11882 - .task = &tsk, \
11883 .exec_domain = &default_exec_domain, \
11884 .flags = 0, \
11885 .cpu = 0, \
11886 @@ -57,7 +51,7 @@ struct thread_info {
11887 }, \
11888 }
11889
11890 -#define init_thread_info (init_thread_union.thread_info)
11891 +#define init_thread_info (init_thread_union.stack)
11892 #define init_stack (init_thread_union.stack)
11893
11894 #else /* !__ASSEMBLY__ */
11895 @@ -169,45 +163,40 @@ struct thread_info {
11896 ret; \
11897 })
11898
11899 -#ifdef CONFIG_X86_32
11900 -
11901 -#define STACK_WARN (THREAD_SIZE/8)
11902 -/*
11903 - * macros/functions for gaining access to the thread information structure
11904 - *
11905 - * preempt_count needs to be 1 initially, until the scheduler is functional.
11906 - */
11907 -#ifndef __ASSEMBLY__
11908 -
11909 -
11910 -/* how to get the current stack pointer from C */
11911 -register unsigned long current_stack_pointer asm("esp") __used;
11912 -
11913 -/* how to get the thread information struct from C */
11914 -static inline struct thread_info *current_thread_info(void)
11915 -{
11916 - return (struct thread_info *)
11917 - (current_stack_pointer & ~(THREAD_SIZE - 1));
11918 -}
11919 -
11920 -#else /* !__ASSEMBLY__ */
11921 -
11922 +#ifdef __ASSEMBLY__
11923 /* how to get the thread information struct from ASM */
11924 #define GET_THREAD_INFO(reg) \
11925 - movl $-THREAD_SIZE, reg; \
11926 - andl %esp, reg
11927 + mov PER_CPU_VAR(current_tinfo), reg
11928
11929 /* use this one if reg already contains %esp */
11930 -#define GET_THREAD_INFO_WITH_ESP(reg) \
11931 - andl $-THREAD_SIZE, reg
11932 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
11933 +#else
11934 +/* how to get the thread information struct from C */
11935 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
11936 +
11937 +static __always_inline struct thread_info *current_thread_info(void)
11938 +{
11939 + return percpu_read_stable(current_tinfo);
11940 +}
11941 +#endif
11942 +
11943 +#ifdef CONFIG_X86_32
11944 +
11945 +#define STACK_WARN (THREAD_SIZE/8)
11946 +/*
11947 + * macros/functions for gaining access to the thread information structure
11948 + *
11949 + * preempt_count needs to be 1 initially, until the scheduler is functional.
11950 + */
11951 +#ifndef __ASSEMBLY__
11952 +
11953 +/* how to get the current stack pointer from C */
11954 +register unsigned long current_stack_pointer asm("esp") __used;
11955
11956 #endif
11957
11958 #else /* X86_32 */
11959
11960 -#include <asm/percpu.h>
11961 -#define KERNEL_STACK_OFFSET (5*8)
11962 -
11963 /*
11964 * macros/functions for gaining access to the thread information structure
11965 * preempt_count needs to be 1 initially, until the scheduler is functional.
11966 @@ -215,27 +204,8 @@ static inline struct thread_info *current_thread_info(void)
11967 #ifndef __ASSEMBLY__
11968 DECLARE_PER_CPU(unsigned long, kernel_stack);
11969
11970 -static inline struct thread_info *current_thread_info(void)
11971 -{
11972 - struct thread_info *ti;
11973 - ti = (void *)(percpu_read_stable(kernel_stack) +
11974 - KERNEL_STACK_OFFSET - THREAD_SIZE);
11975 - return ti;
11976 -}
11977 -
11978 -#else /* !__ASSEMBLY__ */
11979 -
11980 -/* how to get the thread information struct from ASM */
11981 -#define GET_THREAD_INFO(reg) \
11982 - movq PER_CPU_VAR(kernel_stack),reg ; \
11983 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
11984 -
11985 -/*
11986 - * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
11987 - * a certain register (to be used in assembler memory operands).
11988 - */
11989 -#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
11990 -
11991 +/* how to get the current stack pointer from C */
11992 +register unsigned long current_stack_pointer asm("rsp") __used;
11993 #endif
11994
11995 #endif /* !X86_32 */
11996 @@ -269,5 +239,16 @@ extern void arch_task_cache_init(void);
11997 extern void free_thread_info(struct thread_info *ti);
11998 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
11999 #define arch_task_cache_init arch_task_cache_init
12000 +
12001 +#define __HAVE_THREAD_FUNCTIONS
12002 +#define task_thread_info(task) (&(task)->tinfo)
12003 +#define task_stack_page(task) ((task)->stack)
12004 +#define setup_thread_stack(p, org) do {} while (0)
12005 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
12006 +
12007 +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
12008 +extern struct task_struct *alloc_task_struct_node(int node);
12009 +extern void free_task_struct(struct task_struct *);
12010 +
12011 #endif
12012 #endif /* _ASM_X86_THREAD_INFO_H */
12013 diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
12014 index 8be5f54..7ae826d 100644
12015 --- a/arch/x86/include/asm/uaccess.h
12016 +++ b/arch/x86/include/asm/uaccess.h
12017 @@ -7,12 +7,15 @@
12018 #include <linux/compiler.h>
12019 #include <linux/thread_info.h>
12020 #include <linux/string.h>
12021 +#include <linux/sched.h>
12022 #include <asm/asm.h>
12023 #include <asm/page.h>
12024
12025 #define VERIFY_READ 0
12026 #define VERIFY_WRITE 1
12027
12028 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
12029 +
12030 /*
12031 * The fs value determines whether argument validity checking should be
12032 * performed or not. If get_fs() == USER_DS, checking is performed, with
12033 @@ -28,7 +31,12 @@
12034
12035 #define get_ds() (KERNEL_DS)
12036 #define get_fs() (current_thread_info()->addr_limit)
12037 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12038 +void __set_fs(mm_segment_t x);
12039 +void set_fs(mm_segment_t x);
12040 +#else
12041 #define set_fs(x) (current_thread_info()->addr_limit = (x))
12042 +#endif
12043
12044 #define segment_eq(a, b) ((a).seg == (b).seg)
12045
12046 @@ -76,7 +84,33 @@
12047 * checks that the pointer is in the user space range - after calling
12048 * this function, memory access functions may still return -EFAULT.
12049 */
12050 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12051 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12052 +#define access_ok(type, addr, size) \
12053 +({ \
12054 + long __size = size; \
12055 + unsigned long __addr = (unsigned long)addr; \
12056 + unsigned long __addr_ao = __addr & PAGE_MASK; \
12057 + unsigned long __end_ao = __addr + __size - 1; \
12058 + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
12059 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
12060 + while(__addr_ao <= __end_ao) { \
12061 + char __c_ao; \
12062 + __addr_ao += PAGE_SIZE; \
12063 + if (__size > PAGE_SIZE) \
12064 + cond_resched(); \
12065 + if (__get_user(__c_ao, (char __user *)__addr)) \
12066 + break; \
12067 + if (type != VERIFY_WRITE) { \
12068 + __addr = __addr_ao; \
12069 + continue; \
12070 + } \
12071 + if (__put_user(__c_ao, (char __user *)__addr)) \
12072 + break; \
12073 + __addr = __addr_ao; \
12074 + } \
12075 + } \
12076 + __ret_ao; \
12077 +})
12078
12079 /*
12080 * The exception table consists of pairs of addresses: the first is the
12081 @@ -182,12 +216,20 @@ extern int __get_user_bad(void);
12082 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
12083 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
12084
12085 -
12086 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12087 +#define __copyuser_seg "gs;"
12088 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
12089 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
12090 +#else
12091 +#define __copyuser_seg
12092 +#define __COPYUSER_SET_ES
12093 +#define __COPYUSER_RESTORE_ES
12094 +#endif
12095
12096 #ifdef CONFIG_X86_32
12097 #define __put_user_asm_u64(x, addr, err, errret) \
12098 - asm volatile("1: movl %%eax,0(%2)\n" \
12099 - "2: movl %%edx,4(%2)\n" \
12100 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
12101 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
12102 "3:\n" \
12103 ".section .fixup,\"ax\"\n" \
12104 "4: movl %3,%0\n" \
12105 @@ -199,8 +241,8 @@ extern int __get_user_bad(void);
12106 : "A" (x), "r" (addr), "i" (errret), "0" (err))
12107
12108 #define __put_user_asm_ex_u64(x, addr) \
12109 - asm volatile("1: movl %%eax,0(%1)\n" \
12110 - "2: movl %%edx,4(%1)\n" \
12111 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
12112 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
12113 "3:\n" \
12114 _ASM_EXTABLE(1b, 2b - 1b) \
12115 _ASM_EXTABLE(2b, 3b - 2b) \
12116 @@ -252,7 +294,7 @@ extern void __put_user_8(void);
12117 __typeof__(*(ptr)) __pu_val; \
12118 __chk_user_ptr(ptr); \
12119 might_fault(); \
12120 - __pu_val = x; \
12121 + __pu_val = (x); \
12122 switch (sizeof(*(ptr))) { \
12123 case 1: \
12124 __put_user_x(1, __pu_val, ptr, __ret_pu); \
12125 @@ -373,7 +415,7 @@ do { \
12126 } while (0)
12127
12128 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12129 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
12130 + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
12131 "2:\n" \
12132 ".section .fixup,\"ax\"\n" \
12133 "3: mov %3,%0\n" \
12134 @@ -381,7 +423,7 @@ do { \
12135 " jmp 2b\n" \
12136 ".previous\n" \
12137 _ASM_EXTABLE(1b, 3b) \
12138 - : "=r" (err), ltype(x) \
12139 + : "=r" (err), ltype (x) \
12140 : "m" (__m(addr)), "i" (errret), "0" (err))
12141
12142 #define __get_user_size_ex(x, ptr, size) \
12143 @@ -406,7 +448,7 @@ do { \
12144 } while (0)
12145
12146 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
12147 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
12148 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
12149 "2:\n" \
12150 _ASM_EXTABLE(1b, 2b - 1b) \
12151 : ltype(x) : "m" (__m(addr)))
12152 @@ -423,13 +465,24 @@ do { \
12153 int __gu_err; \
12154 unsigned long __gu_val; \
12155 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
12156 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
12157 + (x) = (__typeof__(*(ptr)))__gu_val; \
12158 __gu_err; \
12159 })
12160
12161 /* FIXME: this hack is definitely wrong -AK */
12162 struct __large_struct { unsigned long buf[100]; };
12163 -#define __m(x) (*(struct __large_struct __user *)(x))
12164 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12165 +#define ____m(x) \
12166 +({ \
12167 + unsigned long ____x = (unsigned long)(x); \
12168 + if (____x < PAX_USER_SHADOW_BASE) \
12169 + ____x += PAX_USER_SHADOW_BASE; \
12170 + (void __user *)____x; \
12171 +})
12172 +#else
12173 +#define ____m(x) (x)
12174 +#endif
12175 +#define __m(x) (*(struct __large_struct __user *)____m(x))
12176
12177 /*
12178 * Tell gcc we read from memory instead of writing: this is because
12179 @@ -437,7 +490,7 @@ struct __large_struct { unsigned long buf[100]; };
12180 * aliasing issues.
12181 */
12182 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12183 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
12184 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
12185 "2:\n" \
12186 ".section .fixup,\"ax\"\n" \
12187 "3: mov %3,%0\n" \
12188 @@ -445,10 +498,10 @@ struct __large_struct { unsigned long buf[100]; };
12189 ".previous\n" \
12190 _ASM_EXTABLE(1b, 3b) \
12191 : "=r"(err) \
12192 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
12193 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
12194
12195 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
12196 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
12197 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
12198 "2:\n" \
12199 _ASM_EXTABLE(1b, 2b - 1b) \
12200 : : ltype(x), "m" (__m(addr)))
12201 @@ -487,8 +540,12 @@ struct __large_struct { unsigned long buf[100]; };
12202 * On error, the variable @x is set to zero.
12203 */
12204
12205 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12206 +#define __get_user(x, ptr) get_user((x), (ptr))
12207 +#else
12208 #define __get_user(x, ptr) \
12209 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
12210 +#endif
12211
12212 /**
12213 * __put_user: - Write a simple value into user space, with less checking.
12214 @@ -510,8 +567,12 @@ struct __large_struct { unsigned long buf[100]; };
12215 * Returns zero on success, or -EFAULT on error.
12216 */
12217
12218 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12219 +#define __put_user(x, ptr) put_user((x), (ptr))
12220 +#else
12221 #define __put_user(x, ptr) \
12222 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
12223 +#endif
12224
12225 #define __get_user_unaligned __get_user
12226 #define __put_user_unaligned __put_user
12227 @@ -529,7 +590,7 @@ struct __large_struct { unsigned long buf[100]; };
12228 #define get_user_ex(x, ptr) do { \
12229 unsigned long __gue_val; \
12230 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
12231 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
12232 + (x) = (__typeof__(*(ptr)))__gue_val; \
12233 } while (0)
12234
12235 #ifdef CONFIG_X86_WP_WORKS_OK
12236 diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
12237 index 566e803..b9521e9 100644
12238 --- a/arch/x86/include/asm/uaccess_32.h
12239 +++ b/arch/x86/include/asm/uaccess_32.h
12240 @@ -43,6 +43,9 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
12241 static __always_inline unsigned long __must_check
12242 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12243 {
12244 + if ((long)n < 0)
12245 + return n;
12246 +
12247 if (__builtin_constant_p(n)) {
12248 unsigned long ret;
12249
12250 @@ -61,6 +64,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12251 return ret;
12252 }
12253 }
12254 + if (!__builtin_constant_p(n))
12255 + check_object_size(from, n, true);
12256 return __copy_to_user_ll(to, from, n);
12257 }
12258
12259 @@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
12260 __copy_to_user(void __user *to, const void *from, unsigned long n)
12261 {
12262 might_fault();
12263 +
12264 return __copy_to_user_inatomic(to, from, n);
12265 }
12266
12267 static __always_inline unsigned long
12268 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
12269 {
12270 + if ((long)n < 0)
12271 + return n;
12272 +
12273 /* Avoid zeroing the tail if the copy fails..
12274 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
12275 * but as the zeroing behaviour is only significant when n is not
12276 @@ -137,6 +146,10 @@ static __always_inline unsigned long
12277 __copy_from_user(void *to, const void __user *from, unsigned long n)
12278 {
12279 might_fault();
12280 +
12281 + if ((long)n < 0)
12282 + return n;
12283 +
12284 if (__builtin_constant_p(n)) {
12285 unsigned long ret;
12286
12287 @@ -152,6 +165,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
12288 return ret;
12289 }
12290 }
12291 + if (!__builtin_constant_p(n))
12292 + check_object_size(to, n, false);
12293 return __copy_from_user_ll(to, from, n);
12294 }
12295
12296 @@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
12297 const void __user *from, unsigned long n)
12298 {
12299 might_fault();
12300 +
12301 + if ((long)n < 0)
12302 + return n;
12303 +
12304 if (__builtin_constant_p(n)) {
12305 unsigned long ret;
12306
12307 @@ -181,15 +200,19 @@ static __always_inline unsigned long
12308 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
12309 unsigned long n)
12310 {
12311 - return __copy_from_user_ll_nocache_nozero(to, from, n);
12312 + if ((long)n < 0)
12313 + return n;
12314 +
12315 + return __copy_from_user_ll_nocache_nozero(to, from, n);
12316 }
12317
12318 -unsigned long __must_check copy_to_user(void __user *to,
12319 - const void *from, unsigned long n);
12320 -unsigned long __must_check _copy_from_user(void *to,
12321 - const void __user *from,
12322 - unsigned long n);
12323 -
12324 +extern void copy_to_user_overflow(void)
12325 +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
12326 + __compiletime_error("copy_to_user() buffer size is not provably correct")
12327 +#else
12328 + __compiletime_warning("copy_to_user() buffer size is not provably correct")
12329 +#endif
12330 +;
12331
12332 extern void copy_from_user_overflow(void)
12333 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
12334 @@ -199,17 +222,61 @@ extern void copy_from_user_overflow(void)
12335 #endif
12336 ;
12337
12338 -static inline unsigned long __must_check copy_from_user(void *to,
12339 - const void __user *from,
12340 - unsigned long n)
12341 +/**
12342 + * copy_to_user: - Copy a block of data into user space.
12343 + * @to: Destination address, in user space.
12344 + * @from: Source address, in kernel space.
12345 + * @n: Number of bytes to copy.
12346 + *
12347 + * Context: User context only. This function may sleep.
12348 + *
12349 + * Copy data from kernel space to user space.
12350 + *
12351 + * Returns number of bytes that could not be copied.
12352 + * On success, this will be zero.
12353 + */
12354 +static inline unsigned long __must_check
12355 +copy_to_user(void __user *to, const void *from, unsigned long n)
12356 +{
12357 + int sz = __compiletime_object_size(from);
12358 +
12359 + if (unlikely(sz != -1 && sz < n))
12360 + copy_to_user_overflow();
12361 + else if (access_ok(VERIFY_WRITE, to, n))
12362 + n = __copy_to_user(to, from, n);
12363 + return n;
12364 +}
12365 +
12366 +/**
12367 + * copy_from_user: - Copy a block of data from user space.
12368 + * @to: Destination address, in kernel space.
12369 + * @from: Source address, in user space.
12370 + * @n: Number of bytes to copy.
12371 + *
12372 + * Context: User context only. This function may sleep.
12373 + *
12374 + * Copy data from user space to kernel space.
12375 + *
12376 + * Returns number of bytes that could not be copied.
12377 + * On success, this will be zero.
12378 + *
12379 + * If some data could not be copied, this function will pad the copied
12380 + * data to the requested size using zero bytes.
12381 + */
12382 +static inline unsigned long __must_check
12383 +copy_from_user(void *to, const void __user *from, unsigned long n)
12384 {
12385 int sz = __compiletime_object_size(to);
12386
12387 - if (likely(sz == -1 || sz >= n))
12388 - n = _copy_from_user(to, from, n);
12389 - else
12390 + if (unlikely(sz != -1 && sz < n))
12391 copy_from_user_overflow();
12392 -
12393 + else if (access_ok(VERIFY_READ, from, n))
12394 + n = __copy_from_user(to, from, n);
12395 + else if ((long)n > 0) {
12396 + if (!__builtin_constant_p(n))
12397 + check_object_size(to, n, false);
12398 + memset(to, 0, n);
12399 + }
12400 return n;
12401 }
12402
12403 diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
12404 index 1c66d30..e66922c 100644
12405 --- a/arch/x86/include/asm/uaccess_64.h
12406 +++ b/arch/x86/include/asm/uaccess_64.h
12407 @@ -10,6 +10,9 @@
12408 #include <asm/alternative.h>
12409 #include <asm/cpufeature.h>
12410 #include <asm/page.h>
12411 +#include <asm/pgtable.h>
12412 +
12413 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
12414
12415 /*
12416 * Copy To/From Userspace
12417 @@ -17,12 +20,12 @@
12418
12419 /* Handles exceptions in both to and from, but doesn't do access_ok */
12420 __must_check unsigned long
12421 -copy_user_generic_string(void *to, const void *from, unsigned len);
12422 +copy_user_generic_string(void *to, const void *from, unsigned long len);
12423 __must_check unsigned long
12424 -copy_user_generic_unrolled(void *to, const void *from, unsigned len);
12425 +copy_user_generic_unrolled(void *to, const void *from, unsigned long len);
12426
12427 static __always_inline __must_check unsigned long
12428 -copy_user_generic(void *to, const void *from, unsigned len)
12429 +copy_user_generic(void *to, const void *from, unsigned long len)
12430 {
12431 unsigned ret;
12432
12433 @@ -32,142 +35,226 @@ copy_user_generic(void *to, const void *from, unsigned len)
12434 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
12435 "=d" (len)),
12436 "1" (to), "2" (from), "3" (len)
12437 - : "memory", "rcx", "r8", "r9", "r10", "r11");
12438 + : "memory", "rcx", "r8", "r9", "r11");
12439 return ret;
12440 }
12441
12442 +static __always_inline __must_check unsigned long
12443 +__copy_to_user(void __user *to, const void *from, unsigned long len);
12444 +static __always_inline __must_check unsigned long
12445 +__copy_from_user(void *to, const void __user *from, unsigned long len);
12446 __must_check unsigned long
12447 -_copy_to_user(void __user *to, const void *from, unsigned len);
12448 -__must_check unsigned long
12449 -_copy_from_user(void *to, const void __user *from, unsigned len);
12450 -__must_check unsigned long
12451 -copy_in_user(void __user *to, const void __user *from, unsigned len);
12452 +copy_in_user(void __user *to, const void __user *from, unsigned long len);
12453
12454 static inline unsigned long __must_check copy_from_user(void *to,
12455 const void __user *from,
12456 unsigned long n)
12457 {
12458 - int sz = __compiletime_object_size(to);
12459 -
12460 might_fault();
12461 - if (likely(sz == -1 || sz >= n))
12462 - n = _copy_from_user(to, from, n);
12463 -#ifdef CONFIG_DEBUG_VM
12464 - else
12465 - WARN(1, "Buffer overflow detected!\n");
12466 -#endif
12467 +
12468 + if (access_ok(VERIFY_READ, from, n))
12469 + n = __copy_from_user(to, from, n);
12470 + else if (n < INT_MAX) {
12471 + if (!__builtin_constant_p(n))
12472 + check_object_size(to, n, false);
12473 + memset(to, 0, n);
12474 + }
12475 return n;
12476 }
12477
12478 static __always_inline __must_check
12479 -int copy_to_user(void __user *dst, const void *src, unsigned size)
12480 +int copy_to_user(void __user *dst, const void *src, unsigned long size)
12481 {
12482 might_fault();
12483
12484 - return _copy_to_user(dst, src, size);
12485 + if (access_ok(VERIFY_WRITE, dst, size))
12486 + size = __copy_to_user(dst, src, size);
12487 + return size;
12488 }
12489
12490 static __always_inline __must_check
12491 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
12492 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
12493 {
12494 - int ret = 0;
12495 + int sz = __compiletime_object_size(dst);
12496 + unsigned ret = 0;
12497
12498 might_fault();
12499 - if (!__builtin_constant_p(size))
12500 - return copy_user_generic(dst, (__force void *)src, size);
12501 +
12502 + if (size > INT_MAX)
12503 + return size;
12504 +
12505 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12506 + if (!__access_ok(VERIFY_READ, src, size))
12507 + return size;
12508 +#endif
12509 +
12510 + if (unlikely(sz != -1 && sz < size)) {
12511 +#ifdef CONFIG_DEBUG_VM
12512 + WARN(1, "Buffer overflow detected!\n");
12513 +#endif
12514 + return size;
12515 + }
12516 +
12517 + if (!__builtin_constant_p(size)) {
12518 + check_object_size(dst, size, false);
12519 +
12520 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12521 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12522 + src += PAX_USER_SHADOW_BASE;
12523 +#endif
12524 +
12525 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
12526 + }
12527 switch (size) {
12528 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
12529 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
12530 ret, "b", "b", "=q", 1);
12531 return ret;
12532 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
12533 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
12534 ret, "w", "w", "=r", 2);
12535 return ret;
12536 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
12537 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
12538 ret, "l", "k", "=r", 4);
12539 return ret;
12540 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
12541 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12542 ret, "q", "", "=r", 8);
12543 return ret;
12544 case 10:
12545 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
12546 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12547 ret, "q", "", "=r", 10);
12548 if (unlikely(ret))
12549 return ret;
12550 __get_user_asm(*(u16 *)(8 + (char *)dst),
12551 - (u16 __user *)(8 + (char __user *)src),
12552 + (const u16 __user *)(8 + (const char __user *)src),
12553 ret, "w", "w", "=r", 2);
12554 return ret;
12555 case 16:
12556 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
12557 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12558 ret, "q", "", "=r", 16);
12559 if (unlikely(ret))
12560 return ret;
12561 __get_user_asm(*(u64 *)(8 + (char *)dst),
12562 - (u64 __user *)(8 + (char __user *)src),
12563 + (const u64 __user *)(8 + (const char __user *)src),
12564 ret, "q", "", "=r", 8);
12565 return ret;
12566 default:
12567 - return copy_user_generic(dst, (__force void *)src, size);
12568 +
12569 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12570 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12571 + src += PAX_USER_SHADOW_BASE;
12572 +#endif
12573 +
12574 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
12575 }
12576 }
12577
12578 static __always_inline __must_check
12579 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
12580 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
12581 {
12582 - int ret = 0;
12583 + int sz = __compiletime_object_size(src);
12584 + unsigned ret = 0;
12585
12586 might_fault();
12587 - if (!__builtin_constant_p(size))
12588 - return copy_user_generic((__force void *)dst, src, size);
12589 +
12590 + if (size > INT_MAX)
12591 + return size;
12592 +
12593 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12594 + if (!__access_ok(VERIFY_WRITE, dst, size))
12595 + return size;
12596 +#endif
12597 +
12598 + if (unlikely(sz != -1 && sz < size)) {
12599 +#ifdef CONFIG_DEBUG_VM
12600 + WARN(1, "Buffer overflow detected!\n");
12601 +#endif
12602 + return size;
12603 + }
12604 +
12605 + if (!__builtin_constant_p(size)) {
12606 + check_object_size(src, size, true);
12607 +
12608 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12609 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12610 + dst += PAX_USER_SHADOW_BASE;
12611 +#endif
12612 +
12613 + return copy_user_generic((__force_kernel void *)dst, src, size);
12614 + }
12615 switch (size) {
12616 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
12617 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
12618 ret, "b", "b", "iq", 1);
12619 return ret;
12620 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
12621 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
12622 ret, "w", "w", "ir", 2);
12623 return ret;
12624 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
12625 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
12626 ret, "l", "k", "ir", 4);
12627 return ret;
12628 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
12629 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12630 ret, "q", "", "er", 8);
12631 return ret;
12632 case 10:
12633 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
12634 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12635 ret, "q", "", "er", 10);
12636 if (unlikely(ret))
12637 return ret;
12638 asm("":::"memory");
12639 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
12640 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
12641 ret, "w", "w", "ir", 2);
12642 return ret;
12643 case 16:
12644 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
12645 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12646 ret, "q", "", "er", 16);
12647 if (unlikely(ret))
12648 return ret;
12649 asm("":::"memory");
12650 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
12651 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
12652 ret, "q", "", "er", 8);
12653 return ret;
12654 default:
12655 - return copy_user_generic((__force void *)dst, src, size);
12656 +
12657 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12658 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12659 + dst += PAX_USER_SHADOW_BASE;
12660 +#endif
12661 +
12662 + return copy_user_generic((__force_kernel void *)dst, src, size);
12663 }
12664 }
12665
12666 static __always_inline __must_check
12667 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12668 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
12669 {
12670 - int ret = 0;
12671 + unsigned ret = 0;
12672
12673 might_fault();
12674 - if (!__builtin_constant_p(size))
12675 - return copy_user_generic((__force void *)dst,
12676 - (__force void *)src, size);
12677 +
12678 + if (size > INT_MAX)
12679 + return size;
12680 +
12681 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12682 + if (!__access_ok(VERIFY_READ, src, size))
12683 + return size;
12684 + if (!__access_ok(VERIFY_WRITE, dst, size))
12685 + return size;
12686 +#endif
12687 +
12688 + if (!__builtin_constant_p(size)) {
12689 +
12690 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12691 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12692 + src += PAX_USER_SHADOW_BASE;
12693 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12694 + dst += PAX_USER_SHADOW_BASE;
12695 +#endif
12696 +
12697 + return copy_user_generic((__force_kernel void *)dst,
12698 + (__force_kernel const void *)src, size);
12699 + }
12700 switch (size) {
12701 case 1: {
12702 u8 tmp;
12703 - __get_user_asm(tmp, (u8 __user *)src,
12704 + __get_user_asm(tmp, (const u8 __user *)src,
12705 ret, "b", "b", "=q", 1);
12706 if (likely(!ret))
12707 __put_user_asm(tmp, (u8 __user *)dst,
12708 @@ -176,7 +263,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12709 }
12710 case 2: {
12711 u16 tmp;
12712 - __get_user_asm(tmp, (u16 __user *)src,
12713 + __get_user_asm(tmp, (const u16 __user *)src,
12714 ret, "w", "w", "=r", 2);
12715 if (likely(!ret))
12716 __put_user_asm(tmp, (u16 __user *)dst,
12717 @@ -186,7 +273,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12718
12719 case 4: {
12720 u32 tmp;
12721 - __get_user_asm(tmp, (u32 __user *)src,
12722 + __get_user_asm(tmp, (const u32 __user *)src,
12723 ret, "l", "k", "=r", 4);
12724 if (likely(!ret))
12725 __put_user_asm(tmp, (u32 __user *)dst,
12726 @@ -195,7 +282,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12727 }
12728 case 8: {
12729 u64 tmp;
12730 - __get_user_asm(tmp, (u64 __user *)src,
12731 + __get_user_asm(tmp, (const u64 __user *)src,
12732 ret, "q", "", "=r", 8);
12733 if (likely(!ret))
12734 __put_user_asm(tmp, (u64 __user *)dst,
12735 @@ -203,8 +290,16 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12736 return ret;
12737 }
12738 default:
12739 - return copy_user_generic((__force void *)dst,
12740 - (__force void *)src, size);
12741 +
12742 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12743 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12744 + src += PAX_USER_SHADOW_BASE;
12745 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12746 + dst += PAX_USER_SHADOW_BASE;
12747 +#endif
12748 +
12749 + return copy_user_generic((__force_kernel void *)dst,
12750 + (__force_kernel const void *)src, size);
12751 }
12752 }
12753
12754 @@ -219,35 +314,72 @@ __must_check unsigned long clear_user(void __user *mem, unsigned long len);
12755 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
12756
12757 static __must_check __always_inline int
12758 -__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
12759 +__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
12760 {
12761 - return copy_user_generic(dst, (__force const void *)src, size);
12762 + if (size > INT_MAX)
12763 + return size;
12764 +
12765 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12766 + if (!__access_ok(VERIFY_READ, src, size))
12767 + return size;
12768 +
12769 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12770 + src += PAX_USER_SHADOW_BASE;
12771 +#endif
12772 +
12773 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
12774 }
12775
12776 -static __must_check __always_inline int
12777 -__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
12778 +static __must_check __always_inline unsigned long
12779 +__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
12780 {
12781 - return copy_user_generic((__force void *)dst, src, size);
12782 + if (size > INT_MAX)
12783 + return size;
12784 +
12785 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12786 + if (!__access_ok(VERIFY_WRITE, dst, size))
12787 + return size;
12788 +
12789 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12790 + dst += PAX_USER_SHADOW_BASE;
12791 +#endif
12792 +
12793 + return copy_user_generic((__force_kernel void *)dst, src, size);
12794 }
12795
12796 -extern long __copy_user_nocache(void *dst, const void __user *src,
12797 - unsigned size, int zerorest);
12798 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
12799 + unsigned long size, int zerorest);
12800
12801 -static inline int
12802 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
12803 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
12804 {
12805 might_sleep();
12806 +
12807 + if (size > INT_MAX)
12808 + return size;
12809 +
12810 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12811 + if (!__access_ok(VERIFY_READ, src, size))
12812 + return size;
12813 +#endif
12814 +
12815 return __copy_user_nocache(dst, src, size, 1);
12816 }
12817
12818 -static inline int
12819 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
12820 - unsigned size)
12821 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
12822 + unsigned long size)
12823 {
12824 + if (size > INT_MAX)
12825 + return size;
12826 +
12827 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12828 + if (!__access_ok(VERIFY_READ, src, size))
12829 + return size;
12830 +#endif
12831 +
12832 return __copy_user_nocache(dst, src, size, 0);
12833 }
12834
12835 -unsigned long
12836 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
12837 +extern unsigned long
12838 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest);
12839
12840 #endif /* _ASM_X86_UACCESS_64_H */
12841 diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
12842 index bb05228..d763d5b 100644
12843 --- a/arch/x86/include/asm/vdso.h
12844 +++ b/arch/x86/include/asm/vdso.h
12845 @@ -11,7 +11,7 @@ extern const char VDSO32_PRELINK[];
12846 #define VDSO32_SYMBOL(base, name) \
12847 ({ \
12848 extern const char VDSO32_##name[]; \
12849 - (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
12850 + (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
12851 })
12852 #endif
12853
12854 diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
12855 index 517d476..a1cb4d9 100644
12856 --- a/arch/x86/include/asm/x86_init.h
12857 +++ b/arch/x86/include/asm/x86_init.h
12858 @@ -29,7 +29,7 @@ struct x86_init_mpparse {
12859 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
12860 void (*find_smp_config)(void);
12861 void (*get_smp_config)(unsigned int early);
12862 -};
12863 +} __no_const;
12864
12865 /**
12866 * struct x86_init_resources - platform specific resource related ops
12867 @@ -43,7 +43,7 @@ struct x86_init_resources {
12868 void (*probe_roms)(void);
12869 void (*reserve_resources)(void);
12870 char *(*memory_setup)(void);
12871 -};
12872 +} __no_const;
12873
12874 /**
12875 * struct x86_init_irqs - platform specific interrupt setup
12876 @@ -56,7 +56,7 @@ struct x86_init_irqs {
12877 void (*pre_vector_init)(void);
12878 void (*intr_init)(void);
12879 void (*trap_init)(void);
12880 -};
12881 +} __no_const;
12882
12883 /**
12884 * struct x86_init_oem - oem platform specific customizing functions
12885 @@ -66,7 +66,7 @@ struct x86_init_irqs {
12886 struct x86_init_oem {
12887 void (*arch_setup)(void);
12888 void (*banner)(void);
12889 -};
12890 +} __no_const;
12891
12892 /**
12893 * struct x86_init_mapping - platform specific initial kernel pagetable setup
12894 @@ -77,7 +77,7 @@ struct x86_init_oem {
12895 */
12896 struct x86_init_mapping {
12897 void (*pagetable_reserve)(u64 start, u64 end);
12898 -};
12899 +} __no_const;
12900
12901 /**
12902 * struct x86_init_paging - platform specific paging functions
12903 @@ -87,7 +87,7 @@ struct x86_init_mapping {
12904 struct x86_init_paging {
12905 void (*pagetable_setup_start)(pgd_t *base);
12906 void (*pagetable_setup_done)(pgd_t *base);
12907 -};
12908 +} __no_const;
12909
12910 /**
12911 * struct x86_init_timers - platform specific timer setup
12912 @@ -102,7 +102,7 @@ struct x86_init_timers {
12913 void (*tsc_pre_init)(void);
12914 void (*timer_init)(void);
12915 void (*wallclock_init)(void);
12916 -};
12917 +} __no_const;
12918
12919 /**
12920 * struct x86_init_iommu - platform specific iommu setup
12921 @@ -110,7 +110,7 @@ struct x86_init_timers {
12922 */
12923 struct x86_init_iommu {
12924 int (*iommu_init)(void);
12925 -};
12926 +} __no_const;
12927
12928 /**
12929 * struct x86_init_pci - platform specific pci init functions
12930 @@ -124,7 +124,7 @@ struct x86_init_pci {
12931 int (*init)(void);
12932 void (*init_irq)(void);
12933 void (*fixup_irqs)(void);
12934 -};
12935 +} __no_const;
12936
12937 /**
12938 * struct x86_init_ops - functions for platform specific setup
12939 @@ -140,7 +140,7 @@ struct x86_init_ops {
12940 struct x86_init_timers timers;
12941 struct x86_init_iommu iommu;
12942 struct x86_init_pci pci;
12943 -};
12944 +} __no_const;
12945
12946 /**
12947 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
12948 @@ -149,7 +149,7 @@ struct x86_init_ops {
12949 struct x86_cpuinit_ops {
12950 void (*setup_percpu_clockev)(void);
12951 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
12952 -};
12953 +} __no_const;
12954
12955 /**
12956 * struct x86_platform_ops - platform specific runtime functions
12957 @@ -171,7 +171,7 @@ struct x86_platform_ops {
12958 void (*nmi_init)(void);
12959 unsigned char (*get_nmi_reason)(void);
12960 int (*i8042_detect)(void);
12961 -};
12962 +} __no_const;
12963
12964 struct pci_dev;
12965
12966 @@ -180,7 +180,7 @@ struct x86_msi_ops {
12967 void (*teardown_msi_irq)(unsigned int irq);
12968 void (*teardown_msi_irqs)(struct pci_dev *dev);
12969 void (*restore_msi_irqs)(struct pci_dev *dev, int irq);
12970 -};
12971 +} __no_const;
12972
12973 extern struct x86_init_ops x86_init;
12974 extern struct x86_cpuinit_ops x86_cpuinit;
12975 diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
12976 index c6ce245..ffbdab7 100644
12977 --- a/arch/x86/include/asm/xsave.h
12978 +++ b/arch/x86/include/asm/xsave.h
12979 @@ -65,6 +65,11 @@ static inline int xsave_user(struct xsave_struct __user *buf)
12980 {
12981 int err;
12982
12983 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12984 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
12985 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
12986 +#endif
12987 +
12988 /*
12989 * Clear the xsave header first, so that reserved fields are
12990 * initialized to zero.
12991 @@ -96,10 +101,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
12992 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
12993 {
12994 int err;
12995 - struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
12996 + struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
12997 u32 lmask = mask;
12998 u32 hmask = mask >> 32;
12999
13000 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13001 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
13002 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
13003 +#endif
13004 +
13005 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
13006 "2:\n"
13007 ".section .fixup,\"ax\"\n"
13008 diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
13009 index 6a564ac..9b1340c 100644
13010 --- a/arch/x86/kernel/acpi/realmode/Makefile
13011 +++ b/arch/x86/kernel/acpi/realmode/Makefile
13012 @@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
13013 $(call cc-option, -fno-stack-protector) \
13014 $(call cc-option, -mpreferred-stack-boundary=2)
13015 KBUILD_CFLAGS += $(call cc-option, -m32)
13016 +ifdef CONSTIFY_PLUGIN
13017 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
13018 +endif
13019 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
13020 GCOV_PROFILE := n
13021
13022 diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
13023 index b4fd836..4358fe3 100644
13024 --- a/arch/x86/kernel/acpi/realmode/wakeup.S
13025 +++ b/arch/x86/kernel/acpi/realmode/wakeup.S
13026 @@ -108,6 +108,9 @@ wakeup_code:
13027 /* Do any other stuff... */
13028
13029 #ifndef CONFIG_64BIT
13030 + /* Recheck NX bit overrides (64bit path does this in trampoline */
13031 + call verify_cpu
13032 +
13033 /* This could also be done in C code... */
13034 movl pmode_cr3, %eax
13035 movl %eax, %cr3
13036 @@ -131,6 +134,7 @@ wakeup_code:
13037 movl pmode_cr0, %eax
13038 movl %eax, %cr0
13039 jmp pmode_return
13040 +# include "../../verify_cpu.S"
13041 #else
13042 pushw $0
13043 pushw trampoline_segment
13044 diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
13045 index 103b6ab..2004d0a 100644
13046 --- a/arch/x86/kernel/acpi/sleep.c
13047 +++ b/arch/x86/kernel/acpi/sleep.c
13048 @@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
13049 header->trampoline_segment = trampoline_address() >> 4;
13050 #ifdef CONFIG_SMP
13051 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
13052 +
13053 + pax_open_kernel();
13054 early_gdt_descr.address =
13055 (unsigned long)get_cpu_gdt_table(smp_processor_id());
13056 + pax_close_kernel();
13057 +
13058 initial_gs = per_cpu_offset(smp_processor_id());
13059 #endif
13060 initial_code = (unsigned long)wakeup_long64;
13061 diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
13062 index 13ab720..95d5442 100644
13063 --- a/arch/x86/kernel/acpi/wakeup_32.S
13064 +++ b/arch/x86/kernel/acpi/wakeup_32.S
13065 @@ -30,13 +30,11 @@ wakeup_pmode_return:
13066 # and restore the stack ... but you need gdt for this to work
13067 movl saved_context_esp, %esp
13068
13069 - movl %cs:saved_magic, %eax
13070 - cmpl $0x12345678, %eax
13071 + cmpl $0x12345678, saved_magic
13072 jne bogus_magic
13073
13074 # jump to place where we left off
13075 - movl saved_eip, %eax
13076 - jmp *%eax
13077 + jmp *(saved_eip)
13078
13079 bogus_magic:
13080 jmp bogus_magic
13081 diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
13082 index 1f84794..e23f862 100644
13083 --- a/arch/x86/kernel/alternative.c
13084 +++ b/arch/x86/kernel/alternative.c
13085 @@ -276,6 +276,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
13086 */
13087 for (a = start; a < end; a++) {
13088 instr = (u8 *)&a->instr_offset + a->instr_offset;
13089 +
13090 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13091 + instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13092 + if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
13093 + instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13094 +#endif
13095 +
13096 replacement = (u8 *)&a->repl_offset + a->repl_offset;
13097 BUG_ON(a->replacementlen > a->instrlen);
13098 BUG_ON(a->instrlen > sizeof(insnbuf));
13099 @@ -307,10 +314,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
13100 for (poff = start; poff < end; poff++) {
13101 u8 *ptr = (u8 *)poff + *poff;
13102
13103 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13104 + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13105 + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
13106 + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13107 +#endif
13108 +
13109 if (!*poff || ptr < text || ptr >= text_end)
13110 continue;
13111 /* turn DS segment override prefix into lock prefix */
13112 - if (*ptr == 0x3e)
13113 + if (*ktla_ktva(ptr) == 0x3e)
13114 text_poke(ptr, ((unsigned char []){0xf0}), 1);
13115 };
13116 mutex_unlock(&text_mutex);
13117 @@ -328,10 +341,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
13118 for (poff = start; poff < end; poff++) {
13119 u8 *ptr = (u8 *)poff + *poff;
13120
13121 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13122 + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13123 + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
13124 + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13125 +#endif
13126 +
13127 if (!*poff || ptr < text || ptr >= text_end)
13128 continue;
13129 /* turn lock prefix into DS segment override prefix */
13130 - if (*ptr == 0xf0)
13131 + if (*ktla_ktva(ptr) == 0xf0)
13132 text_poke(ptr, ((unsigned char []){0x3E}), 1);
13133 };
13134 mutex_unlock(&text_mutex);
13135 @@ -500,7 +519,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
13136
13137 BUG_ON(p->len > MAX_PATCH_LEN);
13138 /* prep the buffer with the original instructions */
13139 - memcpy(insnbuf, p->instr, p->len);
13140 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
13141 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
13142 (unsigned long)p->instr, p->len);
13143
13144 @@ -568,7 +587,7 @@ void __init alternative_instructions(void)
13145 if (smp_alt_once)
13146 free_init_pages("SMP alternatives",
13147 (unsigned long)__smp_locks,
13148 - (unsigned long)__smp_locks_end);
13149 + PAGE_ALIGN((unsigned long)__smp_locks_end));
13150
13151 restart_nmi();
13152 }
13153 @@ -585,13 +604,17 @@ void __init alternative_instructions(void)
13154 * instructions. And on the local CPU you need to be protected again NMI or MCE
13155 * handlers seeing an inconsistent instruction while you patch.
13156 */
13157 -void *__init_or_module text_poke_early(void *addr, const void *opcode,
13158 +void *__kprobes text_poke_early(void *addr, const void *opcode,
13159 size_t len)
13160 {
13161 unsigned long flags;
13162 local_irq_save(flags);
13163 - memcpy(addr, opcode, len);
13164 +
13165 + pax_open_kernel();
13166 + memcpy(ktla_ktva(addr), opcode, len);
13167 sync_core();
13168 + pax_close_kernel();
13169 +
13170 local_irq_restore(flags);
13171 /* Could also do a CLFLUSH here to speed up CPU recovery; but
13172 that causes hangs on some VIA CPUs. */
13173 @@ -613,36 +636,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
13174 */
13175 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
13176 {
13177 - unsigned long flags;
13178 - char *vaddr;
13179 + unsigned char *vaddr = ktla_ktva(addr);
13180 struct page *pages[2];
13181 - int i;
13182 + size_t i;
13183
13184 if (!core_kernel_text((unsigned long)addr)) {
13185 - pages[0] = vmalloc_to_page(addr);
13186 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
13187 + pages[0] = vmalloc_to_page(vaddr);
13188 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
13189 } else {
13190 - pages[0] = virt_to_page(addr);
13191 + pages[0] = virt_to_page(vaddr);
13192 WARN_ON(!PageReserved(pages[0]));
13193 - pages[1] = virt_to_page(addr + PAGE_SIZE);
13194 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
13195 }
13196 BUG_ON(!pages[0]);
13197 - local_irq_save(flags);
13198 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
13199 - if (pages[1])
13200 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
13201 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
13202 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
13203 - clear_fixmap(FIX_TEXT_POKE0);
13204 - if (pages[1])
13205 - clear_fixmap(FIX_TEXT_POKE1);
13206 - local_flush_tlb();
13207 - sync_core();
13208 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
13209 - that causes hangs on some VIA CPUs. */
13210 + text_poke_early(addr, opcode, len);
13211 for (i = 0; i < len; i++)
13212 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
13213 - local_irq_restore(flags);
13214 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
13215 return addr;
13216 }
13217
13218 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
13219 index 2eec05b..fef012b 100644
13220 --- a/arch/x86/kernel/apic/apic.c
13221 +++ b/arch/x86/kernel/apic/apic.c
13222 @@ -184,7 +184,7 @@ int first_system_vector = 0xfe;
13223 /*
13224 * Debug level, exported for io_apic.c
13225 */
13226 -unsigned int apic_verbosity;
13227 +int apic_verbosity;
13228
13229 int pic_mode;
13230
13231 @@ -1908,7 +1908,7 @@ void smp_error_interrupt(struct pt_regs *regs)
13232 apic_write(APIC_ESR, 0);
13233 v1 = apic_read(APIC_ESR);
13234 ack_APIC_irq();
13235 - atomic_inc(&irq_err_count);
13236 + atomic_inc_unchecked(&irq_err_count);
13237
13238 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
13239 smp_processor_id(), v0 , v1);
13240 diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
13241 index fb07275..e06bb59 100644
13242 --- a/arch/x86/kernel/apic/io_apic.c
13243 +++ b/arch/x86/kernel/apic/io_apic.c
13244 @@ -1096,7 +1096,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
13245 }
13246 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
13247
13248 -void lock_vector_lock(void)
13249 +void lock_vector_lock(void) __acquires(vector_lock)
13250 {
13251 /* Used to the online set of cpus does not change
13252 * during assign_irq_vector.
13253 @@ -1104,7 +1104,7 @@ void lock_vector_lock(void)
13254 raw_spin_lock(&vector_lock);
13255 }
13256
13257 -void unlock_vector_lock(void)
13258 +void unlock_vector_lock(void) __releases(vector_lock)
13259 {
13260 raw_spin_unlock(&vector_lock);
13261 }
13262 @@ -2510,7 +2510,7 @@ static void ack_apic_edge(struct irq_data *data)
13263 ack_APIC_irq();
13264 }
13265
13266 -atomic_t irq_mis_count;
13267 +atomic_unchecked_t irq_mis_count;
13268
13269 static void ack_apic_level(struct irq_data *data)
13270 {
13271 @@ -2576,7 +2576,7 @@ static void ack_apic_level(struct irq_data *data)
13272 * at the cpu.
13273 */
13274 if (!(v & (1 << (i & 0x1f)))) {
13275 - atomic_inc(&irq_mis_count);
13276 + atomic_inc_unchecked(&irq_mis_count);
13277
13278 eoi_ioapic_irq(irq, cfg);
13279 }
13280 diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
13281 index f76623c..aab694f 100644
13282 --- a/arch/x86/kernel/apm_32.c
13283 +++ b/arch/x86/kernel/apm_32.c
13284 @@ -411,7 +411,7 @@ static DEFINE_MUTEX(apm_mutex);
13285 * This is for buggy BIOS's that refer to (real mode) segment 0x40
13286 * even though they are called in protected mode.
13287 */
13288 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
13289 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
13290 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
13291
13292 static const char driver_version[] = "1.16ac"; /* no spaces */
13293 @@ -589,7 +589,10 @@ static long __apm_bios_call(void *_call)
13294 BUG_ON(cpu != 0);
13295 gdt = get_cpu_gdt_table(cpu);
13296 save_desc_40 = gdt[0x40 / 8];
13297 +
13298 + pax_open_kernel();
13299 gdt[0x40 / 8] = bad_bios_desc;
13300 + pax_close_kernel();
13301
13302 apm_irq_save(flags);
13303 APM_DO_SAVE_SEGS;
13304 @@ -598,7 +601,11 @@ static long __apm_bios_call(void *_call)
13305 &call->esi);
13306 APM_DO_RESTORE_SEGS;
13307 apm_irq_restore(flags);
13308 +
13309 + pax_open_kernel();
13310 gdt[0x40 / 8] = save_desc_40;
13311 + pax_close_kernel();
13312 +
13313 put_cpu();
13314
13315 return call->eax & 0xff;
13316 @@ -665,7 +672,10 @@ static long __apm_bios_call_simple(void *_call)
13317 BUG_ON(cpu != 0);
13318 gdt = get_cpu_gdt_table(cpu);
13319 save_desc_40 = gdt[0x40 / 8];
13320 +
13321 + pax_open_kernel();
13322 gdt[0x40 / 8] = bad_bios_desc;
13323 + pax_close_kernel();
13324
13325 apm_irq_save(flags);
13326 APM_DO_SAVE_SEGS;
13327 @@ -673,7 +683,11 @@ static long __apm_bios_call_simple(void *_call)
13328 &call->eax);
13329 APM_DO_RESTORE_SEGS;
13330 apm_irq_restore(flags);
13331 +
13332 + pax_open_kernel();
13333 gdt[0x40 / 8] = save_desc_40;
13334 + pax_close_kernel();
13335 +
13336 put_cpu();
13337 return error;
13338 }
13339 @@ -2347,12 +2361,15 @@ static int __init apm_init(void)
13340 * code to that CPU.
13341 */
13342 gdt = get_cpu_gdt_table(0);
13343 +
13344 + pax_open_kernel();
13345 set_desc_base(&gdt[APM_CS >> 3],
13346 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
13347 set_desc_base(&gdt[APM_CS_16 >> 3],
13348 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
13349 set_desc_base(&gdt[APM_DS >> 3],
13350 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
13351 + pax_close_kernel();
13352
13353 proc_create("apm", 0, NULL, &apm_file_ops);
13354
13355 diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
13356 index 68de2dc..1f3c720 100644
13357 --- a/arch/x86/kernel/asm-offsets.c
13358 +++ b/arch/x86/kernel/asm-offsets.c
13359 @@ -33,6 +33,8 @@ void common(void) {
13360 OFFSET(TI_status, thread_info, status);
13361 OFFSET(TI_addr_limit, thread_info, addr_limit);
13362 OFFSET(TI_preempt_count, thread_info, preempt_count);
13363 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
13364 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
13365
13366 BLANK();
13367 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
13368 @@ -53,8 +55,26 @@ void common(void) {
13369 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
13370 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
13371 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
13372 +
13373 +#ifdef CONFIG_PAX_KERNEXEC
13374 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
13375 #endif
13376
13377 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13378 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
13379 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
13380 +#ifdef CONFIG_X86_64
13381 + OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
13382 +#endif
13383 +#endif
13384 +
13385 +#endif
13386 +
13387 + BLANK();
13388 + DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
13389 + DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
13390 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
13391 +
13392 #ifdef CONFIG_XEN
13393 BLANK();
13394 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
13395 diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
13396 index 834e897..dacddc8 100644
13397 --- a/arch/x86/kernel/asm-offsets_64.c
13398 +++ b/arch/x86/kernel/asm-offsets_64.c
13399 @@ -70,6 +70,7 @@ int main(void)
13400 BLANK();
13401 #undef ENTRY
13402
13403 + DEFINE(TSS_size, sizeof(struct tss_struct));
13404 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
13405 BLANK();
13406
13407 diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
13408 index 25f24dc..4094a7f 100644
13409 --- a/arch/x86/kernel/cpu/Makefile
13410 +++ b/arch/x86/kernel/cpu/Makefile
13411 @@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
13412 CFLAGS_REMOVE_perf_event.o = -pg
13413 endif
13414
13415 -# Make sure load_percpu_segment has no stackprotector
13416 -nostackp := $(call cc-option, -fno-stack-protector)
13417 -CFLAGS_common.o := $(nostackp)
13418 -
13419 obj-y := intel_cacheinfo.o scattered.o topology.o
13420 obj-y += proc.o capflags.o powerflags.o common.o
13421 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
13422 diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
13423 index f4773f4..b3fb13c 100644
13424 --- a/arch/x86/kernel/cpu/amd.c
13425 +++ b/arch/x86/kernel/cpu/amd.c
13426 @@ -669,7 +669,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
13427 unsigned int size)
13428 {
13429 /* AMD errata T13 (order #21922) */
13430 - if ((c->x86 == 6)) {
13431 + if (c->x86 == 6) {
13432 /* Duron Rev A0 */
13433 if (c->x86_model == 3 && c->x86_mask == 0)
13434 size = 64;
13435 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
13436 index c0f7d68..aa418f9 100644
13437 --- a/arch/x86/kernel/cpu/common.c
13438 +++ b/arch/x86/kernel/cpu/common.c
13439 @@ -84,60 +84,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
13440
13441 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
13442
13443 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
13444 -#ifdef CONFIG_X86_64
13445 - /*
13446 - * We need valid kernel segments for data and code in long mode too
13447 - * IRET will check the segment types kkeil 2000/10/28
13448 - * Also sysret mandates a special GDT layout
13449 - *
13450 - * TLS descriptors are currently at a different place compared to i386.
13451 - * Hopefully nobody expects them at a fixed place (Wine?)
13452 - */
13453 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
13454 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
13455 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
13456 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
13457 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
13458 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
13459 -#else
13460 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
13461 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13462 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
13463 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
13464 - /*
13465 - * Segments used for calling PnP BIOS have byte granularity.
13466 - * They code segments and data segments have fixed 64k limits,
13467 - * the transfer segment sizes are set at run time.
13468 - */
13469 - /* 32-bit code */
13470 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
13471 - /* 16-bit code */
13472 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
13473 - /* 16-bit data */
13474 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
13475 - /* 16-bit data */
13476 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
13477 - /* 16-bit data */
13478 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
13479 - /*
13480 - * The APM segments have byte granularity and their bases
13481 - * are set at run time. All have 64k limits.
13482 - */
13483 - /* 32-bit code */
13484 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
13485 - /* 16-bit code */
13486 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
13487 - /* data */
13488 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
13489 -
13490 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13491 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13492 - GDT_STACK_CANARY_INIT
13493 -#endif
13494 -} };
13495 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
13496 -
13497 static int __init x86_xsave_setup(char *s)
13498 {
13499 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
13500 @@ -372,7 +318,7 @@ void switch_to_new_gdt(int cpu)
13501 {
13502 struct desc_ptr gdt_descr;
13503
13504 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
13505 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
13506 gdt_descr.size = GDT_SIZE - 1;
13507 load_gdt(&gdt_descr);
13508 /* Reload the per-cpu base */
13509 @@ -839,6 +785,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
13510 /* Filter out anything that depends on CPUID levels we don't have */
13511 filter_cpuid_features(c, true);
13512
13513 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
13514 + setup_clear_cpu_cap(X86_FEATURE_SEP);
13515 +#endif
13516 +
13517 /* If the model name is still unset, do table lookup. */
13518 if (!c->x86_model_id[0]) {
13519 const char *p;
13520 @@ -1019,10 +969,12 @@ static __init int setup_disablecpuid(char *arg)
13521 }
13522 __setup("clearcpuid=", setup_disablecpuid);
13523
13524 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
13525 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
13526 +
13527 #ifdef CONFIG_X86_64
13528 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
13529 -struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1,
13530 - (unsigned long) nmi_idt_table };
13531 +struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) nmi_idt_table };
13532
13533 DEFINE_PER_CPU_FIRST(union irq_stack_union,
13534 irq_stack_union) __aligned(PAGE_SIZE);
13535 @@ -1036,7 +988,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
13536 EXPORT_PER_CPU_SYMBOL(current_task);
13537
13538 DEFINE_PER_CPU(unsigned long, kernel_stack) =
13539 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
13540 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
13541 EXPORT_PER_CPU_SYMBOL(kernel_stack);
13542
13543 DEFINE_PER_CPU(char *, irq_stack_ptr) =
13544 @@ -1126,7 +1078,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
13545 {
13546 memset(regs, 0, sizeof(struct pt_regs));
13547 regs->fs = __KERNEL_PERCPU;
13548 - regs->gs = __KERNEL_STACK_CANARY;
13549 + savesegment(gs, regs->gs);
13550
13551 return regs;
13552 }
13553 @@ -1190,7 +1142,7 @@ void __cpuinit cpu_init(void)
13554 int i;
13555
13556 cpu = stack_smp_processor_id();
13557 - t = &per_cpu(init_tss, cpu);
13558 + t = init_tss + cpu;
13559 oist = &per_cpu(orig_ist, cpu);
13560
13561 #ifdef CONFIG_NUMA
13562 @@ -1216,7 +1168,7 @@ void __cpuinit cpu_init(void)
13563 switch_to_new_gdt(cpu);
13564 loadsegment(fs, 0);
13565
13566 - load_idt((const struct desc_ptr *)&idt_descr);
13567 + load_idt(&idt_descr);
13568
13569 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
13570 syscall_init();
13571 @@ -1225,7 +1177,6 @@ void __cpuinit cpu_init(void)
13572 wrmsrl(MSR_KERNEL_GS_BASE, 0);
13573 barrier();
13574
13575 - x86_configure_nx();
13576 if (cpu != 0)
13577 enable_x2apic();
13578
13579 @@ -1281,7 +1232,7 @@ void __cpuinit cpu_init(void)
13580 {
13581 int cpu = smp_processor_id();
13582 struct task_struct *curr = current;
13583 - struct tss_struct *t = &per_cpu(init_tss, cpu);
13584 + struct tss_struct *t = init_tss + cpu;
13585 struct thread_struct *thread = &curr->thread;
13586
13587 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
13588 diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
13589 index 3e6ff6c..54b4992 100644
13590 --- a/arch/x86/kernel/cpu/intel.c
13591 +++ b/arch/x86/kernel/cpu/intel.c
13592 @@ -174,7 +174,7 @@ static void __cpuinit trap_init_f00f_bug(void)
13593 * Update the IDT descriptor and reload the IDT so that
13594 * it uses the read-only mapped virtual address.
13595 */
13596 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
13597 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
13598 load_idt(&idt_descr);
13599 }
13600 #endif
13601 diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
13602 index 5a11ae2..a1a1c8a 100644
13603 --- a/arch/x86/kernel/cpu/mcheck/mce.c
13604 +++ b/arch/x86/kernel/cpu/mcheck/mce.c
13605 @@ -42,6 +42,7 @@
13606 #include <asm/processor.h>
13607 #include <asm/mce.h>
13608 #include <asm/msr.h>
13609 +#include <asm/local.h>
13610
13611 #include "mce-internal.h"
13612
13613 @@ -250,7 +251,7 @@ static void print_mce(struct mce *m)
13614 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
13615 m->cs, m->ip);
13616
13617 - if (m->cs == __KERNEL_CS)
13618 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
13619 print_symbol("{%s}", m->ip);
13620 pr_cont("\n");
13621 }
13622 @@ -283,10 +284,10 @@ static void print_mce(struct mce *m)
13623
13624 #define PANIC_TIMEOUT 5 /* 5 seconds */
13625
13626 -static atomic_t mce_paniced;
13627 +static atomic_unchecked_t mce_paniced;
13628
13629 static int fake_panic;
13630 -static atomic_t mce_fake_paniced;
13631 +static atomic_unchecked_t mce_fake_paniced;
13632
13633 /* Panic in progress. Enable interrupts and wait for final IPI */
13634 static void wait_for_panic(void)
13635 @@ -310,7 +311,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
13636 /*
13637 * Make sure only one CPU runs in machine check panic
13638 */
13639 - if (atomic_inc_return(&mce_paniced) > 1)
13640 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
13641 wait_for_panic();
13642 barrier();
13643
13644 @@ -318,7 +319,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
13645 console_verbose();
13646 } else {
13647 /* Don't log too much for fake panic */
13648 - if (atomic_inc_return(&mce_fake_paniced) > 1)
13649 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
13650 return;
13651 }
13652 /* First print corrected ones that are still unlogged */
13653 @@ -658,7 +659,7 @@ static int mce_timed_out(u64 *t)
13654 * might have been modified by someone else.
13655 */
13656 rmb();
13657 - if (atomic_read(&mce_paniced))
13658 + if (atomic_read_unchecked(&mce_paniced))
13659 wait_for_panic();
13660 if (!monarch_timeout)
13661 goto out;
13662 @@ -1446,7 +1447,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
13663 }
13664
13665 /* Call the installed machine check handler for this CPU setup. */
13666 -void (*machine_check_vector)(struct pt_regs *, long error_code) =
13667 +void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
13668 unexpected_machine_check;
13669
13670 /*
13671 @@ -1469,7 +1470,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
13672 return;
13673 }
13674
13675 + pax_open_kernel();
13676 machine_check_vector = do_machine_check;
13677 + pax_close_kernel();
13678
13679 __mcheck_cpu_init_generic();
13680 __mcheck_cpu_init_vendor(c);
13681 @@ -1483,7 +1486,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
13682 */
13683
13684 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
13685 -static int mce_chrdev_open_count; /* #times opened */
13686 +static local_t mce_chrdev_open_count; /* #times opened */
13687 static int mce_chrdev_open_exclu; /* already open exclusive? */
13688
13689 static int mce_chrdev_open(struct inode *inode, struct file *file)
13690 @@ -1491,7 +1494,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
13691 spin_lock(&mce_chrdev_state_lock);
13692
13693 if (mce_chrdev_open_exclu ||
13694 - (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
13695 + (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
13696 spin_unlock(&mce_chrdev_state_lock);
13697
13698 return -EBUSY;
13699 @@ -1499,7 +1502,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
13700
13701 if (file->f_flags & O_EXCL)
13702 mce_chrdev_open_exclu = 1;
13703 - mce_chrdev_open_count++;
13704 + local_inc(&mce_chrdev_open_count);
13705
13706 spin_unlock(&mce_chrdev_state_lock);
13707
13708 @@ -1510,7 +1513,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
13709 {
13710 spin_lock(&mce_chrdev_state_lock);
13711
13712 - mce_chrdev_open_count--;
13713 + local_dec(&mce_chrdev_open_count);
13714 mce_chrdev_open_exclu = 0;
13715
13716 spin_unlock(&mce_chrdev_state_lock);
13717 @@ -2229,7 +2232,7 @@ struct dentry *mce_get_debugfs_dir(void)
13718 static void mce_reset(void)
13719 {
13720 cpu_missing = 0;
13721 - atomic_set(&mce_fake_paniced, 0);
13722 + atomic_set_unchecked(&mce_fake_paniced, 0);
13723 atomic_set(&mce_executing, 0);
13724 atomic_set(&mce_callin, 0);
13725 atomic_set(&global_nwo, 0);
13726 diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
13727 index 5c0e653..0882b0a 100644
13728 --- a/arch/x86/kernel/cpu/mcheck/p5.c
13729 +++ b/arch/x86/kernel/cpu/mcheck/p5.c
13730 @@ -12,6 +12,7 @@
13731 #include <asm/system.h>
13732 #include <asm/mce.h>
13733 #include <asm/msr.h>
13734 +#include <asm/pgtable.h>
13735
13736 /* By default disabled */
13737 int mce_p5_enabled __read_mostly;
13738 @@ -50,7 +51,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
13739 if (!cpu_has(c, X86_FEATURE_MCE))
13740 return;
13741
13742 + pax_open_kernel();
13743 machine_check_vector = pentium_machine_check;
13744 + pax_close_kernel();
13745 /* Make sure the vector pointer is visible before we enable MCEs: */
13746 wmb();
13747
13748 diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
13749 index 54060f5..c1a7577 100644
13750 --- a/arch/x86/kernel/cpu/mcheck/winchip.c
13751 +++ b/arch/x86/kernel/cpu/mcheck/winchip.c
13752 @@ -11,6 +11,7 @@
13753 #include <asm/system.h>
13754 #include <asm/mce.h>
13755 #include <asm/msr.h>
13756 +#include <asm/pgtable.h>
13757
13758 /* Machine check handler for WinChip C6: */
13759 static void winchip_machine_check(struct pt_regs *regs, long error_code)
13760 @@ -24,7 +25,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
13761 {
13762 u32 lo, hi;
13763
13764 + pax_open_kernel();
13765 machine_check_vector = winchip_machine_check;
13766 + pax_close_kernel();
13767 /* Make sure the vector pointer is visible before we enable MCEs: */
13768 wmb();
13769
13770 diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
13771 index 6b96110..0da73eb 100644
13772 --- a/arch/x86/kernel/cpu/mtrr/main.c
13773 +++ b/arch/x86/kernel/cpu/mtrr/main.c
13774 @@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
13775 u64 size_or_mask, size_and_mask;
13776 static bool mtrr_aps_delayed_init;
13777
13778 -static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
13779 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
13780
13781 const struct mtrr_ops *mtrr_if;
13782
13783 diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
13784 index df5e41f..816c719 100644
13785 --- a/arch/x86/kernel/cpu/mtrr/mtrr.h
13786 +++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
13787 @@ -25,7 +25,7 @@ struct mtrr_ops {
13788 int (*validate_add_page)(unsigned long base, unsigned long size,
13789 unsigned int type);
13790 int (*have_wrcomb)(void);
13791 -};
13792 +} __do_const;
13793
13794 extern int generic_get_free_region(unsigned long base, unsigned long size,
13795 int replace_reg);
13796 diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
13797 index 5adce10..99284ec 100644
13798 --- a/arch/x86/kernel/cpu/perf_event.c
13799 +++ b/arch/x86/kernel/cpu/perf_event.c
13800 @@ -1665,7 +1665,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
13801 break;
13802
13803 perf_callchain_store(entry, frame.return_address);
13804 - fp = frame.next_frame;
13805 + fp = (const void __force_user *)frame.next_frame;
13806 }
13807 }
13808
13809 diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
13810 index 13ad899..f642b9a 100644
13811 --- a/arch/x86/kernel/crash.c
13812 +++ b/arch/x86/kernel/crash.c
13813 @@ -36,10 +36,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
13814 {
13815 #ifdef CONFIG_X86_32
13816 struct pt_regs fixed_regs;
13817 -#endif
13818
13819 -#ifdef CONFIG_X86_32
13820 - if (!user_mode_vm(regs)) {
13821 + if (!user_mode(regs)) {
13822 crash_fixup_ss_esp(&fixed_regs, regs);
13823 regs = &fixed_regs;
13824 }
13825 diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
13826 index 37250fe..bf2ec74 100644
13827 --- a/arch/x86/kernel/doublefault_32.c
13828 +++ b/arch/x86/kernel/doublefault_32.c
13829 @@ -11,7 +11,7 @@
13830
13831 #define DOUBLEFAULT_STACKSIZE (1024)
13832 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
13833 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
13834 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
13835
13836 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
13837
13838 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
13839 unsigned long gdt, tss;
13840
13841 store_gdt(&gdt_desc);
13842 - gdt = gdt_desc.address;
13843 + gdt = (unsigned long)gdt_desc.address;
13844
13845 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
13846
13847 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
13848 /* 0x2 bit is always set */
13849 .flags = X86_EFLAGS_SF | 0x2,
13850 .sp = STACK_START,
13851 - .es = __USER_DS,
13852 + .es = __KERNEL_DS,
13853 .cs = __KERNEL_CS,
13854 .ss = __KERNEL_DS,
13855 - .ds = __USER_DS,
13856 + .ds = __KERNEL_DS,
13857 .fs = __KERNEL_PERCPU,
13858
13859 .__cr3 = __pa_nodebug(swapper_pg_dir),
13860 diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
13861 index 4025fe4..d8451c6 100644
13862 --- a/arch/x86/kernel/dumpstack.c
13863 +++ b/arch/x86/kernel/dumpstack.c
13864 @@ -2,6 +2,9 @@
13865 * Copyright (C) 1991, 1992 Linus Torvalds
13866 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
13867 */
13868 +#ifdef CONFIG_GRKERNSEC_HIDESYM
13869 +#define __INCLUDED_BY_HIDESYM 1
13870 +#endif
13871 #include <linux/kallsyms.h>
13872 #include <linux/kprobes.h>
13873 #include <linux/uaccess.h>
13874 @@ -35,9 +38,8 @@ void printk_address(unsigned long address, int reliable)
13875 static void
13876 print_ftrace_graph_addr(unsigned long addr, void *data,
13877 const struct stacktrace_ops *ops,
13878 - struct thread_info *tinfo, int *graph)
13879 + struct task_struct *task, int *graph)
13880 {
13881 - struct task_struct *task = tinfo->task;
13882 unsigned long ret_addr;
13883 int index = task->curr_ret_stack;
13884
13885 @@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
13886 static inline void
13887 print_ftrace_graph_addr(unsigned long addr, void *data,
13888 const struct stacktrace_ops *ops,
13889 - struct thread_info *tinfo, int *graph)
13890 + struct task_struct *task, int *graph)
13891 { }
13892 #endif
13893
13894 @@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
13895 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
13896 */
13897
13898 -static inline int valid_stack_ptr(struct thread_info *tinfo,
13899 - void *p, unsigned int size, void *end)
13900 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
13901 {
13902 - void *t = tinfo;
13903 if (end) {
13904 if (p < end && p >= (end-THREAD_SIZE))
13905 return 1;
13906 @@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
13907 }
13908
13909 unsigned long
13910 -print_context_stack(struct thread_info *tinfo,
13911 +print_context_stack(struct task_struct *task, void *stack_start,
13912 unsigned long *stack, unsigned long bp,
13913 const struct stacktrace_ops *ops, void *data,
13914 unsigned long *end, int *graph)
13915 {
13916 struct stack_frame *frame = (struct stack_frame *)bp;
13917
13918 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
13919 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
13920 unsigned long addr;
13921
13922 addr = *stack;
13923 @@ -102,7 +102,7 @@ print_context_stack(struct thread_info *tinfo,
13924 } else {
13925 ops->address(data, addr, 0);
13926 }
13927 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
13928 + print_ftrace_graph_addr(addr, data, ops, task, graph);
13929 }
13930 stack++;
13931 }
13932 @@ -111,7 +111,7 @@ print_context_stack(struct thread_info *tinfo,
13933 EXPORT_SYMBOL_GPL(print_context_stack);
13934
13935 unsigned long
13936 -print_context_stack_bp(struct thread_info *tinfo,
13937 +print_context_stack_bp(struct task_struct *task, void *stack_start,
13938 unsigned long *stack, unsigned long bp,
13939 const struct stacktrace_ops *ops, void *data,
13940 unsigned long *end, int *graph)
13941 @@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_info *tinfo,
13942 struct stack_frame *frame = (struct stack_frame *)bp;
13943 unsigned long *ret_addr = &frame->return_address;
13944
13945 - while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
13946 + while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
13947 unsigned long addr = *ret_addr;
13948
13949 if (!__kernel_text_address(addr))
13950 @@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_info *tinfo,
13951 ops->address(data, addr, 1);
13952 frame = frame->next_frame;
13953 ret_addr = &frame->return_address;
13954 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
13955 + print_ftrace_graph_addr(addr, data, ops, task, graph);
13956 }
13957
13958 return (unsigned long)frame;
13959 @@ -186,7 +186,7 @@ void dump_stack(void)
13960
13961 bp = stack_frame(current, NULL);
13962 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
13963 - current->pid, current->comm, print_tainted(),
13964 + task_pid_nr(current), current->comm, print_tainted(),
13965 init_utsname()->release,
13966 (int)strcspn(init_utsname()->version, " "),
13967 init_utsname()->version);
13968 @@ -222,6 +222,8 @@ unsigned __kprobes long oops_begin(void)
13969 }
13970 EXPORT_SYMBOL_GPL(oops_begin);
13971
13972 +extern void gr_handle_kernel_exploit(void);
13973 +
13974 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
13975 {
13976 if (regs && kexec_should_crash(current))
13977 @@ -243,7 +245,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
13978 panic("Fatal exception in interrupt");
13979 if (panic_on_oops)
13980 panic("Fatal exception");
13981 - do_exit(signr);
13982 +
13983 + gr_handle_kernel_exploit();
13984 +
13985 + do_group_exit(signr);
13986 }
13987
13988 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
13989 @@ -270,7 +275,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
13990
13991 show_registers(regs);
13992 #ifdef CONFIG_X86_32
13993 - if (user_mode_vm(regs)) {
13994 + if (user_mode(regs)) {
13995 sp = regs->sp;
13996 ss = regs->ss & 0xffff;
13997 } else {
13998 @@ -298,7 +303,7 @@ void die(const char *str, struct pt_regs *regs, long err)
13999 unsigned long flags = oops_begin();
14000 int sig = SIGSEGV;
14001
14002 - if (!user_mode_vm(regs))
14003 + if (!user_mode(regs))
14004 report_bug(regs->ip, regs);
14005
14006 if (__die(str, regs, err))
14007 diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
14008 index c99f9ed..2a15d80 100644
14009 --- a/arch/x86/kernel/dumpstack_32.c
14010 +++ b/arch/x86/kernel/dumpstack_32.c
14011 @@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14012 bp = stack_frame(task, regs);
14013
14014 for (;;) {
14015 - struct thread_info *context;
14016 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14017
14018 - context = (struct thread_info *)
14019 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
14020 - bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
14021 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14022
14023 - stack = (unsigned long *)context->previous_esp;
14024 - if (!stack)
14025 + if (stack_start == task_stack_page(task))
14026 break;
14027 + stack = *(unsigned long **)stack_start;
14028 if (ops->stack(data, "IRQ") < 0)
14029 break;
14030 touch_nmi_watchdog();
14031 @@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs)
14032 * When in-kernel, we also print out the stack and code at the
14033 * time of the fault..
14034 */
14035 - if (!user_mode_vm(regs)) {
14036 + if (!user_mode(regs)) {
14037 unsigned int code_prologue = code_bytes * 43 / 64;
14038 unsigned int code_len = code_bytes;
14039 unsigned char c;
14040 u8 *ip;
14041 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
14042
14043 printk(KERN_EMERG "Stack:\n");
14044 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
14045
14046 printk(KERN_EMERG "Code: ");
14047
14048 - ip = (u8 *)regs->ip - code_prologue;
14049 + ip = (u8 *)regs->ip - code_prologue + cs_base;
14050 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
14051 /* try starting at IP */
14052 - ip = (u8 *)regs->ip;
14053 + ip = (u8 *)regs->ip + cs_base;
14054 code_len = code_len - code_prologue + 1;
14055 }
14056 for (i = 0; i < code_len; i++, ip++) {
14057 @@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs)
14058 printk(KERN_CONT " Bad EIP value.");
14059 break;
14060 }
14061 - if (ip == (u8 *)regs->ip)
14062 + if (ip == (u8 *)regs->ip + cs_base)
14063 printk(KERN_CONT "<%02x> ", c);
14064 else
14065 printk(KERN_CONT "%02x ", c);
14066 @@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
14067 {
14068 unsigned short ud2;
14069
14070 + ip = ktla_ktva(ip);
14071 if (ip < PAGE_OFFSET)
14072 return 0;
14073 if (probe_kernel_address((unsigned short *)ip, ud2))
14074 @@ -139,3 +139,15 @@ int is_valid_bugaddr(unsigned long ip)
14075
14076 return ud2 == 0x0b0f;
14077 }
14078 +
14079 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14080 +void pax_check_alloca(unsigned long size)
14081 +{
14082 + unsigned long sp = (unsigned long)&sp, stack_left;
14083 +
14084 + /* all kernel stacks are of the same size */
14085 + stack_left = sp & (THREAD_SIZE - 1);
14086 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14087 +}
14088 +EXPORT_SYMBOL(pax_check_alloca);
14089 +#endif
14090 diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
14091 index 17107bd..b2deecf 100644
14092 --- a/arch/x86/kernel/dumpstack_64.c
14093 +++ b/arch/x86/kernel/dumpstack_64.c
14094 @@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14095 unsigned long *irq_stack_end =
14096 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
14097 unsigned used = 0;
14098 - struct thread_info *tinfo;
14099 int graph = 0;
14100 unsigned long dummy;
14101 + void *stack_start;
14102
14103 if (!task)
14104 task = current;
14105 @@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14106 * current stack address. If the stacks consist of nested
14107 * exceptions
14108 */
14109 - tinfo = task_thread_info(task);
14110 for (;;) {
14111 char *id;
14112 unsigned long *estack_end;
14113 +
14114 estack_end = in_exception_stack(cpu, (unsigned long)stack,
14115 &used, &id);
14116
14117 @@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14118 if (ops->stack(data, id) < 0)
14119 break;
14120
14121 - bp = ops->walk_stack(tinfo, stack, bp, ops,
14122 + bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
14123 data, estack_end, &graph);
14124 ops->stack(data, "<EOE>");
14125 /*
14126 @@ -172,7 +172,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14127 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
14128 if (ops->stack(data, "IRQ") < 0)
14129 break;
14130 - bp = ops->walk_stack(tinfo, stack, bp,
14131 + bp = ops->walk_stack(task, irq_stack, stack, bp,
14132 ops, data, irq_stack_end, &graph);
14133 /*
14134 * We link to the next stack (which would be
14135 @@ -191,7 +191,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14136 /*
14137 * This handles the process stack:
14138 */
14139 - bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
14140 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14141 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14142 put_cpu();
14143 }
14144 EXPORT_SYMBOL(dump_trace);
14145 @@ -305,3 +306,50 @@ int is_valid_bugaddr(unsigned long ip)
14146
14147 return ud2 == 0x0b0f;
14148 }
14149 +
14150 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14151 +void pax_check_alloca(unsigned long size)
14152 +{
14153 + unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
14154 + unsigned cpu, used;
14155 + char *id;
14156 +
14157 + /* check the process stack first */
14158 + stack_start = (unsigned long)task_stack_page(current);
14159 + stack_end = stack_start + THREAD_SIZE;
14160 + if (likely(stack_start <= sp && sp < stack_end)) {
14161 + unsigned long stack_left = sp & (THREAD_SIZE - 1);
14162 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14163 + return;
14164 + }
14165 +
14166 + cpu = get_cpu();
14167 +
14168 + /* check the irq stacks */
14169 + stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
14170 + stack_start = stack_end - IRQ_STACK_SIZE;
14171 + if (stack_start <= sp && sp < stack_end) {
14172 + unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
14173 + put_cpu();
14174 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14175 + return;
14176 + }
14177 +
14178 + /* check the exception stacks */
14179 + used = 0;
14180 + stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
14181 + stack_start = stack_end - EXCEPTION_STKSZ;
14182 + if (stack_end && stack_start <= sp && sp < stack_end) {
14183 + unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
14184 + put_cpu();
14185 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14186 + return;
14187 + }
14188 +
14189 + put_cpu();
14190 +
14191 + /* unknown stack */
14192 + BUG();
14193 +}
14194 +EXPORT_SYMBOL(pax_check_alloca);
14195 +#endif
14196 diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
14197 index 9b9f18b..9fcaa04 100644
14198 --- a/arch/x86/kernel/early_printk.c
14199 +++ b/arch/x86/kernel/early_printk.c
14200 @@ -7,6 +7,7 @@
14201 #include <linux/pci_regs.h>
14202 #include <linux/pci_ids.h>
14203 #include <linux/errno.h>
14204 +#include <linux/sched.h>
14205 #include <asm/io.h>
14206 #include <asm/processor.h>
14207 #include <asm/fcntl.h>
14208 diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
14209 index 79d97e6..76aaad7 100644
14210 --- a/arch/x86/kernel/entry_32.S
14211 +++ b/arch/x86/kernel/entry_32.S
14212 @@ -98,12 +98,6 @@
14213 #endif
14214 .endm
14215
14216 -#ifdef CONFIG_VM86
14217 -#define resume_userspace_sig check_userspace
14218 -#else
14219 -#define resume_userspace_sig resume_userspace
14220 -#endif
14221 -
14222 /*
14223 * User gs save/restore
14224 *
14225 @@ -185,13 +179,146 @@
14226 /*CFI_REL_OFFSET gs, PT_GS*/
14227 .endm
14228 .macro SET_KERNEL_GS reg
14229 +
14230 +#ifdef CONFIG_CC_STACKPROTECTOR
14231 movl $(__KERNEL_STACK_CANARY), \reg
14232 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14233 + movl $(__USER_DS), \reg
14234 +#else
14235 + xorl \reg, \reg
14236 +#endif
14237 +
14238 movl \reg, %gs
14239 .endm
14240
14241 #endif /* CONFIG_X86_32_LAZY_GS */
14242
14243 -.macro SAVE_ALL
14244 +.macro pax_enter_kernel
14245 +#ifdef CONFIG_PAX_KERNEXEC
14246 + call pax_enter_kernel
14247 +#endif
14248 +.endm
14249 +
14250 +.macro pax_exit_kernel
14251 +#ifdef CONFIG_PAX_KERNEXEC
14252 + call pax_exit_kernel
14253 +#endif
14254 +.endm
14255 +
14256 +#ifdef CONFIG_PAX_KERNEXEC
14257 +ENTRY(pax_enter_kernel)
14258 +#ifdef CONFIG_PARAVIRT
14259 + pushl %eax
14260 + pushl %ecx
14261 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
14262 + mov %eax, %esi
14263 +#else
14264 + mov %cr0, %esi
14265 +#endif
14266 + bts $16, %esi
14267 + jnc 1f
14268 + mov %cs, %esi
14269 + cmp $__KERNEL_CS, %esi
14270 + jz 3f
14271 + ljmp $__KERNEL_CS, $3f
14272 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
14273 +2:
14274 +#ifdef CONFIG_PARAVIRT
14275 + mov %esi, %eax
14276 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
14277 +#else
14278 + mov %esi, %cr0
14279 +#endif
14280 +3:
14281 +#ifdef CONFIG_PARAVIRT
14282 + popl %ecx
14283 + popl %eax
14284 +#endif
14285 + ret
14286 +ENDPROC(pax_enter_kernel)
14287 +
14288 +ENTRY(pax_exit_kernel)
14289 +#ifdef CONFIG_PARAVIRT
14290 + pushl %eax
14291 + pushl %ecx
14292 +#endif
14293 + mov %cs, %esi
14294 + cmp $__KERNEXEC_KERNEL_CS, %esi
14295 + jnz 2f
14296 +#ifdef CONFIG_PARAVIRT
14297 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
14298 + mov %eax, %esi
14299 +#else
14300 + mov %cr0, %esi
14301 +#endif
14302 + btr $16, %esi
14303 + ljmp $__KERNEL_CS, $1f
14304 +1:
14305 +#ifdef CONFIG_PARAVIRT
14306 + mov %esi, %eax
14307 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
14308 +#else
14309 + mov %esi, %cr0
14310 +#endif
14311 +2:
14312 +#ifdef CONFIG_PARAVIRT
14313 + popl %ecx
14314 + popl %eax
14315 +#endif
14316 + ret
14317 +ENDPROC(pax_exit_kernel)
14318 +#endif
14319 +
14320 +.macro pax_erase_kstack
14321 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14322 + call pax_erase_kstack
14323 +#endif
14324 +.endm
14325 +
14326 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14327 +/*
14328 + * ebp: thread_info
14329 + * ecx, edx: can be clobbered
14330 + */
14331 +ENTRY(pax_erase_kstack)
14332 + pushl %edi
14333 + pushl %eax
14334 +
14335 + mov TI_lowest_stack(%ebp), %edi
14336 + mov $-0xBEEF, %eax
14337 + std
14338 +
14339 +1: mov %edi, %ecx
14340 + and $THREAD_SIZE_asm - 1, %ecx
14341 + shr $2, %ecx
14342 + repne scasl
14343 + jecxz 2f
14344 +
14345 + cmp $2*16, %ecx
14346 + jc 2f
14347 +
14348 + mov $2*16, %ecx
14349 + repe scasl
14350 + jecxz 2f
14351 + jne 1b
14352 +
14353 +2: cld
14354 + mov %esp, %ecx
14355 + sub %edi, %ecx
14356 + shr $2, %ecx
14357 + rep stosl
14358 +
14359 + mov TI_task_thread_sp0(%ebp), %edi
14360 + sub $128, %edi
14361 + mov %edi, TI_lowest_stack(%ebp)
14362 +
14363 + popl %eax
14364 + popl %edi
14365 + ret
14366 +ENDPROC(pax_erase_kstack)
14367 +#endif
14368 +
14369 +.macro __SAVE_ALL _DS
14370 cld
14371 PUSH_GS
14372 pushl_cfi %fs
14373 @@ -214,7 +341,7 @@
14374 CFI_REL_OFFSET ecx, 0
14375 pushl_cfi %ebx
14376 CFI_REL_OFFSET ebx, 0
14377 - movl $(__USER_DS), %edx
14378 + movl $\_DS, %edx
14379 movl %edx, %ds
14380 movl %edx, %es
14381 movl $(__KERNEL_PERCPU), %edx
14382 @@ -222,6 +349,15 @@
14383 SET_KERNEL_GS %edx
14384 .endm
14385
14386 +.macro SAVE_ALL
14387 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
14388 + __SAVE_ALL __KERNEL_DS
14389 + pax_enter_kernel
14390 +#else
14391 + __SAVE_ALL __USER_DS
14392 +#endif
14393 +.endm
14394 +
14395 .macro RESTORE_INT_REGS
14396 popl_cfi %ebx
14397 CFI_RESTORE ebx
14398 @@ -307,7 +443,7 @@ ENTRY(ret_from_fork)
14399 popfl_cfi
14400 jmp syscall_exit
14401 CFI_ENDPROC
14402 -END(ret_from_fork)
14403 +ENDPROC(ret_from_fork)
14404
14405 /*
14406 * Interrupt exit functions should be protected against kprobes
14407 @@ -327,12 +463,29 @@ ret_from_exception:
14408 preempt_stop(CLBR_ANY)
14409 ret_from_intr:
14410 GET_THREAD_INFO(%ebp)
14411 -check_userspace:
14412 +resume_userspace_sig:
14413 +#ifdef CONFIG_VM86
14414 movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS
14415 movb PT_CS(%esp), %al
14416 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
14417 +#else
14418 + /*
14419 + * We can be coming here from a syscall done in the kernel space,
14420 + * e.g. a failed kernel_execve().
14421 + */
14422 + movl PT_CS(%esp), %eax
14423 + andl $SEGMENT_RPL_MASK, %eax
14424 +#endif
14425 cmpl $USER_RPL, %eax
14426 +
14427 +#ifdef CONFIG_PAX_KERNEXEC
14428 + jae resume_userspace
14429 +
14430 + PAX_EXIT_KERNEL
14431 + jmp resume_kernel
14432 +#else
14433 jb resume_kernel # not returning to v8086 or userspace
14434 +#endif
14435
14436 ENTRY(resume_userspace)
14437 LOCKDEP_SYS_EXIT
14438 @@ -344,8 +497,8 @@ ENTRY(resume_userspace)
14439 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
14440 # int/exception return?
14441 jne work_pending
14442 - jmp restore_all
14443 -END(ret_from_exception)
14444 + jmp restore_all_pax
14445 +ENDPROC(ret_from_exception)
14446
14447 #ifdef CONFIG_PREEMPT
14448 ENTRY(resume_kernel)
14449 @@ -360,7 +513,7 @@ need_resched:
14450 jz restore_all
14451 call preempt_schedule_irq
14452 jmp need_resched
14453 -END(resume_kernel)
14454 +ENDPROC(resume_kernel)
14455 #endif
14456 CFI_ENDPROC
14457 /*
14458 @@ -394,23 +547,34 @@ sysenter_past_esp:
14459 /*CFI_REL_OFFSET cs, 0*/
14460 /*
14461 * Push current_thread_info()->sysenter_return to the stack.
14462 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
14463 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
14464 */
14465 - pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
14466 + pushl_cfi $0
14467 CFI_REL_OFFSET eip, 0
14468
14469 pushl_cfi %eax
14470 SAVE_ALL
14471 + GET_THREAD_INFO(%ebp)
14472 + movl TI_sysenter_return(%ebp),%ebp
14473 + movl %ebp,PT_EIP(%esp)
14474 ENABLE_INTERRUPTS(CLBR_NONE)
14475
14476 /*
14477 * Load the potential sixth argument from user stack.
14478 * Careful about security.
14479 */
14480 + movl PT_OLDESP(%esp),%ebp
14481 +
14482 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14483 + mov PT_OLDSS(%esp),%ds
14484 +1: movl %ds:(%ebp),%ebp
14485 + push %ss
14486 + pop %ds
14487 +#else
14488 cmpl $__PAGE_OFFSET-3,%ebp
14489 jae syscall_fault
14490 1: movl (%ebp),%ebp
14491 +#endif
14492 +
14493 movl %ebp,PT_EBP(%esp)
14494 .section __ex_table,"a"
14495 .align 4
14496 @@ -433,12 +597,24 @@ sysenter_do_call:
14497 testl $_TIF_ALLWORK_MASK, %ecx
14498 jne sysexit_audit
14499 sysenter_exit:
14500 +
14501 +#ifdef CONFIG_PAX_RANDKSTACK
14502 + pushl_cfi %eax
14503 + movl %esp, %eax
14504 + call pax_randomize_kstack
14505 + popl_cfi %eax
14506 +#endif
14507 +
14508 + pax_erase_kstack
14509 +
14510 /* if something modifies registers it must also disable sysexit */
14511 movl PT_EIP(%esp), %edx
14512 movl PT_OLDESP(%esp), %ecx
14513 xorl %ebp,%ebp
14514 TRACE_IRQS_ON
14515 1: mov PT_FS(%esp), %fs
14516 +2: mov PT_DS(%esp), %ds
14517 +3: mov PT_ES(%esp), %es
14518 PTGS_TO_GS
14519 ENABLE_INTERRUPTS_SYSEXIT
14520
14521 @@ -455,6 +631,9 @@ sysenter_audit:
14522 movl %eax,%edx /* 2nd arg: syscall number */
14523 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
14524 call __audit_syscall_entry
14525 +
14526 + pax_erase_kstack
14527 +
14528 pushl_cfi %ebx
14529 movl PT_EAX(%esp),%eax /* reload syscall number */
14530 jmp sysenter_do_call
14531 @@ -480,11 +659,17 @@ sysexit_audit:
14532
14533 CFI_ENDPROC
14534 .pushsection .fixup,"ax"
14535 -2: movl $0,PT_FS(%esp)
14536 +4: movl $0,PT_FS(%esp)
14537 + jmp 1b
14538 +5: movl $0,PT_DS(%esp)
14539 + jmp 1b
14540 +6: movl $0,PT_ES(%esp)
14541 jmp 1b
14542 .section __ex_table,"a"
14543 .align 4
14544 - .long 1b,2b
14545 + .long 1b,4b
14546 + .long 2b,5b
14547 + .long 3b,6b
14548 .popsection
14549 PTGS_TO_GS_EX
14550 ENDPROC(ia32_sysenter_target)
14551 @@ -517,6 +702,15 @@ syscall_exit:
14552 testl $_TIF_ALLWORK_MASK, %ecx # current->work
14553 jne syscall_exit_work
14554
14555 +restore_all_pax:
14556 +
14557 +#ifdef CONFIG_PAX_RANDKSTACK
14558 + movl %esp, %eax
14559 + call pax_randomize_kstack
14560 +#endif
14561 +
14562 + pax_erase_kstack
14563 +
14564 restore_all:
14565 TRACE_IRQS_IRET
14566 restore_all_notrace:
14567 @@ -576,14 +770,34 @@ ldt_ss:
14568 * compensating for the offset by changing to the ESPFIX segment with
14569 * a base address that matches for the difference.
14570 */
14571 -#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
14572 +#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
14573 mov %esp, %edx /* load kernel esp */
14574 mov PT_OLDESP(%esp), %eax /* load userspace esp */
14575 mov %dx, %ax /* eax: new kernel esp */
14576 sub %eax, %edx /* offset (low word is 0) */
14577 +#ifdef CONFIG_SMP
14578 + movl PER_CPU_VAR(cpu_number), %ebx
14579 + shll $PAGE_SHIFT_asm, %ebx
14580 + addl $cpu_gdt_table, %ebx
14581 +#else
14582 + movl $cpu_gdt_table, %ebx
14583 +#endif
14584 shr $16, %edx
14585 - mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
14586 - mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
14587 +
14588 +#ifdef CONFIG_PAX_KERNEXEC
14589 + mov %cr0, %esi
14590 + btr $16, %esi
14591 + mov %esi, %cr0
14592 +#endif
14593 +
14594 + mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
14595 + mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
14596 +
14597 +#ifdef CONFIG_PAX_KERNEXEC
14598 + bts $16, %esi
14599 + mov %esi, %cr0
14600 +#endif
14601 +
14602 pushl_cfi $__ESPFIX_SS
14603 pushl_cfi %eax /* new kernel esp */
14604 /* Disable interrupts, but do not irqtrace this section: we
14605 @@ -612,38 +826,30 @@ work_resched:
14606 movl TI_flags(%ebp), %ecx
14607 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
14608 # than syscall tracing?
14609 - jz restore_all
14610 + jz restore_all_pax
14611 testb $_TIF_NEED_RESCHED, %cl
14612 jnz work_resched
14613
14614 work_notifysig: # deal with pending signals and
14615 # notify-resume requests
14616 + movl %esp, %eax
14617 #ifdef CONFIG_VM86
14618 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
14619 - movl %esp, %eax
14620 - jne work_notifysig_v86 # returning to kernel-space or
14621 + jz 1f # returning to kernel-space or
14622 # vm86-space
14623 - TRACE_IRQS_ON
14624 - ENABLE_INTERRUPTS(CLBR_NONE)
14625 - xorl %edx, %edx
14626 - call do_notify_resume
14627 - jmp resume_userspace_sig
14628
14629 - ALIGN
14630 -work_notifysig_v86:
14631 pushl_cfi %ecx # save ti_flags for do_notify_resume
14632 call save_v86_state # %eax contains pt_regs pointer
14633 popl_cfi %ecx
14634 movl %eax, %esp
14635 -#else
14636 - movl %esp, %eax
14637 +1:
14638 #endif
14639 TRACE_IRQS_ON
14640 ENABLE_INTERRUPTS(CLBR_NONE)
14641 xorl %edx, %edx
14642 call do_notify_resume
14643 jmp resume_userspace_sig
14644 -END(work_pending)
14645 +ENDPROC(work_pending)
14646
14647 # perform syscall exit tracing
14648 ALIGN
14649 @@ -651,11 +857,14 @@ syscall_trace_entry:
14650 movl $-ENOSYS,PT_EAX(%esp)
14651 movl %esp, %eax
14652 call syscall_trace_enter
14653 +
14654 + pax_erase_kstack
14655 +
14656 /* What it returned is what we'll actually use. */
14657 cmpl $(NR_syscalls), %eax
14658 jnae syscall_call
14659 jmp syscall_exit
14660 -END(syscall_trace_entry)
14661 +ENDPROC(syscall_trace_entry)
14662
14663 # perform syscall exit tracing
14664 ALIGN
14665 @@ -668,20 +877,24 @@ syscall_exit_work:
14666 movl %esp, %eax
14667 call syscall_trace_leave
14668 jmp resume_userspace
14669 -END(syscall_exit_work)
14670 +ENDPROC(syscall_exit_work)
14671 CFI_ENDPROC
14672
14673 RING0_INT_FRAME # can't unwind into user space anyway
14674 syscall_fault:
14675 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14676 + push %ss
14677 + pop %ds
14678 +#endif
14679 GET_THREAD_INFO(%ebp)
14680 movl $-EFAULT,PT_EAX(%esp)
14681 jmp resume_userspace
14682 -END(syscall_fault)
14683 +ENDPROC(syscall_fault)
14684
14685 syscall_badsys:
14686 movl $-ENOSYS,PT_EAX(%esp)
14687 jmp resume_userspace
14688 -END(syscall_badsys)
14689 +ENDPROC(syscall_badsys)
14690 CFI_ENDPROC
14691 /*
14692 * End of kprobes section
14693 @@ -753,6 +966,36 @@ ENTRY(ptregs_clone)
14694 CFI_ENDPROC
14695 ENDPROC(ptregs_clone)
14696
14697 + ALIGN;
14698 +ENTRY(kernel_execve)
14699 + CFI_STARTPROC
14700 + pushl_cfi %ebp
14701 + sub $PT_OLDSS+4,%esp
14702 + pushl_cfi %edi
14703 + pushl_cfi %ecx
14704 + pushl_cfi %eax
14705 + lea 3*4(%esp),%edi
14706 + mov $PT_OLDSS/4+1,%ecx
14707 + xorl %eax,%eax
14708 + rep stosl
14709 + popl_cfi %eax
14710 + popl_cfi %ecx
14711 + popl_cfi %edi
14712 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
14713 + pushl_cfi %esp
14714 + call sys_execve
14715 + add $4,%esp
14716 + CFI_ADJUST_CFA_OFFSET -4
14717 + GET_THREAD_INFO(%ebp)
14718 + test %eax,%eax
14719 + jz syscall_exit
14720 + add $PT_OLDSS+4,%esp
14721 + CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
14722 + popl_cfi %ebp
14723 + ret
14724 + CFI_ENDPROC
14725 +ENDPROC(kernel_execve)
14726 +
14727 .macro FIXUP_ESPFIX_STACK
14728 /*
14729 * Switch back for ESPFIX stack to the normal zerobased stack
14730 @@ -762,8 +1005,15 @@ ENDPROC(ptregs_clone)
14731 * normal stack and adjusts ESP with the matching offset.
14732 */
14733 /* fixup the stack */
14734 - mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
14735 - mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
14736 +#ifdef CONFIG_SMP
14737 + movl PER_CPU_VAR(cpu_number), %ebx
14738 + shll $PAGE_SHIFT_asm, %ebx
14739 + addl $cpu_gdt_table, %ebx
14740 +#else
14741 + movl $cpu_gdt_table, %ebx
14742 +#endif
14743 + mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
14744 + mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
14745 shl $16, %eax
14746 addl %esp, %eax /* the adjusted stack pointer */
14747 pushl_cfi $__KERNEL_DS
14748 @@ -816,7 +1066,7 @@ vector=vector+1
14749 .endr
14750 2: jmp common_interrupt
14751 .endr
14752 -END(irq_entries_start)
14753 +ENDPROC(irq_entries_start)
14754
14755 .previous
14756 END(interrupt)
14757 @@ -864,7 +1114,7 @@ ENTRY(coprocessor_error)
14758 pushl_cfi $do_coprocessor_error
14759 jmp error_code
14760 CFI_ENDPROC
14761 -END(coprocessor_error)
14762 +ENDPROC(coprocessor_error)
14763
14764 ENTRY(simd_coprocessor_error)
14765 RING0_INT_FRAME
14766 @@ -885,7 +1135,7 @@ ENTRY(simd_coprocessor_error)
14767 #endif
14768 jmp error_code
14769 CFI_ENDPROC
14770 -END(simd_coprocessor_error)
14771 +ENDPROC(simd_coprocessor_error)
14772
14773 ENTRY(device_not_available)
14774 RING0_INT_FRAME
14775 @@ -893,7 +1143,7 @@ ENTRY(device_not_available)
14776 pushl_cfi $do_device_not_available
14777 jmp error_code
14778 CFI_ENDPROC
14779 -END(device_not_available)
14780 +ENDPROC(device_not_available)
14781
14782 #ifdef CONFIG_PARAVIRT
14783 ENTRY(native_iret)
14784 @@ -902,12 +1152,12 @@ ENTRY(native_iret)
14785 .align 4
14786 .long native_iret, iret_exc
14787 .previous
14788 -END(native_iret)
14789 +ENDPROC(native_iret)
14790
14791 ENTRY(native_irq_enable_sysexit)
14792 sti
14793 sysexit
14794 -END(native_irq_enable_sysexit)
14795 +ENDPROC(native_irq_enable_sysexit)
14796 #endif
14797
14798 ENTRY(overflow)
14799 @@ -916,7 +1166,7 @@ ENTRY(overflow)
14800 pushl_cfi $do_overflow
14801 jmp error_code
14802 CFI_ENDPROC
14803 -END(overflow)
14804 +ENDPROC(overflow)
14805
14806 ENTRY(bounds)
14807 RING0_INT_FRAME
14808 @@ -924,7 +1174,7 @@ ENTRY(bounds)
14809 pushl_cfi $do_bounds
14810 jmp error_code
14811 CFI_ENDPROC
14812 -END(bounds)
14813 +ENDPROC(bounds)
14814
14815 ENTRY(invalid_op)
14816 RING0_INT_FRAME
14817 @@ -932,7 +1182,7 @@ ENTRY(invalid_op)
14818 pushl_cfi $do_invalid_op
14819 jmp error_code
14820 CFI_ENDPROC
14821 -END(invalid_op)
14822 +ENDPROC(invalid_op)
14823
14824 ENTRY(coprocessor_segment_overrun)
14825 RING0_INT_FRAME
14826 @@ -940,35 +1190,35 @@ ENTRY(coprocessor_segment_overrun)
14827 pushl_cfi $do_coprocessor_segment_overrun
14828 jmp error_code
14829 CFI_ENDPROC
14830 -END(coprocessor_segment_overrun)
14831 +ENDPROC(coprocessor_segment_overrun)
14832
14833 ENTRY(invalid_TSS)
14834 RING0_EC_FRAME
14835 pushl_cfi $do_invalid_TSS
14836 jmp error_code
14837 CFI_ENDPROC
14838 -END(invalid_TSS)
14839 +ENDPROC(invalid_TSS)
14840
14841 ENTRY(segment_not_present)
14842 RING0_EC_FRAME
14843 pushl_cfi $do_segment_not_present
14844 jmp error_code
14845 CFI_ENDPROC
14846 -END(segment_not_present)
14847 +ENDPROC(segment_not_present)
14848
14849 ENTRY(stack_segment)
14850 RING0_EC_FRAME
14851 pushl_cfi $do_stack_segment
14852 jmp error_code
14853 CFI_ENDPROC
14854 -END(stack_segment)
14855 +ENDPROC(stack_segment)
14856
14857 ENTRY(alignment_check)
14858 RING0_EC_FRAME
14859 pushl_cfi $do_alignment_check
14860 jmp error_code
14861 CFI_ENDPROC
14862 -END(alignment_check)
14863 +ENDPROC(alignment_check)
14864
14865 ENTRY(divide_error)
14866 RING0_INT_FRAME
14867 @@ -976,7 +1226,7 @@ ENTRY(divide_error)
14868 pushl_cfi $do_divide_error
14869 jmp error_code
14870 CFI_ENDPROC
14871 -END(divide_error)
14872 +ENDPROC(divide_error)
14873
14874 #ifdef CONFIG_X86_MCE
14875 ENTRY(machine_check)
14876 @@ -985,7 +1235,7 @@ ENTRY(machine_check)
14877 pushl_cfi machine_check_vector
14878 jmp error_code
14879 CFI_ENDPROC
14880 -END(machine_check)
14881 +ENDPROC(machine_check)
14882 #endif
14883
14884 ENTRY(spurious_interrupt_bug)
14885 @@ -994,7 +1244,7 @@ ENTRY(spurious_interrupt_bug)
14886 pushl_cfi $do_spurious_interrupt_bug
14887 jmp error_code
14888 CFI_ENDPROC
14889 -END(spurious_interrupt_bug)
14890 +ENDPROC(spurious_interrupt_bug)
14891 /*
14892 * End of kprobes section
14893 */
14894 @@ -1109,7 +1359,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
14895
14896 ENTRY(mcount)
14897 ret
14898 -END(mcount)
14899 +ENDPROC(mcount)
14900
14901 ENTRY(ftrace_caller)
14902 cmpl $0, function_trace_stop
14903 @@ -1138,7 +1388,7 @@ ftrace_graph_call:
14904 .globl ftrace_stub
14905 ftrace_stub:
14906 ret
14907 -END(ftrace_caller)
14908 +ENDPROC(ftrace_caller)
14909
14910 #else /* ! CONFIG_DYNAMIC_FTRACE */
14911
14912 @@ -1174,7 +1424,7 @@ trace:
14913 popl %ecx
14914 popl %eax
14915 jmp ftrace_stub
14916 -END(mcount)
14917 +ENDPROC(mcount)
14918 #endif /* CONFIG_DYNAMIC_FTRACE */
14919 #endif /* CONFIG_FUNCTION_TRACER */
14920
14921 @@ -1195,7 +1445,7 @@ ENTRY(ftrace_graph_caller)
14922 popl %ecx
14923 popl %eax
14924 ret
14925 -END(ftrace_graph_caller)
14926 +ENDPROC(ftrace_graph_caller)
14927
14928 .globl return_to_handler
14929 return_to_handler:
14930 @@ -1250,15 +1500,18 @@ error_code:
14931 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
14932 REG_TO_PTGS %ecx
14933 SET_KERNEL_GS %ecx
14934 - movl $(__USER_DS), %ecx
14935 + movl $(__KERNEL_DS), %ecx
14936 movl %ecx, %ds
14937 movl %ecx, %es
14938 +
14939 + pax_enter_kernel
14940 +
14941 TRACE_IRQS_OFF
14942 movl %esp,%eax # pt_regs pointer
14943 call *%edi
14944 jmp ret_from_exception
14945 CFI_ENDPROC
14946 -END(page_fault)
14947 +ENDPROC(page_fault)
14948
14949 /*
14950 * Debug traps and NMI can happen at the one SYSENTER instruction
14951 @@ -1300,7 +1553,7 @@ debug_stack_correct:
14952 call do_debug
14953 jmp ret_from_exception
14954 CFI_ENDPROC
14955 -END(debug)
14956 +ENDPROC(debug)
14957
14958 /*
14959 * NMI is doubly nasty. It can happen _while_ we're handling
14960 @@ -1337,6 +1590,9 @@ nmi_stack_correct:
14961 xorl %edx,%edx # zero error code
14962 movl %esp,%eax # pt_regs pointer
14963 call do_nmi
14964 +
14965 + pax_exit_kernel
14966 +
14967 jmp restore_all_notrace
14968 CFI_ENDPROC
14969
14970 @@ -1373,12 +1629,15 @@ nmi_espfix_stack:
14971 FIXUP_ESPFIX_STACK # %eax == %esp
14972 xorl %edx,%edx # zero error code
14973 call do_nmi
14974 +
14975 + pax_exit_kernel
14976 +
14977 RESTORE_REGS
14978 lss 12+4(%esp), %esp # back to espfix stack
14979 CFI_ADJUST_CFA_OFFSET -24
14980 jmp irq_return
14981 CFI_ENDPROC
14982 -END(nmi)
14983 +ENDPROC(nmi)
14984
14985 ENTRY(int3)
14986 RING0_INT_FRAME
14987 @@ -1390,14 +1649,14 @@ ENTRY(int3)
14988 call do_int3
14989 jmp ret_from_exception
14990 CFI_ENDPROC
14991 -END(int3)
14992 +ENDPROC(int3)
14993
14994 ENTRY(general_protection)
14995 RING0_EC_FRAME
14996 pushl_cfi $do_general_protection
14997 jmp error_code
14998 CFI_ENDPROC
14999 -END(general_protection)
15000 +ENDPROC(general_protection)
15001
15002 #ifdef CONFIG_KVM_GUEST
15003 ENTRY(async_page_fault)
15004 @@ -1405,7 +1664,7 @@ ENTRY(async_page_fault)
15005 pushl_cfi $do_async_page_fault
15006 jmp error_code
15007 CFI_ENDPROC
15008 -END(async_page_fault)
15009 +ENDPROC(async_page_fault)
15010 #endif
15011
15012 /*
15013 diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
15014 index 1333d98..b340ca2 100644
15015 --- a/arch/x86/kernel/entry_64.S
15016 +++ b/arch/x86/kernel/entry_64.S
15017 @@ -56,6 +56,8 @@
15018 #include <asm/ftrace.h>
15019 #include <asm/percpu.h>
15020 #include <linux/err.h>
15021 +#include <asm/pgtable.h>
15022 +#include <asm/alternative-asm.h>
15023
15024 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
15025 #include <linux/elf-em.h>
15026 @@ -69,8 +71,9 @@
15027 #ifdef CONFIG_FUNCTION_TRACER
15028 #ifdef CONFIG_DYNAMIC_FTRACE
15029 ENTRY(mcount)
15030 + pax_force_retaddr
15031 retq
15032 -END(mcount)
15033 +ENDPROC(mcount)
15034
15035 ENTRY(ftrace_caller)
15036 cmpl $0, function_trace_stop
15037 @@ -93,8 +96,9 @@ GLOBAL(ftrace_graph_call)
15038 #endif
15039
15040 GLOBAL(ftrace_stub)
15041 + pax_force_retaddr
15042 retq
15043 -END(ftrace_caller)
15044 +ENDPROC(ftrace_caller)
15045
15046 #else /* ! CONFIG_DYNAMIC_FTRACE */
15047 ENTRY(mcount)
15048 @@ -113,6 +117,7 @@ ENTRY(mcount)
15049 #endif
15050
15051 GLOBAL(ftrace_stub)
15052 + pax_force_retaddr
15053 retq
15054
15055 trace:
15056 @@ -122,12 +127,13 @@ trace:
15057 movq 8(%rbp), %rsi
15058 subq $MCOUNT_INSN_SIZE, %rdi
15059
15060 + pax_force_fptr ftrace_trace_function
15061 call *ftrace_trace_function
15062
15063 MCOUNT_RESTORE_FRAME
15064
15065 jmp ftrace_stub
15066 -END(mcount)
15067 +ENDPROC(mcount)
15068 #endif /* CONFIG_DYNAMIC_FTRACE */
15069 #endif /* CONFIG_FUNCTION_TRACER */
15070
15071 @@ -147,8 +153,9 @@ ENTRY(ftrace_graph_caller)
15072
15073 MCOUNT_RESTORE_FRAME
15074
15075 + pax_force_retaddr
15076 retq
15077 -END(ftrace_graph_caller)
15078 +ENDPROC(ftrace_graph_caller)
15079
15080 GLOBAL(return_to_handler)
15081 subq $24, %rsp
15082 @@ -164,6 +171,7 @@ GLOBAL(return_to_handler)
15083 movq 8(%rsp), %rdx
15084 movq (%rsp), %rax
15085 addq $24, %rsp
15086 + pax_force_fptr %rdi
15087 jmp *%rdi
15088 #endif
15089
15090 @@ -179,6 +187,282 @@ ENTRY(native_usergs_sysret64)
15091 ENDPROC(native_usergs_sysret64)
15092 #endif /* CONFIG_PARAVIRT */
15093
15094 + .macro ljmpq sel, off
15095 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
15096 + .byte 0x48; ljmp *1234f(%rip)
15097 + .pushsection .rodata
15098 + .align 16
15099 + 1234: .quad \off; .word \sel
15100 + .popsection
15101 +#else
15102 + pushq $\sel
15103 + pushq $\off
15104 + lretq
15105 +#endif
15106 + .endm
15107 +
15108 + .macro pax_enter_kernel
15109 + pax_set_fptr_mask
15110 +#ifdef CONFIG_PAX_KERNEXEC
15111 + call pax_enter_kernel
15112 +#endif
15113 + .endm
15114 +
15115 + .macro pax_exit_kernel
15116 +#ifdef CONFIG_PAX_KERNEXEC
15117 + call pax_exit_kernel
15118 +#endif
15119 + .endm
15120 +
15121 +#ifdef CONFIG_PAX_KERNEXEC
15122 +ENTRY(pax_enter_kernel)
15123 + pushq %rdi
15124 +
15125 +#ifdef CONFIG_PARAVIRT
15126 + PV_SAVE_REGS(CLBR_RDI)
15127 +#endif
15128 +
15129 + GET_CR0_INTO_RDI
15130 + bts $16,%rdi
15131 + jnc 3f
15132 + mov %cs,%edi
15133 + cmp $__KERNEL_CS,%edi
15134 + jnz 2f
15135 +1:
15136 +
15137 +#ifdef CONFIG_PARAVIRT
15138 + PV_RESTORE_REGS(CLBR_RDI)
15139 +#endif
15140 +
15141 + popq %rdi
15142 + pax_force_retaddr
15143 + retq
15144 +
15145 +2: ljmpq __KERNEL_CS,1f
15146 +3: ljmpq __KERNEXEC_KERNEL_CS,4f
15147 +4: SET_RDI_INTO_CR0
15148 + jmp 1b
15149 +ENDPROC(pax_enter_kernel)
15150 +
15151 +ENTRY(pax_exit_kernel)
15152 + pushq %rdi
15153 +
15154 +#ifdef CONFIG_PARAVIRT
15155 + PV_SAVE_REGS(CLBR_RDI)
15156 +#endif
15157 +
15158 + mov %cs,%rdi
15159 + cmp $__KERNEXEC_KERNEL_CS,%edi
15160 + jz 2f
15161 +1:
15162 +
15163 +#ifdef CONFIG_PARAVIRT
15164 + PV_RESTORE_REGS(CLBR_RDI);
15165 +#endif
15166 +
15167 + popq %rdi
15168 + pax_force_retaddr
15169 + retq
15170 +
15171 +2: GET_CR0_INTO_RDI
15172 + btr $16,%rdi
15173 + ljmpq __KERNEL_CS,3f
15174 +3: SET_RDI_INTO_CR0
15175 + jmp 1b
15176 +#ifdef CONFIG_PARAVIRT
15177 + PV_RESTORE_REGS(CLBR_RDI);
15178 +#endif
15179 +
15180 + popq %rdi
15181 + pax_force_retaddr
15182 + retq
15183 +ENDPROC(pax_exit_kernel)
15184 +#endif
15185 +
15186 + .macro pax_enter_kernel_user
15187 + pax_set_fptr_mask
15188 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15189 + call pax_enter_kernel_user
15190 +#endif
15191 + .endm
15192 +
15193 + .macro pax_exit_kernel_user
15194 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15195 + call pax_exit_kernel_user
15196 +#endif
15197 +#ifdef CONFIG_PAX_RANDKSTACK
15198 + pushq %rax
15199 + call pax_randomize_kstack
15200 + popq %rax
15201 +#endif
15202 + .endm
15203 +
15204 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15205 +ENTRY(pax_enter_kernel_user)
15206 + pushq %rdi
15207 + pushq %rbx
15208 +
15209 +#ifdef CONFIG_PARAVIRT
15210 + PV_SAVE_REGS(CLBR_RDI)
15211 +#endif
15212 +
15213 + GET_CR3_INTO_RDI
15214 + mov %rdi,%rbx
15215 + add $__START_KERNEL_map,%rbx
15216 + sub phys_base(%rip),%rbx
15217 +
15218 +#ifdef CONFIG_PARAVIRT
15219 + pushq %rdi
15220 + cmpl $0, pv_info+PARAVIRT_enabled
15221 + jz 1f
15222 + i = 0
15223 + .rept USER_PGD_PTRS
15224 + mov i*8(%rbx),%rsi
15225 + mov $0,%sil
15226 + lea i*8(%rbx),%rdi
15227 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
15228 + i = i + 1
15229 + .endr
15230 + jmp 2f
15231 +1:
15232 +#endif
15233 +
15234 + i = 0
15235 + .rept USER_PGD_PTRS
15236 + movb $0,i*8(%rbx)
15237 + i = i + 1
15238 + .endr
15239 +
15240 +#ifdef CONFIG_PARAVIRT
15241 +2: popq %rdi
15242 +#endif
15243 + SET_RDI_INTO_CR3
15244 +
15245 +#ifdef CONFIG_PAX_KERNEXEC
15246 + GET_CR0_INTO_RDI
15247 + bts $16,%rdi
15248 + SET_RDI_INTO_CR0
15249 +#endif
15250 +
15251 +#ifdef CONFIG_PARAVIRT
15252 + PV_RESTORE_REGS(CLBR_RDI)
15253 +#endif
15254 +
15255 + popq %rbx
15256 + popq %rdi
15257 + pax_force_retaddr
15258 + retq
15259 +ENDPROC(pax_enter_kernel_user)
15260 +
15261 +ENTRY(pax_exit_kernel_user)
15262 + push %rdi
15263 +
15264 +#ifdef CONFIG_PARAVIRT
15265 + pushq %rbx
15266 + PV_SAVE_REGS(CLBR_RDI)
15267 +#endif
15268 +
15269 +#ifdef CONFIG_PAX_KERNEXEC
15270 + GET_CR0_INTO_RDI
15271 + btr $16,%rdi
15272 + SET_RDI_INTO_CR0
15273 +#endif
15274 +
15275 + GET_CR3_INTO_RDI
15276 + add $__START_KERNEL_map,%rdi
15277 + sub phys_base(%rip),%rdi
15278 +
15279 +#ifdef CONFIG_PARAVIRT
15280 + cmpl $0, pv_info+PARAVIRT_enabled
15281 + jz 1f
15282 + mov %rdi,%rbx
15283 + i = 0
15284 + .rept USER_PGD_PTRS
15285 + mov i*8(%rbx),%rsi
15286 + mov $0x67,%sil
15287 + lea i*8(%rbx),%rdi
15288 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
15289 + i = i + 1
15290 + .endr
15291 + jmp 2f
15292 +1:
15293 +#endif
15294 +
15295 + i = 0
15296 + .rept USER_PGD_PTRS
15297 + movb $0x67,i*8(%rdi)
15298 + i = i + 1
15299 + .endr
15300 +
15301 +#ifdef CONFIG_PARAVIRT
15302 +2: PV_RESTORE_REGS(CLBR_RDI)
15303 + popq %rbx
15304 +#endif
15305 +
15306 + popq %rdi
15307 + pax_force_retaddr
15308 + retq
15309 +ENDPROC(pax_exit_kernel_user)
15310 +#endif
15311 +
15312 +.macro pax_erase_kstack
15313 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15314 + call pax_erase_kstack
15315 +#endif
15316 +.endm
15317 +
15318 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15319 +/*
15320 + * r11: thread_info
15321 + * rcx, rdx: can be clobbered
15322 + */
15323 +ENTRY(pax_erase_kstack)
15324 + pushq %rdi
15325 + pushq %rax
15326 + pushq %r11
15327 +
15328 + GET_THREAD_INFO(%r11)
15329 + mov TI_lowest_stack(%r11), %rdi
15330 + mov $-0xBEEF, %rax
15331 + std
15332 +
15333 +1: mov %edi, %ecx
15334 + and $THREAD_SIZE_asm - 1, %ecx
15335 + shr $3, %ecx
15336 + repne scasq
15337 + jecxz 2f
15338 +
15339 + cmp $2*8, %ecx
15340 + jc 2f
15341 +
15342 + mov $2*8, %ecx
15343 + repe scasq
15344 + jecxz 2f
15345 + jne 1b
15346 +
15347 +2: cld
15348 + mov %esp, %ecx
15349 + sub %edi, %ecx
15350 +
15351 + cmp $THREAD_SIZE_asm, %rcx
15352 + jb 3f
15353 + ud2
15354 +3:
15355 +
15356 + shr $3, %ecx
15357 + rep stosq
15358 +
15359 + mov TI_task_thread_sp0(%r11), %rdi
15360 + sub $256, %rdi
15361 + mov %rdi, TI_lowest_stack(%r11)
15362 +
15363 + popq %r11
15364 + popq %rax
15365 + popq %rdi
15366 + pax_force_retaddr
15367 + ret
15368 +ENDPROC(pax_erase_kstack)
15369 +#endif
15370
15371 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
15372 #ifdef CONFIG_TRACE_IRQFLAGS
15373 @@ -232,8 +516,8 @@ ENDPROC(native_usergs_sysret64)
15374 .endm
15375
15376 .macro UNFAKE_STACK_FRAME
15377 - addq $8*6, %rsp
15378 - CFI_ADJUST_CFA_OFFSET -(6*8)
15379 + addq $8*6 + ARG_SKIP, %rsp
15380 + CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
15381 .endm
15382
15383 /*
15384 @@ -320,7 +604,7 @@ ENDPROC(native_usergs_sysret64)
15385 movq %rsp, %rsi
15386
15387 leaq -RBP(%rsp),%rdi /* arg1 for handler */
15388 - testl $3, CS(%rdi)
15389 + testb $3, CS(%rdi)
15390 je 1f
15391 SWAPGS
15392 /*
15393 @@ -356,9 +640,10 @@ ENTRY(save_rest)
15394 movq_cfi r15, R15+16
15395 movq %r11, 8(%rsp) /* return address */
15396 FIXUP_TOP_OF_STACK %r11, 16
15397 + pax_force_retaddr
15398 ret
15399 CFI_ENDPROC
15400 -END(save_rest)
15401 +ENDPROC(save_rest)
15402
15403 /* save complete stack frame */
15404 .pushsection .kprobes.text, "ax"
15405 @@ -387,9 +672,10 @@ ENTRY(save_paranoid)
15406 js 1f /* negative -> in kernel */
15407 SWAPGS
15408 xorl %ebx,%ebx
15409 -1: ret
15410 +1: pax_force_retaddr_bts
15411 + ret
15412 CFI_ENDPROC
15413 -END(save_paranoid)
15414 +ENDPROC(save_paranoid)
15415 .popsection
15416
15417 /*
15418 @@ -411,7 +697,7 @@ ENTRY(ret_from_fork)
15419
15420 RESTORE_REST
15421
15422 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
15423 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
15424 jz retint_restore_args
15425
15426 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
15427 @@ -421,7 +707,7 @@ ENTRY(ret_from_fork)
15428 jmp ret_from_sys_call # go to the SYSRET fastpath
15429
15430 CFI_ENDPROC
15431 -END(ret_from_fork)
15432 +ENDPROC(ret_from_fork)
15433
15434 /*
15435 * System call entry. Up to 6 arguments in registers are supported.
15436 @@ -457,7 +743,7 @@ END(ret_from_fork)
15437 ENTRY(system_call)
15438 CFI_STARTPROC simple
15439 CFI_SIGNAL_FRAME
15440 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
15441 + CFI_DEF_CFA rsp,0
15442 CFI_REGISTER rip,rcx
15443 /*CFI_REGISTER rflags,r11*/
15444 SWAPGS_UNSAFE_STACK
15445 @@ -470,21 +756,23 @@ GLOBAL(system_call_after_swapgs)
15446
15447 movq %rsp,PER_CPU_VAR(old_rsp)
15448 movq PER_CPU_VAR(kernel_stack),%rsp
15449 + SAVE_ARGS 8*6,0
15450 + pax_enter_kernel_user
15451 /*
15452 * No need to follow this irqs off/on section - it's straight
15453 * and short:
15454 */
15455 ENABLE_INTERRUPTS(CLBR_NONE)
15456 - SAVE_ARGS 8,0
15457 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
15458 movq %rcx,RIP-ARGOFFSET(%rsp)
15459 CFI_REL_OFFSET rip,RIP-ARGOFFSET
15460 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15461 + GET_THREAD_INFO(%rcx)
15462 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
15463 jnz tracesys
15464 system_call_fastpath:
15465 cmpq $__NR_syscall_max,%rax
15466 ja badsys
15467 - movq %r10,%rcx
15468 + movq R10-ARGOFFSET(%rsp),%rcx
15469 call *sys_call_table(,%rax,8) # XXX: rip relative
15470 movq %rax,RAX-ARGOFFSET(%rsp)
15471 /*
15472 @@ -498,10 +786,13 @@ sysret_check:
15473 LOCKDEP_SYS_EXIT
15474 DISABLE_INTERRUPTS(CLBR_NONE)
15475 TRACE_IRQS_OFF
15476 - movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
15477 + GET_THREAD_INFO(%rcx)
15478 + movl TI_flags(%rcx),%edx
15479 andl %edi,%edx
15480 jnz sysret_careful
15481 CFI_REMEMBER_STATE
15482 + pax_exit_kernel_user
15483 + pax_erase_kstack
15484 /*
15485 * sysretq will re-enable interrupts:
15486 */
15487 @@ -553,14 +844,18 @@ badsys:
15488 * jump back to the normal fast path.
15489 */
15490 auditsys:
15491 - movq %r10,%r9 /* 6th arg: 4th syscall arg */
15492 + movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
15493 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
15494 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
15495 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
15496 movq %rax,%rsi /* 2nd arg: syscall number */
15497 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
15498 call __audit_syscall_entry
15499 +
15500 + pax_erase_kstack
15501 +
15502 LOAD_ARGS 0 /* reload call-clobbered registers */
15503 + pax_set_fptr_mask
15504 jmp system_call_fastpath
15505
15506 /*
15507 @@ -581,7 +876,7 @@ sysret_audit:
15508 /* Do syscall tracing */
15509 tracesys:
15510 #ifdef CONFIG_AUDITSYSCALL
15511 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15512 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
15513 jz auditsys
15514 #endif
15515 SAVE_REST
15516 @@ -589,16 +884,20 @@ tracesys:
15517 FIXUP_TOP_OF_STACK %rdi
15518 movq %rsp,%rdi
15519 call syscall_trace_enter
15520 +
15521 + pax_erase_kstack
15522 +
15523 /*
15524 * Reload arg registers from stack in case ptrace changed them.
15525 * We don't reload %rax because syscall_trace_enter() returned
15526 * the value it wants us to use in the table lookup.
15527 */
15528 LOAD_ARGS ARGOFFSET, 1
15529 + pax_set_fptr_mask
15530 RESTORE_REST
15531 cmpq $__NR_syscall_max,%rax
15532 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
15533 - movq %r10,%rcx /* fixup for C */
15534 + movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
15535 call *sys_call_table(,%rax,8)
15536 movq %rax,RAX-ARGOFFSET(%rsp)
15537 /* Use IRET because user could have changed frame */
15538 @@ -619,6 +918,7 @@ GLOBAL(int_with_check)
15539 andl %edi,%edx
15540 jnz int_careful
15541 andl $~TS_COMPAT,TI_status(%rcx)
15542 + pax_erase_kstack
15543 jmp retint_swapgs
15544
15545 /* Either reschedule or signal or syscall exit tracking needed. */
15546 @@ -665,7 +965,7 @@ int_restore_rest:
15547 TRACE_IRQS_OFF
15548 jmp int_with_check
15549 CFI_ENDPROC
15550 -END(system_call)
15551 +ENDPROC(system_call)
15552
15553 /*
15554 * Certain special system calls that need to save a complete full stack frame.
15555 @@ -681,7 +981,7 @@ ENTRY(\label)
15556 call \func
15557 jmp ptregscall_common
15558 CFI_ENDPROC
15559 -END(\label)
15560 +ENDPROC(\label)
15561 .endm
15562
15563 PTREGSCALL stub_clone, sys_clone, %r8
15564 @@ -699,9 +999,10 @@ ENTRY(ptregscall_common)
15565 movq_cfi_restore R12+8, r12
15566 movq_cfi_restore RBP+8, rbp
15567 movq_cfi_restore RBX+8, rbx
15568 + pax_force_retaddr
15569 ret $REST_SKIP /* pop extended registers */
15570 CFI_ENDPROC
15571 -END(ptregscall_common)
15572 +ENDPROC(ptregscall_common)
15573
15574 ENTRY(stub_execve)
15575 CFI_STARTPROC
15576 @@ -716,7 +1017,7 @@ ENTRY(stub_execve)
15577 RESTORE_REST
15578 jmp int_ret_from_sys_call
15579 CFI_ENDPROC
15580 -END(stub_execve)
15581 +ENDPROC(stub_execve)
15582
15583 /*
15584 * sigreturn is special because it needs to restore all registers on return.
15585 @@ -734,7 +1035,7 @@ ENTRY(stub_rt_sigreturn)
15586 RESTORE_REST
15587 jmp int_ret_from_sys_call
15588 CFI_ENDPROC
15589 -END(stub_rt_sigreturn)
15590 +ENDPROC(stub_rt_sigreturn)
15591
15592 /*
15593 * Build the entry stubs and pointer table with some assembler magic.
15594 @@ -769,7 +1070,7 @@ vector=vector+1
15595 2: jmp common_interrupt
15596 .endr
15597 CFI_ENDPROC
15598 -END(irq_entries_start)
15599 +ENDPROC(irq_entries_start)
15600
15601 .previous
15602 END(interrupt)
15603 @@ -789,6 +1090,16 @@ END(interrupt)
15604 subq $ORIG_RAX-RBP, %rsp
15605 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
15606 SAVE_ARGS_IRQ
15607 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15608 + testb $3, CS(%rdi)
15609 + jnz 1f
15610 + pax_enter_kernel
15611 + jmp 2f
15612 +1: pax_enter_kernel_user
15613 +2:
15614 +#else
15615 + pax_enter_kernel
15616 +#endif
15617 call \func
15618 .endm
15619
15620 @@ -820,7 +1131,7 @@ ret_from_intr:
15621
15622 exit_intr:
15623 GET_THREAD_INFO(%rcx)
15624 - testl $3,CS-ARGOFFSET(%rsp)
15625 + testb $3,CS-ARGOFFSET(%rsp)
15626 je retint_kernel
15627
15628 /* Interrupt came from user space */
15629 @@ -842,12 +1153,15 @@ retint_swapgs: /* return to user-space */
15630 * The iretq could re-enable interrupts:
15631 */
15632 DISABLE_INTERRUPTS(CLBR_ANY)
15633 + pax_exit_kernel_user
15634 TRACE_IRQS_IRETQ
15635 SWAPGS
15636 jmp restore_args
15637
15638 retint_restore_args: /* return to kernel space */
15639 DISABLE_INTERRUPTS(CLBR_ANY)
15640 + pax_exit_kernel
15641 + pax_force_retaddr RIP-ARGOFFSET
15642 /*
15643 * The iretq could re-enable interrupts:
15644 */
15645 @@ -936,7 +1250,7 @@ ENTRY(retint_kernel)
15646 #endif
15647
15648 CFI_ENDPROC
15649 -END(common_interrupt)
15650 +ENDPROC(common_interrupt)
15651 /*
15652 * End of kprobes section
15653 */
15654 @@ -953,7 +1267,7 @@ ENTRY(\sym)
15655 interrupt \do_sym
15656 jmp ret_from_intr
15657 CFI_ENDPROC
15658 -END(\sym)
15659 +ENDPROC(\sym)
15660 .endm
15661
15662 #ifdef CONFIG_SMP
15663 @@ -1026,12 +1340,22 @@ ENTRY(\sym)
15664 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
15665 call error_entry
15666 DEFAULT_FRAME 0
15667 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15668 + testb $3, CS(%rsp)
15669 + jnz 1f
15670 + pax_enter_kernel
15671 + jmp 2f
15672 +1: pax_enter_kernel_user
15673 +2:
15674 +#else
15675 + pax_enter_kernel
15676 +#endif
15677 movq %rsp,%rdi /* pt_regs pointer */
15678 xorl %esi,%esi /* no error code */
15679 call \do_sym
15680 jmp error_exit /* %ebx: no swapgs flag */
15681 CFI_ENDPROC
15682 -END(\sym)
15683 +ENDPROC(\sym)
15684 .endm
15685
15686 .macro paranoidzeroentry sym do_sym
15687 @@ -1043,15 +1367,25 @@ ENTRY(\sym)
15688 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
15689 call save_paranoid
15690 TRACE_IRQS_OFF
15691 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15692 + testb $3, CS(%rsp)
15693 + jnz 1f
15694 + pax_enter_kernel
15695 + jmp 2f
15696 +1: pax_enter_kernel_user
15697 +2:
15698 +#else
15699 + pax_enter_kernel
15700 +#endif
15701 movq %rsp,%rdi /* pt_regs pointer */
15702 xorl %esi,%esi /* no error code */
15703 call \do_sym
15704 jmp paranoid_exit /* %ebx: no swapgs flag */
15705 CFI_ENDPROC
15706 -END(\sym)
15707 +ENDPROC(\sym)
15708 .endm
15709
15710 -#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
15711 +#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
15712 .macro paranoidzeroentry_ist sym do_sym ist
15713 ENTRY(\sym)
15714 INTR_FRAME
15715 @@ -1061,14 +1395,30 @@ ENTRY(\sym)
15716 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
15717 call save_paranoid
15718 TRACE_IRQS_OFF
15719 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15720 + testb $3, CS(%rsp)
15721 + jnz 1f
15722 + pax_enter_kernel
15723 + jmp 2f
15724 +1: pax_enter_kernel_user
15725 +2:
15726 +#else
15727 + pax_enter_kernel
15728 +#endif
15729 movq %rsp,%rdi /* pt_regs pointer */
15730 xorl %esi,%esi /* no error code */
15731 +#ifdef CONFIG_SMP
15732 + imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
15733 + lea init_tss(%r12), %r12
15734 +#else
15735 + lea init_tss(%rip), %r12
15736 +#endif
15737 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
15738 call \do_sym
15739 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
15740 jmp paranoid_exit /* %ebx: no swapgs flag */
15741 CFI_ENDPROC
15742 -END(\sym)
15743 +ENDPROC(\sym)
15744 .endm
15745
15746 .macro errorentry sym do_sym
15747 @@ -1079,13 +1429,23 @@ ENTRY(\sym)
15748 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
15749 call error_entry
15750 DEFAULT_FRAME 0
15751 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15752 + testb $3, CS(%rsp)
15753 + jnz 1f
15754 + pax_enter_kernel
15755 + jmp 2f
15756 +1: pax_enter_kernel_user
15757 +2:
15758 +#else
15759 + pax_enter_kernel
15760 +#endif
15761 movq %rsp,%rdi /* pt_regs pointer */
15762 movq ORIG_RAX(%rsp),%rsi /* get error code */
15763 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
15764 call \do_sym
15765 jmp error_exit /* %ebx: no swapgs flag */
15766 CFI_ENDPROC
15767 -END(\sym)
15768 +ENDPROC(\sym)
15769 .endm
15770
15771 /* error code is on the stack already */
15772 @@ -1098,13 +1458,23 @@ ENTRY(\sym)
15773 call save_paranoid
15774 DEFAULT_FRAME 0
15775 TRACE_IRQS_OFF
15776 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15777 + testb $3, CS(%rsp)
15778 + jnz 1f
15779 + pax_enter_kernel
15780 + jmp 2f
15781 +1: pax_enter_kernel_user
15782 +2:
15783 +#else
15784 + pax_enter_kernel
15785 +#endif
15786 movq %rsp,%rdi /* pt_regs pointer */
15787 movq ORIG_RAX(%rsp),%rsi /* get error code */
15788 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
15789 call \do_sym
15790 jmp paranoid_exit /* %ebx: no swapgs flag */
15791 CFI_ENDPROC
15792 -END(\sym)
15793 +ENDPROC(\sym)
15794 .endm
15795
15796 zeroentry divide_error do_divide_error
15797 @@ -1134,9 +1504,10 @@ gs_change:
15798 2: mfence /* workaround */
15799 SWAPGS
15800 popfq_cfi
15801 + pax_force_retaddr
15802 ret
15803 CFI_ENDPROC
15804 -END(native_load_gs_index)
15805 +ENDPROC(native_load_gs_index)
15806
15807 .section __ex_table,"a"
15808 .align 8
15809 @@ -1158,13 +1529,14 @@ ENTRY(kernel_thread_helper)
15810 * Here we are in the child and the registers are set as they were
15811 * at kernel_thread() invocation in the parent.
15812 */
15813 + pax_force_fptr %rsi
15814 call *%rsi
15815 # exit
15816 mov %eax, %edi
15817 call do_exit
15818 ud2 # padding for call trace
15819 CFI_ENDPROC
15820 -END(kernel_thread_helper)
15821 +ENDPROC(kernel_thread_helper)
15822
15823 /*
15824 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
15825 @@ -1191,11 +1563,11 @@ ENTRY(kernel_execve)
15826 RESTORE_REST
15827 testq %rax,%rax
15828 je int_ret_from_sys_call
15829 - RESTORE_ARGS
15830 UNFAKE_STACK_FRAME
15831 + pax_force_retaddr
15832 ret
15833 CFI_ENDPROC
15834 -END(kernel_execve)
15835 +ENDPROC(kernel_execve)
15836
15837 /* Call softirq on interrupt stack. Interrupts are off. */
15838 ENTRY(call_softirq)
15839 @@ -1213,9 +1585,10 @@ ENTRY(call_softirq)
15840 CFI_DEF_CFA_REGISTER rsp
15841 CFI_ADJUST_CFA_OFFSET -8
15842 decl PER_CPU_VAR(irq_count)
15843 + pax_force_retaddr
15844 ret
15845 CFI_ENDPROC
15846 -END(call_softirq)
15847 +ENDPROC(call_softirq)
15848
15849 #ifdef CONFIG_XEN
15850 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
15851 @@ -1253,7 +1626,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
15852 decl PER_CPU_VAR(irq_count)
15853 jmp error_exit
15854 CFI_ENDPROC
15855 -END(xen_do_hypervisor_callback)
15856 +ENDPROC(xen_do_hypervisor_callback)
15857
15858 /*
15859 * Hypervisor uses this for application faults while it executes.
15860 @@ -1312,7 +1685,7 @@ ENTRY(xen_failsafe_callback)
15861 SAVE_ALL
15862 jmp error_exit
15863 CFI_ENDPROC
15864 -END(xen_failsafe_callback)
15865 +ENDPROC(xen_failsafe_callback)
15866
15867 apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
15868 xen_hvm_callback_vector xen_evtchn_do_upcall
15869 @@ -1361,16 +1734,31 @@ ENTRY(paranoid_exit)
15870 TRACE_IRQS_OFF
15871 testl %ebx,%ebx /* swapgs needed? */
15872 jnz paranoid_restore
15873 - testl $3,CS(%rsp)
15874 + testb $3,CS(%rsp)
15875 jnz paranoid_userspace
15876 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15877 + pax_exit_kernel
15878 + TRACE_IRQS_IRETQ 0
15879 + SWAPGS_UNSAFE_STACK
15880 + RESTORE_ALL 8
15881 + pax_force_retaddr_bts
15882 + jmp irq_return
15883 +#endif
15884 paranoid_swapgs:
15885 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15886 + pax_exit_kernel_user
15887 +#else
15888 + pax_exit_kernel
15889 +#endif
15890 TRACE_IRQS_IRETQ 0
15891 SWAPGS_UNSAFE_STACK
15892 RESTORE_ALL 8
15893 jmp irq_return
15894 paranoid_restore:
15895 + pax_exit_kernel
15896 TRACE_IRQS_IRETQ 0
15897 RESTORE_ALL 8
15898 + pax_force_retaddr_bts
15899 jmp irq_return
15900 paranoid_userspace:
15901 GET_THREAD_INFO(%rcx)
15902 @@ -1399,7 +1787,7 @@ paranoid_schedule:
15903 TRACE_IRQS_OFF
15904 jmp paranoid_userspace
15905 CFI_ENDPROC
15906 -END(paranoid_exit)
15907 +ENDPROC(paranoid_exit)
15908
15909 /*
15910 * Exception entry point. This expects an error code/orig_rax on the stack.
15911 @@ -1426,12 +1814,13 @@ ENTRY(error_entry)
15912 movq_cfi r14, R14+8
15913 movq_cfi r15, R15+8
15914 xorl %ebx,%ebx
15915 - testl $3,CS+8(%rsp)
15916 + testb $3,CS+8(%rsp)
15917 je error_kernelspace
15918 error_swapgs:
15919 SWAPGS
15920 error_sti:
15921 TRACE_IRQS_OFF
15922 + pax_force_retaddr_bts
15923 ret
15924
15925 /*
15926 @@ -1458,7 +1847,7 @@ bstep_iret:
15927 movq %rcx,RIP+8(%rsp)
15928 jmp error_swapgs
15929 CFI_ENDPROC
15930 -END(error_entry)
15931 +ENDPROC(error_entry)
15932
15933
15934 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
15935 @@ -1478,7 +1867,7 @@ ENTRY(error_exit)
15936 jnz retint_careful
15937 jmp retint_swapgs
15938 CFI_ENDPROC
15939 -END(error_exit)
15940 +ENDPROC(error_exit)
15941
15942 /*
15943 * Test if a given stack is an NMI stack or not.
15944 @@ -1535,9 +1924,11 @@ ENTRY(nmi)
15945 * If %cs was not the kernel segment, then the NMI triggered in user
15946 * space, which means it is definitely not nested.
15947 */
15948 + cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
15949 + je 1f
15950 cmpl $__KERNEL_CS, 16(%rsp)
15951 jne first_nmi
15952 -
15953 +1:
15954 /*
15955 * Check the special variable on the stack to see if NMIs are
15956 * executing.
15957 @@ -1659,6 +2050,16 @@ restart_nmi:
15958 */
15959 call save_paranoid
15960 DEFAULT_FRAME 0
15961 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15962 + testb $3, CS(%rsp)
15963 + jnz 1f
15964 + pax_enter_kernel
15965 + jmp 2f
15966 +1: pax_enter_kernel_user
15967 +2:
15968 +#else
15969 + pax_enter_kernel
15970 +#endif
15971 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
15972 movq %rsp,%rdi
15973 movq $-1,%rsi
15974 @@ -1666,14 +2067,25 @@ restart_nmi:
15975 testl %ebx,%ebx /* swapgs needed? */
15976 jnz nmi_restore
15977 nmi_swapgs:
15978 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15979 + pax_exit_kernel_user
15980 +#else
15981 + pax_exit_kernel
15982 +#endif
15983 SWAPGS_UNSAFE_STACK
15984 + RESTORE_ALL 8
15985 + /* Clear the NMI executing stack variable */
15986 + movq $0, 10*8(%rsp)
15987 + jmp irq_return
15988 nmi_restore:
15989 + pax_exit_kernel
15990 RESTORE_ALL 8
15991 + pax_force_retaddr_bts
15992 /* Clear the NMI executing stack variable */
15993 movq $0, 10*8(%rsp)
15994 jmp irq_return
15995 CFI_ENDPROC
15996 -END(nmi)
15997 +ENDPROC(nmi)
15998
15999 /*
16000 * If an NMI hit an iret because of an exception or breakpoint,
16001 @@ -1700,7 +2112,7 @@ ENTRY(ignore_sysret)
16002 mov $-ENOSYS,%eax
16003 sysret
16004 CFI_ENDPROC
16005 -END(ignore_sysret)
16006 +ENDPROC(ignore_sysret)
16007
16008 /*
16009 * End of kprobes section
16010 diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
16011 index c9a281f..ce2f317 100644
16012 --- a/arch/x86/kernel/ftrace.c
16013 +++ b/arch/x86/kernel/ftrace.c
16014 @@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the IP to write to */
16015 static const void *mod_code_newcode; /* holds the text to write to the IP */
16016
16017 static unsigned nmi_wait_count;
16018 -static atomic_t nmi_update_count = ATOMIC_INIT(0);
16019 +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
16020
16021 int ftrace_arch_read_dyn_info(char *buf, int size)
16022 {
16023 @@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
16024
16025 r = snprintf(buf, size, "%u %u",
16026 nmi_wait_count,
16027 - atomic_read(&nmi_update_count));
16028 + atomic_read_unchecked(&nmi_update_count));
16029 return r;
16030 }
16031
16032 @@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
16033
16034 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
16035 smp_rmb();
16036 + pax_open_kernel();
16037 ftrace_mod_code();
16038 - atomic_inc(&nmi_update_count);
16039 + pax_close_kernel();
16040 + atomic_inc_unchecked(&nmi_update_count);
16041 }
16042 /* Must have previous changes seen before executions */
16043 smp_mb();
16044 @@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
16045 {
16046 unsigned char replaced[MCOUNT_INSN_SIZE];
16047
16048 + ip = ktla_ktva(ip);
16049 +
16050 /*
16051 * Note: Due to modules and __init, code can
16052 * disappear and change, we need to protect against faulting
16053 @@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
16054 unsigned char old[MCOUNT_INSN_SIZE], *new;
16055 int ret;
16056
16057 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
16058 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
16059 new = ftrace_call_replace(ip, (unsigned long)func);
16060 ret = ftrace_modify_code(ip, old, new);
16061
16062 @@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long ip,
16063 {
16064 unsigned char code[MCOUNT_INSN_SIZE];
16065
16066 + ip = ktla_ktva(ip);
16067 +
16068 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
16069 return -EFAULT;
16070
16071 diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
16072 index 51ff186..9e77418 100644
16073 --- a/arch/x86/kernel/head32.c
16074 +++ b/arch/x86/kernel/head32.c
16075 @@ -19,6 +19,7 @@
16076 #include <asm/io_apic.h>
16077 #include <asm/bios_ebda.h>
16078 #include <asm/tlbflush.h>
16079 +#include <asm/boot.h>
16080
16081 static void __init i386_default_early_setup(void)
16082 {
16083 @@ -31,8 +32,7 @@ static void __init i386_default_early_setup(void)
16084
16085 void __init i386_start_kernel(void)
16086 {
16087 - memblock_reserve(__pa_symbol(&_text),
16088 - __pa_symbol(&__bss_stop) - __pa_symbol(&_text));
16089 + memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop) - LOAD_PHYSICAL_ADDR);
16090
16091 #ifdef CONFIG_BLK_DEV_INITRD
16092 /* Reserve INITRD */
16093 diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
16094 index ce0be7c..c41476e 100644
16095 --- a/arch/x86/kernel/head_32.S
16096 +++ b/arch/x86/kernel/head_32.S
16097 @@ -25,6 +25,12 @@
16098 /* Physical address */
16099 #define pa(X) ((X) - __PAGE_OFFSET)
16100
16101 +#ifdef CONFIG_PAX_KERNEXEC
16102 +#define ta(X) (X)
16103 +#else
16104 +#define ta(X) ((X) - __PAGE_OFFSET)
16105 +#endif
16106 +
16107 /*
16108 * References to members of the new_cpu_data structure.
16109 */
16110 @@ -54,11 +60,7 @@
16111 * and small than max_low_pfn, otherwise will waste some page table entries
16112 */
16113
16114 -#if PTRS_PER_PMD > 1
16115 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
16116 -#else
16117 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
16118 -#endif
16119 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
16120
16121 /* Number of possible pages in the lowmem region */
16122 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
16123 @@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
16124 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16125
16126 /*
16127 + * Real beginning of normal "text" segment
16128 + */
16129 +ENTRY(stext)
16130 +ENTRY(_stext)
16131 +
16132 +/*
16133 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
16134 * %esi points to the real-mode code as a 32-bit pointer.
16135 * CS and DS must be 4 GB flat segments, but we don't depend on
16136 @@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16137 * can.
16138 */
16139 __HEAD
16140 +
16141 +#ifdef CONFIG_PAX_KERNEXEC
16142 + jmp startup_32
16143 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
16144 +.fill PAGE_SIZE-5,1,0xcc
16145 +#endif
16146 +
16147 ENTRY(startup_32)
16148 movl pa(stack_start),%ecx
16149
16150 @@ -105,6 +120,57 @@ ENTRY(startup_32)
16151 2:
16152 leal -__PAGE_OFFSET(%ecx),%esp
16153
16154 +#ifdef CONFIG_SMP
16155 + movl $pa(cpu_gdt_table),%edi
16156 + movl $__per_cpu_load,%eax
16157 + movw %ax,__KERNEL_PERCPU + 2(%edi)
16158 + rorl $16,%eax
16159 + movb %al,__KERNEL_PERCPU + 4(%edi)
16160 + movb %ah,__KERNEL_PERCPU + 7(%edi)
16161 + movl $__per_cpu_end - 1,%eax
16162 + subl $__per_cpu_start,%eax
16163 + movw %ax,__KERNEL_PERCPU + 0(%edi)
16164 +#endif
16165 +
16166 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16167 + movl $NR_CPUS,%ecx
16168 + movl $pa(cpu_gdt_table),%edi
16169 +1:
16170 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
16171 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
16172 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
16173 + addl $PAGE_SIZE_asm,%edi
16174 + loop 1b
16175 +#endif
16176 +
16177 +#ifdef CONFIG_PAX_KERNEXEC
16178 + movl $pa(boot_gdt),%edi
16179 + movl $__LOAD_PHYSICAL_ADDR,%eax
16180 + movw %ax,__BOOT_CS + 2(%edi)
16181 + rorl $16,%eax
16182 + movb %al,__BOOT_CS + 4(%edi)
16183 + movb %ah,__BOOT_CS + 7(%edi)
16184 + rorl $16,%eax
16185 +
16186 + ljmp $(__BOOT_CS),$1f
16187 +1:
16188 +
16189 + movl $NR_CPUS,%ecx
16190 + movl $pa(cpu_gdt_table),%edi
16191 + addl $__PAGE_OFFSET,%eax
16192 +1:
16193 + movw %ax,__KERNEL_CS + 2(%edi)
16194 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
16195 + rorl $16,%eax
16196 + movb %al,__KERNEL_CS + 4(%edi)
16197 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
16198 + movb %ah,__KERNEL_CS + 7(%edi)
16199 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
16200 + rorl $16,%eax
16201 + addl $PAGE_SIZE_asm,%edi
16202 + loop 1b
16203 +#endif
16204 +
16205 /*
16206 * Clear BSS first so that there are no surprises...
16207 */
16208 @@ -195,8 +261,11 @@ ENTRY(startup_32)
16209 movl %eax, pa(max_pfn_mapped)
16210
16211 /* Do early initialization of the fixmap area */
16212 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
16213 - movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
16214 +#ifdef CONFIG_COMPAT_VDSO
16215 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
16216 +#else
16217 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
16218 +#endif
16219 #else /* Not PAE */
16220
16221 page_pde_offset = (__PAGE_OFFSET >> 20);
16222 @@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
16223 movl %eax, pa(max_pfn_mapped)
16224
16225 /* Do early initialization of the fixmap area */
16226 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
16227 - movl %eax,pa(initial_page_table+0xffc)
16228 +#ifdef CONFIG_COMPAT_VDSO
16229 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
16230 +#else
16231 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
16232 +#endif
16233 #endif
16234
16235 #ifdef CONFIG_PARAVIRT
16236 @@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
16237 cmpl $num_subarch_entries, %eax
16238 jae bad_subarch
16239
16240 - movl pa(subarch_entries)(,%eax,4), %eax
16241 - subl $__PAGE_OFFSET, %eax
16242 - jmp *%eax
16243 + jmp *pa(subarch_entries)(,%eax,4)
16244
16245 bad_subarch:
16246 WEAK(lguest_entry)
16247 @@ -255,10 +325,10 @@ WEAK(xen_entry)
16248 __INITDATA
16249
16250 subarch_entries:
16251 - .long default_entry /* normal x86/PC */
16252 - .long lguest_entry /* lguest hypervisor */
16253 - .long xen_entry /* Xen hypervisor */
16254 - .long default_entry /* Moorestown MID */
16255 + .long ta(default_entry) /* normal x86/PC */
16256 + .long ta(lguest_entry) /* lguest hypervisor */
16257 + .long ta(xen_entry) /* Xen hypervisor */
16258 + .long ta(default_entry) /* Moorestown MID */
16259 num_subarch_entries = (. - subarch_entries) / 4
16260 .previous
16261 #else
16262 @@ -312,6 +382,7 @@ default_entry:
16263 orl %edx,%eax
16264 movl %eax,%cr4
16265
16266 +#ifdef CONFIG_X86_PAE
16267 testb $X86_CR4_PAE, %al # check if PAE is enabled
16268 jz 6f
16269
16270 @@ -340,6 +411,9 @@ default_entry:
16271 /* Make changes effective */
16272 wrmsr
16273
16274 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
16275 +#endif
16276 +
16277 6:
16278
16279 /*
16280 @@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
16281 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
16282 movl %eax,%ss # after changing gdt.
16283
16284 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
16285 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
16286 movl %eax,%ds
16287 movl %eax,%es
16288
16289 @@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
16290 */
16291 cmpb $0,ready
16292 jne 1f
16293 - movl $gdt_page,%eax
16294 + movl $cpu_gdt_table,%eax
16295 movl $stack_canary,%ecx
16296 +#ifdef CONFIG_SMP
16297 + addl $__per_cpu_load,%ecx
16298 +#endif
16299 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
16300 shrl $16, %ecx
16301 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
16302 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
16303 1:
16304 -#endif
16305 movl $(__KERNEL_STACK_CANARY),%eax
16306 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
16307 + movl $(__USER_DS),%eax
16308 +#else
16309 + xorl %eax,%eax
16310 +#endif
16311 movl %eax,%gs
16312
16313 xorl %eax,%eax # Clear LDT
16314 @@ -558,22 +639,22 @@ early_page_fault:
16315 jmp early_fault
16316
16317 early_fault:
16318 - cld
16319 #ifdef CONFIG_PRINTK
16320 + cmpl $1,%ss:early_recursion_flag
16321 + je hlt_loop
16322 + incl %ss:early_recursion_flag
16323 + cld
16324 pusha
16325 movl $(__KERNEL_DS),%eax
16326 movl %eax,%ds
16327 movl %eax,%es
16328 - cmpl $2,early_recursion_flag
16329 - je hlt_loop
16330 - incl early_recursion_flag
16331 movl %cr2,%eax
16332 pushl %eax
16333 pushl %edx /* trapno */
16334 pushl $fault_msg
16335 call printk
16336 +; call dump_stack
16337 #endif
16338 - call dump_stack
16339 hlt_loop:
16340 hlt
16341 jmp hlt_loop
16342 @@ -581,8 +662,11 @@ hlt_loop:
16343 /* This is the default interrupt "handler" :-) */
16344 ALIGN
16345 ignore_int:
16346 - cld
16347 #ifdef CONFIG_PRINTK
16348 + cmpl $2,%ss:early_recursion_flag
16349 + je hlt_loop
16350 + incl %ss:early_recursion_flag
16351 + cld
16352 pushl %eax
16353 pushl %ecx
16354 pushl %edx
16355 @@ -591,9 +675,6 @@ ignore_int:
16356 movl $(__KERNEL_DS),%eax
16357 movl %eax,%ds
16358 movl %eax,%es
16359 - cmpl $2,early_recursion_flag
16360 - je hlt_loop
16361 - incl early_recursion_flag
16362 pushl 16(%esp)
16363 pushl 24(%esp)
16364 pushl 32(%esp)
16365 @@ -622,29 +703,43 @@ ENTRY(initial_code)
16366 /*
16367 * BSS section
16368 */
16369 -__PAGE_ALIGNED_BSS
16370 - .align PAGE_SIZE
16371 #ifdef CONFIG_X86_PAE
16372 +.section .initial_pg_pmd,"a",@progbits
16373 initial_pg_pmd:
16374 .fill 1024*KPMDS,4,0
16375 #else
16376 +.section .initial_page_table,"a",@progbits
16377 ENTRY(initial_page_table)
16378 .fill 1024,4,0
16379 #endif
16380 +.section .initial_pg_fixmap,"a",@progbits
16381 initial_pg_fixmap:
16382 .fill 1024,4,0
16383 +.section .empty_zero_page,"a",@progbits
16384 ENTRY(empty_zero_page)
16385 .fill 4096,1,0
16386 +.section .swapper_pg_dir,"a",@progbits
16387 ENTRY(swapper_pg_dir)
16388 +#ifdef CONFIG_X86_PAE
16389 + .fill 4,8,0
16390 +#else
16391 .fill 1024,4,0
16392 +#endif
16393 +
16394 +/*
16395 + * The IDT has to be page-aligned to simplify the Pentium
16396 + * F0 0F bug workaround.. We have a special link segment
16397 + * for this.
16398 + */
16399 +.section .idt,"a",@progbits
16400 +ENTRY(idt_table)
16401 + .fill 256,8,0
16402
16403 /*
16404 * This starts the data section.
16405 */
16406 #ifdef CONFIG_X86_PAE
16407 -__PAGE_ALIGNED_DATA
16408 - /* Page-aligned for the benefit of paravirt? */
16409 - .align PAGE_SIZE
16410 +.section .initial_page_table,"a",@progbits
16411 ENTRY(initial_page_table)
16412 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
16413 # if KPMDS == 3
16414 @@ -663,18 +758,27 @@ ENTRY(initial_page_table)
16415 # error "Kernel PMDs should be 1, 2 or 3"
16416 # endif
16417 .align PAGE_SIZE /* needs to be page-sized too */
16418 +
16419 +#ifdef CONFIG_PAX_PER_CPU_PGD
16420 +ENTRY(cpu_pgd)
16421 + .rept NR_CPUS
16422 + .fill 4,8,0
16423 + .endr
16424 +#endif
16425 +
16426 #endif
16427
16428 .data
16429 .balign 4
16430 ENTRY(stack_start)
16431 - .long init_thread_union+THREAD_SIZE
16432 + .long init_thread_union+THREAD_SIZE-8
16433
16434 +ready: .byte 0
16435 +
16436 +.section .rodata,"a",@progbits
16437 early_recursion_flag:
16438 .long 0
16439
16440 -ready: .byte 0
16441 -
16442 int_msg:
16443 .asciz "Unknown interrupt or fault at: %p %p %p\n"
16444
16445 @@ -707,7 +811,7 @@ fault_msg:
16446 .word 0 # 32 bit align gdt_desc.address
16447 boot_gdt_descr:
16448 .word __BOOT_DS+7
16449 - .long boot_gdt - __PAGE_OFFSET
16450 + .long pa(boot_gdt)
16451
16452 .word 0 # 32-bit align idt_desc.address
16453 idt_descr:
16454 @@ -718,7 +822,7 @@ idt_descr:
16455 .word 0 # 32 bit align gdt_desc.address
16456 ENTRY(early_gdt_descr)
16457 .word GDT_ENTRIES*8-1
16458 - .long gdt_page /* Overwritten for secondary CPUs */
16459 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
16460
16461 /*
16462 * The boot_gdt must mirror the equivalent in setup.S and is
16463 @@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
16464 .align L1_CACHE_BYTES
16465 ENTRY(boot_gdt)
16466 .fill GDT_ENTRY_BOOT_CS,8,0
16467 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
16468 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
16469 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
16470 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
16471 +
16472 + .align PAGE_SIZE_asm
16473 +ENTRY(cpu_gdt_table)
16474 + .rept NR_CPUS
16475 + .quad 0x0000000000000000 /* NULL descriptor */
16476 + .quad 0x0000000000000000 /* 0x0b reserved */
16477 + .quad 0x0000000000000000 /* 0x13 reserved */
16478 + .quad 0x0000000000000000 /* 0x1b reserved */
16479 +
16480 +#ifdef CONFIG_PAX_KERNEXEC
16481 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
16482 +#else
16483 + .quad 0x0000000000000000 /* 0x20 unused */
16484 +#endif
16485 +
16486 + .quad 0x0000000000000000 /* 0x28 unused */
16487 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
16488 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
16489 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
16490 + .quad 0x0000000000000000 /* 0x4b reserved */
16491 + .quad 0x0000000000000000 /* 0x53 reserved */
16492 + .quad 0x0000000000000000 /* 0x5b reserved */
16493 +
16494 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
16495 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
16496 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
16497 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
16498 +
16499 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
16500 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
16501 +
16502 + /*
16503 + * Segments used for calling PnP BIOS have byte granularity.
16504 + * The code segments and data segments have fixed 64k limits,
16505 + * the transfer segment sizes are set at run time.
16506 + */
16507 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
16508 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
16509 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
16510 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
16511 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
16512 +
16513 + /*
16514 + * The APM segments have byte granularity and their bases
16515 + * are set at run time. All have 64k limits.
16516 + */
16517 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
16518 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
16519 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
16520 +
16521 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
16522 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
16523 + .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
16524 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
16525 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
16526 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
16527 +
16528 + /* Be sure this is zeroed to avoid false validations in Xen */
16529 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
16530 + .endr
16531 diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
16532 index 40f4eb3..6d24d9d 100644
16533 --- a/arch/x86/kernel/head_64.S
16534 +++ b/arch/x86/kernel/head_64.S
16535 @@ -19,6 +19,8 @@
16536 #include <asm/cache.h>
16537 #include <asm/processor-flags.h>
16538 #include <asm/percpu.h>
16539 +#include <asm/cpufeature.h>
16540 +#include <asm/alternative-asm.h>
16541
16542 #ifdef CONFIG_PARAVIRT
16543 #include <asm/asm-offsets.h>
16544 @@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
16545 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
16546 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
16547 L3_START_KERNEL = pud_index(__START_KERNEL_map)
16548 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
16549 +L3_VMALLOC_START = pud_index(VMALLOC_START)
16550 +L4_VMALLOC_END = pgd_index(VMALLOC_END)
16551 +L3_VMALLOC_END = pud_index(VMALLOC_END)
16552 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
16553 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
16554
16555 .text
16556 __HEAD
16557 @@ -85,35 +93,23 @@ startup_64:
16558 */
16559 addq %rbp, init_level4_pgt + 0(%rip)
16560 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
16561 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
16562 + addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
16563 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
16564 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
16565
16566 addq %rbp, level3_ident_pgt + 0(%rip)
16567 +#ifndef CONFIG_XEN
16568 + addq %rbp, level3_ident_pgt + 8(%rip)
16569 +#endif
16570
16571 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
16572 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
16573 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
16574 +
16575 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
16576 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
16577
16578 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
16579 -
16580 - /* Add an Identity mapping if I am above 1G */
16581 - leaq _text(%rip), %rdi
16582 - andq $PMD_PAGE_MASK, %rdi
16583 -
16584 - movq %rdi, %rax
16585 - shrq $PUD_SHIFT, %rax
16586 - andq $(PTRS_PER_PUD - 1), %rax
16587 - jz ident_complete
16588 -
16589 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
16590 - leaq level3_ident_pgt(%rip), %rbx
16591 - movq %rdx, 0(%rbx, %rax, 8)
16592 -
16593 - movq %rdi, %rax
16594 - shrq $PMD_SHIFT, %rax
16595 - andq $(PTRS_PER_PMD - 1), %rax
16596 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
16597 - leaq level2_spare_pgt(%rip), %rbx
16598 - movq %rdx, 0(%rbx, %rax, 8)
16599 -ident_complete:
16600 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
16601
16602 /*
16603 * Fixup the kernel text+data virtual addresses. Note that
16604 @@ -160,8 +156,8 @@ ENTRY(secondary_startup_64)
16605 * after the boot processor executes this code.
16606 */
16607
16608 - /* Enable PAE mode and PGE */
16609 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
16610 + /* Enable PAE mode and PSE/PGE */
16611 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
16612 movq %rax, %cr4
16613
16614 /* Setup early boot stage 4 level pagetables. */
16615 @@ -183,9 +179,17 @@ ENTRY(secondary_startup_64)
16616 movl $MSR_EFER, %ecx
16617 rdmsr
16618 btsl $_EFER_SCE, %eax /* Enable System Call */
16619 - btl $20,%edi /* No Execute supported? */
16620 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
16621 jnc 1f
16622 btsl $_EFER_NX, %eax
16623 + leaq init_level4_pgt(%rip), %rdi
16624 +#ifndef CONFIG_EFI
16625 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
16626 +#endif
16627 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
16628 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
16629 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
16630 + btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
16631 1: wrmsr /* Make changes effective */
16632
16633 /* Setup cr0 */
16634 @@ -247,6 +251,7 @@ ENTRY(secondary_startup_64)
16635 * jump. In addition we need to ensure %cs is set so we make this
16636 * a far return.
16637 */
16638 + pax_set_fptr_mask
16639 movq initial_code(%rip),%rax
16640 pushq $0 # fake return address to stop unwinder
16641 pushq $__KERNEL_CS # set correct cs
16642 @@ -269,7 +274,7 @@ ENTRY(secondary_startup_64)
16643 bad_address:
16644 jmp bad_address
16645
16646 - .section ".init.text","ax"
16647 + __INIT
16648 #ifdef CONFIG_EARLY_PRINTK
16649 .globl early_idt_handlers
16650 early_idt_handlers:
16651 @@ -314,18 +319,23 @@ ENTRY(early_idt_handler)
16652 #endif /* EARLY_PRINTK */
16653 1: hlt
16654 jmp 1b
16655 + .previous
16656
16657 #ifdef CONFIG_EARLY_PRINTK
16658 + __INITDATA
16659 early_recursion_flag:
16660 .long 0
16661 + .previous
16662
16663 + .section .rodata,"a",@progbits
16664 early_idt_msg:
16665 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
16666 early_idt_ripmsg:
16667 .asciz "RIP %s\n"
16668 + .previous
16669 #endif /* CONFIG_EARLY_PRINTK */
16670 - .previous
16671
16672 + .section .rodata,"a",@progbits
16673 #define NEXT_PAGE(name) \
16674 .balign PAGE_SIZE; \
16675 ENTRY(name)
16676 @@ -338,7 +348,6 @@ ENTRY(name)
16677 i = i + 1 ; \
16678 .endr
16679
16680 - .data
16681 /*
16682 * This default setting generates an ident mapping at address 0x100000
16683 * and a mapping for the kernel that precisely maps virtual address
16684 @@ -349,13 +358,41 @@ NEXT_PAGE(init_level4_pgt)
16685 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
16686 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
16687 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
16688 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
16689 + .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
16690 + .org init_level4_pgt + L4_VMALLOC_END*8, 0
16691 + .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
16692 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
16693 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
16694 .org init_level4_pgt + L4_START_KERNEL*8, 0
16695 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
16696 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
16697
16698 +#ifdef CONFIG_PAX_PER_CPU_PGD
16699 +NEXT_PAGE(cpu_pgd)
16700 + .rept NR_CPUS
16701 + .fill 512,8,0
16702 + .endr
16703 +#endif
16704 +
16705 NEXT_PAGE(level3_ident_pgt)
16706 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
16707 +#ifdef CONFIG_XEN
16708 .fill 511,8,0
16709 +#else
16710 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
16711 + .fill 510,8,0
16712 +#endif
16713 +
16714 +NEXT_PAGE(level3_vmalloc_start_pgt)
16715 + .fill 512,8,0
16716 +
16717 +NEXT_PAGE(level3_vmalloc_end_pgt)
16718 + .fill 512,8,0
16719 +
16720 +NEXT_PAGE(level3_vmemmap_pgt)
16721 + .fill L3_VMEMMAP_START,8,0
16722 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
16723
16724 NEXT_PAGE(level3_kernel_pgt)
16725 .fill L3_START_KERNEL,8,0
16726 @@ -363,20 +400,23 @@ NEXT_PAGE(level3_kernel_pgt)
16727 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
16728 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
16729
16730 +NEXT_PAGE(level2_vmemmap_pgt)
16731 + .fill 512,8,0
16732 +
16733 NEXT_PAGE(level2_fixmap_pgt)
16734 - .fill 506,8,0
16735 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
16736 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
16737 - .fill 5,8,0
16738 + .fill 507,8,0
16739 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
16740 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
16741 + .fill 4,8,0
16742
16743 -NEXT_PAGE(level1_fixmap_pgt)
16744 +NEXT_PAGE(level1_vsyscall_pgt)
16745 .fill 512,8,0
16746
16747 -NEXT_PAGE(level2_ident_pgt)
16748 - /* Since I easily can, map the first 1G.
16749 + /* Since I easily can, map the first 2G.
16750 * Don't set NX because code runs from these pages.
16751 */
16752 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
16753 +NEXT_PAGE(level2_ident_pgt)
16754 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
16755
16756 NEXT_PAGE(level2_kernel_pgt)
16757 /*
16758 @@ -389,37 +429,59 @@ NEXT_PAGE(level2_kernel_pgt)
16759 * If you want to increase this then increase MODULES_VADDR
16760 * too.)
16761 */
16762 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
16763 - KERNEL_IMAGE_SIZE/PMD_SIZE)
16764 -
16765 -NEXT_PAGE(level2_spare_pgt)
16766 - .fill 512, 8, 0
16767 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
16768
16769 #undef PMDS
16770 #undef NEXT_PAGE
16771
16772 - .data
16773 + .align PAGE_SIZE
16774 +ENTRY(cpu_gdt_table)
16775 + .rept NR_CPUS
16776 + .quad 0x0000000000000000 /* NULL descriptor */
16777 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
16778 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
16779 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
16780 + .quad 0x00cffb000000ffff /* __USER32_CS */
16781 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
16782 + .quad 0x00affb000000ffff /* __USER_CS */
16783 +
16784 +#ifdef CONFIG_PAX_KERNEXEC
16785 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
16786 +#else
16787 + .quad 0x0 /* unused */
16788 +#endif
16789 +
16790 + .quad 0,0 /* TSS */
16791 + .quad 0,0 /* LDT */
16792 + .quad 0,0,0 /* three TLS descriptors */
16793 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
16794 + /* asm/segment.h:GDT_ENTRIES must match this */
16795 +
16796 + /* zero the remaining page */
16797 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
16798 + .endr
16799 +
16800 .align 16
16801 .globl early_gdt_descr
16802 early_gdt_descr:
16803 .word GDT_ENTRIES*8-1
16804 early_gdt_descr_base:
16805 - .quad INIT_PER_CPU_VAR(gdt_page)
16806 + .quad cpu_gdt_table
16807
16808 ENTRY(phys_base)
16809 /* This must match the first entry in level2_kernel_pgt */
16810 .quad 0x0000000000000000
16811
16812 #include "../../x86/xen/xen-head.S"
16813 -
16814 - .section .bss, "aw", @nobits
16815 +
16816 + .section .rodata,"a",@progbits
16817 .align L1_CACHE_BYTES
16818 ENTRY(idt_table)
16819 - .skip IDT_ENTRIES * 16
16820 + .fill 512,8,0
16821
16822 .align L1_CACHE_BYTES
16823 ENTRY(nmi_idt_table)
16824 - .skip IDT_ENTRIES * 16
16825 + .fill 512,8,0
16826
16827 __PAGE_ALIGNED_BSS
16828 .align PAGE_SIZE
16829 diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
16830 index 9c3bd4a..e1d9b35 100644
16831 --- a/arch/x86/kernel/i386_ksyms_32.c
16832 +++ b/arch/x86/kernel/i386_ksyms_32.c
16833 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
16834 EXPORT_SYMBOL(cmpxchg8b_emu);
16835 #endif
16836
16837 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
16838 +
16839 /* Networking helper routines. */
16840 EXPORT_SYMBOL(csum_partial_copy_generic);
16841 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
16842 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
16843
16844 EXPORT_SYMBOL(__get_user_1);
16845 EXPORT_SYMBOL(__get_user_2);
16846 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
16847
16848 EXPORT_SYMBOL(csum_partial);
16849 EXPORT_SYMBOL(empty_zero_page);
16850 +
16851 +#ifdef CONFIG_PAX_KERNEXEC
16852 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
16853 +#endif
16854 diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
16855 index 6104852..6114160 100644
16856 --- a/arch/x86/kernel/i8259.c
16857 +++ b/arch/x86/kernel/i8259.c
16858 @@ -210,7 +210,7 @@ spurious_8259A_irq:
16859 "spurious 8259A interrupt: IRQ%d.\n", irq);
16860 spurious_irq_mask |= irqmask;
16861 }
16862 - atomic_inc(&irq_err_count);
16863 + atomic_inc_unchecked(&irq_err_count);
16864 /*
16865 * Theoretically we do not have to handle this IRQ,
16866 * but in Linux this does not cause problems and is
16867 diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
16868 index 43e9ccf..44ccf6f 100644
16869 --- a/arch/x86/kernel/init_task.c
16870 +++ b/arch/x86/kernel/init_task.c
16871 @@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
16872 * way process stacks are handled. This is done by having a special
16873 * "init_task" linker map entry..
16874 */
16875 -union thread_union init_thread_union __init_task_data =
16876 - { INIT_THREAD_INFO(init_task) };
16877 +union thread_union init_thread_union __init_task_data;
16878
16879 /*
16880 * Initial task structure.
16881 @@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
16882 * section. Since TSS's are completely CPU-local, we want them
16883 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
16884 */
16885 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
16886 -
16887 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
16888 +EXPORT_SYMBOL(init_tss);
16889 diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
16890 index 8c96897..be66bfa 100644
16891 --- a/arch/x86/kernel/ioport.c
16892 +++ b/arch/x86/kernel/ioport.c
16893 @@ -6,6 +6,7 @@
16894 #include <linux/sched.h>
16895 #include <linux/kernel.h>
16896 #include <linux/capability.h>
16897 +#include <linux/security.h>
16898 #include <linux/errno.h>
16899 #include <linux/types.h>
16900 #include <linux/ioport.h>
16901 @@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
16902
16903 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
16904 return -EINVAL;
16905 +#ifdef CONFIG_GRKERNSEC_IO
16906 + if (turn_on && grsec_disable_privio) {
16907 + gr_handle_ioperm();
16908 + return -EPERM;
16909 + }
16910 +#endif
16911 if (turn_on && !capable(CAP_SYS_RAWIO))
16912 return -EPERM;
16913
16914 @@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
16915 * because the ->io_bitmap_max value must match the bitmap
16916 * contents:
16917 */
16918 - tss = &per_cpu(init_tss, get_cpu());
16919 + tss = init_tss + get_cpu();
16920
16921 if (turn_on)
16922 bitmap_clear(t->io_bitmap_ptr, from, num);
16923 @@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct pt_regs *regs)
16924 return -EINVAL;
16925 /* Trying to gain more privileges? */
16926 if (level > old) {
16927 +#ifdef CONFIG_GRKERNSEC_IO
16928 + if (grsec_disable_privio) {
16929 + gr_handle_iopl();
16930 + return -EPERM;
16931 + }
16932 +#endif
16933 if (!capable(CAP_SYS_RAWIO))
16934 return -EPERM;
16935 }
16936 diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
16937 index 7943e0c..dd32c5c 100644
16938 --- a/arch/x86/kernel/irq.c
16939 +++ b/arch/x86/kernel/irq.c
16940 @@ -18,7 +18,7 @@
16941 #include <asm/mce.h>
16942 #include <asm/hw_irq.h>
16943
16944 -atomic_t irq_err_count;
16945 +atomic_unchecked_t irq_err_count;
16946
16947 /* Function pointer for generic interrupt vector handling */
16948 void (*x86_platform_ipi_callback)(void) = NULL;
16949 @@ -121,9 +121,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
16950 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
16951 seq_printf(p, " Machine check polls\n");
16952 #endif
16953 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
16954 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
16955 #if defined(CONFIG_X86_IO_APIC)
16956 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
16957 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
16958 #endif
16959 return 0;
16960 }
16961 @@ -164,10 +164,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
16962
16963 u64 arch_irq_stat(void)
16964 {
16965 - u64 sum = atomic_read(&irq_err_count);
16966 + u64 sum = atomic_read_unchecked(&irq_err_count);
16967
16968 #ifdef CONFIG_X86_IO_APIC
16969 - sum += atomic_read(&irq_mis_count);
16970 + sum += atomic_read_unchecked(&irq_mis_count);
16971 #endif
16972 return sum;
16973 }
16974 diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
16975 index 40fc861..9b8739b 100644
16976 --- a/arch/x86/kernel/irq_32.c
16977 +++ b/arch/x86/kernel/irq_32.c
16978 @@ -39,7 +39,7 @@ static int check_stack_overflow(void)
16979 __asm__ __volatile__("andl %%esp,%0" :
16980 "=r" (sp) : "0" (THREAD_SIZE - 1));
16981
16982 - return sp < (sizeof(struct thread_info) + STACK_WARN);
16983 + return sp < STACK_WARN;
16984 }
16985
16986 static void print_stack_overflow(void)
16987 @@ -59,8 +59,8 @@ static inline void print_stack_overflow(void) { }
16988 * per-CPU IRQ handling contexts (thread information and stack)
16989 */
16990 union irq_ctx {
16991 - struct thread_info tinfo;
16992 - u32 stack[THREAD_SIZE/sizeof(u32)];
16993 + unsigned long previous_esp;
16994 + u32 stack[THREAD_SIZE/sizeof(u32)];
16995 } __attribute__((aligned(THREAD_SIZE)));
16996
16997 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
16998 @@ -80,10 +80,9 @@ static void call_on_stack(void *func, void *stack)
16999 static inline int
17000 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17001 {
17002 - union irq_ctx *curctx, *irqctx;
17003 + union irq_ctx *irqctx;
17004 u32 *isp, arg1, arg2;
17005
17006 - curctx = (union irq_ctx *) current_thread_info();
17007 irqctx = __this_cpu_read(hardirq_ctx);
17008
17009 /*
17010 @@ -92,21 +91,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17011 * handler) we can't do that and just have to keep using the
17012 * current stack (which is the irq stack already after all)
17013 */
17014 - if (unlikely(curctx == irqctx))
17015 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
17016 return 0;
17017
17018 /* build the stack frame on the IRQ stack */
17019 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17020 - irqctx->tinfo.task = curctx->tinfo.task;
17021 - irqctx->tinfo.previous_esp = current_stack_pointer;
17022 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17023 + irqctx->previous_esp = current_stack_pointer;
17024
17025 - /*
17026 - * Copy the softirq bits in preempt_count so that the
17027 - * softirq checks work in the hardirq context.
17028 - */
17029 - irqctx->tinfo.preempt_count =
17030 - (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
17031 - (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
17032 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17033 + __set_fs(MAKE_MM_SEG(0));
17034 +#endif
17035
17036 if (unlikely(overflow))
17037 call_on_stack(print_stack_overflow, isp);
17038 @@ -118,6 +112,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17039 : "0" (irq), "1" (desc), "2" (isp),
17040 "D" (desc->handle_irq)
17041 : "memory", "cc", "ecx");
17042 +
17043 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17044 + __set_fs(current_thread_info()->addr_limit);
17045 +#endif
17046 +
17047 return 1;
17048 }
17049
17050 @@ -126,29 +125,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17051 */
17052 void __cpuinit irq_ctx_init(int cpu)
17053 {
17054 - union irq_ctx *irqctx;
17055 -
17056 if (per_cpu(hardirq_ctx, cpu))
17057 return;
17058
17059 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
17060 - THREAD_FLAGS,
17061 - THREAD_ORDER));
17062 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
17063 - irqctx->tinfo.cpu = cpu;
17064 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
17065 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17066 -
17067 - per_cpu(hardirq_ctx, cpu) = irqctx;
17068 -
17069 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
17070 - THREAD_FLAGS,
17071 - THREAD_ORDER));
17072 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
17073 - irqctx->tinfo.cpu = cpu;
17074 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17075 -
17076 - per_cpu(softirq_ctx, cpu) = irqctx;
17077 + per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
17078 + per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
17079
17080 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
17081 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
17082 @@ -157,7 +138,6 @@ void __cpuinit irq_ctx_init(int cpu)
17083 asmlinkage void do_softirq(void)
17084 {
17085 unsigned long flags;
17086 - struct thread_info *curctx;
17087 union irq_ctx *irqctx;
17088 u32 *isp;
17089
17090 @@ -167,15 +147,22 @@ asmlinkage void do_softirq(void)
17091 local_irq_save(flags);
17092
17093 if (local_softirq_pending()) {
17094 - curctx = current_thread_info();
17095 irqctx = __this_cpu_read(softirq_ctx);
17096 - irqctx->tinfo.task = curctx->task;
17097 - irqctx->tinfo.previous_esp = current_stack_pointer;
17098 + irqctx->previous_esp = current_stack_pointer;
17099
17100 /* build the stack frame on the softirq stack */
17101 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17102 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17103 +
17104 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17105 + __set_fs(MAKE_MM_SEG(0));
17106 +#endif
17107
17108 call_on_stack(__do_softirq, isp);
17109 +
17110 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17111 + __set_fs(current_thread_info()->addr_limit);
17112 +#endif
17113 +
17114 /*
17115 * Shouldn't happen, we returned above if in_interrupt():
17116 */
17117 diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
17118 index d04d3ec..ea4b374 100644
17119 --- a/arch/x86/kernel/irq_64.c
17120 +++ b/arch/x86/kernel/irq_64.c
17121 @@ -44,7 +44,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
17122 u64 estack_top, estack_bottom;
17123 u64 curbase = (u64)task_stack_page(current);
17124
17125 - if (user_mode_vm(regs))
17126 + if (user_mode(regs))
17127 return;
17128
17129 if (regs->sp >= curbase + sizeof(struct thread_info) +
17130 diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
17131 index faba577..93b9e71 100644
17132 --- a/arch/x86/kernel/kgdb.c
17133 +++ b/arch/x86/kernel/kgdb.c
17134 @@ -124,11 +124,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
17135 #ifdef CONFIG_X86_32
17136 switch (regno) {
17137 case GDB_SS:
17138 - if (!user_mode_vm(regs))
17139 + if (!user_mode(regs))
17140 *(unsigned long *)mem = __KERNEL_DS;
17141 break;
17142 case GDB_SP:
17143 - if (!user_mode_vm(regs))
17144 + if (!user_mode(regs))
17145 *(unsigned long *)mem = kernel_stack_pointer(regs);
17146 break;
17147 case GDB_GS:
17148 @@ -473,12 +473,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
17149 case 'k':
17150 /* clear the trace bit */
17151 linux_regs->flags &= ~X86_EFLAGS_TF;
17152 - atomic_set(&kgdb_cpu_doing_single_step, -1);
17153 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
17154
17155 /* set the trace bit if we're stepping */
17156 if (remcomInBuffer[0] == 's') {
17157 linux_regs->flags |= X86_EFLAGS_TF;
17158 - atomic_set(&kgdb_cpu_doing_single_step,
17159 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
17160 raw_smp_processor_id());
17161 }
17162
17163 @@ -543,7 +543,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
17164
17165 switch (cmd) {
17166 case DIE_DEBUG:
17167 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
17168 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
17169 if (user_mode(regs))
17170 return single_step_cont(regs, args);
17171 break;
17172 diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
17173 index 7da647d..56fe348 100644
17174 --- a/arch/x86/kernel/kprobes.c
17175 +++ b/arch/x86/kernel/kprobes.c
17176 @@ -118,8 +118,11 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
17177 } __attribute__((packed)) *insn;
17178
17179 insn = (struct __arch_relative_insn *)from;
17180 +
17181 + pax_open_kernel();
17182 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
17183 insn->op = op;
17184 + pax_close_kernel();
17185 }
17186
17187 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
17188 @@ -156,7 +159,7 @@ static int __kprobes can_boost(kprobe_opcode_t *opcodes)
17189 kprobe_opcode_t opcode;
17190 kprobe_opcode_t *orig_opcodes = opcodes;
17191
17192 - if (search_exception_tables((unsigned long)opcodes))
17193 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
17194 return 0; /* Page fault may occur on this address. */
17195
17196 retry:
17197 @@ -317,7 +320,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
17198 }
17199 }
17200 insn_get_length(&insn);
17201 + pax_open_kernel();
17202 memcpy(dest, insn.kaddr, insn.length);
17203 + pax_close_kernel();
17204
17205 #ifdef CONFIG_X86_64
17206 if (insn_rip_relative(&insn)) {
17207 @@ -341,7 +346,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
17208 (u8 *) dest;
17209 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
17210 disp = (u8 *) dest + insn_offset_displacement(&insn);
17211 + pax_open_kernel();
17212 *(s32 *) disp = (s32) newdisp;
17213 + pax_close_kernel();
17214 }
17215 #endif
17216 return insn.length;
17217 @@ -355,12 +362,12 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)
17218 */
17219 __copy_instruction(p->ainsn.insn, p->addr, 0);
17220
17221 - if (can_boost(p->addr))
17222 + if (can_boost(ktla_ktva(p->addr)))
17223 p->ainsn.boostable = 0;
17224 else
17225 p->ainsn.boostable = -1;
17226
17227 - p->opcode = *p->addr;
17228 + p->opcode = *(ktla_ktva(p->addr));
17229 }
17230
17231 int __kprobes arch_prepare_kprobe(struct kprobe *p)
17232 @@ -477,7 +484,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
17233 * nor set current_kprobe, because it doesn't use single
17234 * stepping.
17235 */
17236 - regs->ip = (unsigned long)p->ainsn.insn;
17237 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
17238 preempt_enable_no_resched();
17239 return;
17240 }
17241 @@ -496,7 +503,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
17242 if (p->opcode == BREAKPOINT_INSTRUCTION)
17243 regs->ip = (unsigned long)p->addr;
17244 else
17245 - regs->ip = (unsigned long)p->ainsn.insn;
17246 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
17247 }
17248
17249 /*
17250 @@ -575,7 +582,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
17251 setup_singlestep(p, regs, kcb, 0);
17252 return 1;
17253 }
17254 - } else if (*addr != BREAKPOINT_INSTRUCTION) {
17255 + } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
17256 /*
17257 * The breakpoint instruction was removed right
17258 * after we hit it. Another cpu has removed
17259 @@ -683,6 +690,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
17260 " movq %rax, 152(%rsp)\n"
17261 RESTORE_REGS_STRING
17262 " popfq\n"
17263 +#ifdef KERNEXEC_PLUGIN
17264 + " btsq $63,(%rsp)\n"
17265 +#endif
17266 #else
17267 " pushf\n"
17268 SAVE_REGS_STRING
17269 @@ -820,7 +830,7 @@ static void __kprobes resume_execution(struct kprobe *p,
17270 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
17271 {
17272 unsigned long *tos = stack_addr(regs);
17273 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
17274 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
17275 unsigned long orig_ip = (unsigned long)p->addr;
17276 kprobe_opcode_t *insn = p->ainsn.insn;
17277
17278 @@ -1002,7 +1012,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
17279 struct die_args *args = data;
17280 int ret = NOTIFY_DONE;
17281
17282 - if (args->regs && user_mode_vm(args->regs))
17283 + if (args->regs && user_mode(args->regs))
17284 return ret;
17285
17286 switch (val) {
17287 @@ -1384,7 +1394,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
17288 * Verify if the address gap is in 2GB range, because this uses
17289 * a relative jump.
17290 */
17291 - rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
17292 + rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
17293 if (abs(rel) > 0x7fffffff)
17294 return -ERANGE;
17295
17296 @@ -1405,11 +1415,11 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
17297 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
17298
17299 /* Set probe function call */
17300 - synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
17301 + synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
17302
17303 /* Set returning jmp instruction at the tail of out-of-line buffer */
17304 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
17305 - (u8 *)op->kp.addr + op->optinsn.size);
17306 + (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
17307
17308 flush_icache_range((unsigned long) buf,
17309 (unsigned long) buf + TMPL_END_IDX +
17310 @@ -1431,7 +1441,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
17311 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
17312
17313 /* Backup instructions which will be replaced by jump address */
17314 - memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
17315 + memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
17316 RELATIVE_ADDR_SIZE);
17317
17318 insn_buf[0] = RELATIVEJUMP_OPCODE;
17319 diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
17320 index ea69726..604d066 100644
17321 --- a/arch/x86/kernel/ldt.c
17322 +++ b/arch/x86/kernel/ldt.c
17323 @@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
17324 if (reload) {
17325 #ifdef CONFIG_SMP
17326 preempt_disable();
17327 - load_LDT(pc);
17328 + load_LDT_nolock(pc);
17329 if (!cpumask_equal(mm_cpumask(current->mm),
17330 cpumask_of(smp_processor_id())))
17331 smp_call_function(flush_ldt, current->mm, 1);
17332 preempt_enable();
17333 #else
17334 - load_LDT(pc);
17335 + load_LDT_nolock(pc);
17336 #endif
17337 }
17338 if (oldsize) {
17339 @@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
17340 return err;
17341
17342 for (i = 0; i < old->size; i++)
17343 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
17344 + write_ldt_entry(new->ldt, i, old->ldt + i);
17345 return 0;
17346 }
17347
17348 @@ -116,6 +116,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
17349 retval = copy_ldt(&mm->context, &old_mm->context);
17350 mutex_unlock(&old_mm->context.lock);
17351 }
17352 +
17353 + if (tsk == current) {
17354 + mm->context.vdso = 0;
17355 +
17356 +#ifdef CONFIG_X86_32
17357 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
17358 + mm->context.user_cs_base = 0UL;
17359 + mm->context.user_cs_limit = ~0UL;
17360 +
17361 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
17362 + cpus_clear(mm->context.cpu_user_cs_mask);
17363 +#endif
17364 +
17365 +#endif
17366 +#endif
17367 +
17368 + }
17369 +
17370 return retval;
17371 }
17372
17373 @@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
17374 }
17375 }
17376
17377 +#ifdef CONFIG_PAX_SEGMEXEC
17378 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
17379 + error = -EINVAL;
17380 + goto out_unlock;
17381 + }
17382 +#endif
17383 +
17384 fill_ldt(&ldt, &ldt_info);
17385 if (oldmode)
17386 ldt.avl = 0;
17387 diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
17388 index a3fa43b..8966f4c 100644
17389 --- a/arch/x86/kernel/machine_kexec_32.c
17390 +++ b/arch/x86/kernel/machine_kexec_32.c
17391 @@ -27,7 +27,7 @@
17392 #include <asm/cacheflush.h>
17393 #include <asm/debugreg.h>
17394
17395 -static void set_idt(void *newidt, __u16 limit)
17396 +static void set_idt(struct desc_struct *newidt, __u16 limit)
17397 {
17398 struct desc_ptr curidt;
17399
17400 @@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16 limit)
17401 }
17402
17403
17404 -static void set_gdt(void *newgdt, __u16 limit)
17405 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
17406 {
17407 struct desc_ptr curgdt;
17408
17409 @@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
17410 }
17411
17412 control_page = page_address(image->control_code_page);
17413 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
17414 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
17415
17416 relocate_kernel_ptr = control_page;
17417 page_list[PA_CONTROL_PAGE] = __pa(control_page);
17418 diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
17419 index 3ca42d0..7cff8cc 100644
17420 --- a/arch/x86/kernel/microcode_intel.c
17421 +++ b/arch/x86/kernel/microcode_intel.c
17422 @@ -436,13 +436,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
17423
17424 static int get_ucode_user(void *to, const void *from, size_t n)
17425 {
17426 - return copy_from_user(to, from, n);
17427 + return copy_from_user(to, (const void __force_user *)from, n);
17428 }
17429
17430 static enum ucode_state
17431 request_microcode_user(int cpu, const void __user *buf, size_t size)
17432 {
17433 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
17434 + return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
17435 }
17436
17437 static void microcode_fini_cpu(int cpu)
17438 diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
17439 index 925179f..267ac7a 100644
17440 --- a/arch/x86/kernel/module.c
17441 +++ b/arch/x86/kernel/module.c
17442 @@ -36,15 +36,60 @@
17443 #define DEBUGP(fmt...)
17444 #endif
17445
17446 -void *module_alloc(unsigned long size)
17447 +static inline void *__module_alloc(unsigned long size, pgprot_t prot)
17448 {
17449 - if (PAGE_ALIGN(size) > MODULES_LEN)
17450 + if (size == 0 || PAGE_ALIGN(size) > MODULES_LEN)
17451 return NULL;
17452 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
17453 - GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
17454 + GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
17455 -1, __builtin_return_address(0));
17456 }
17457
17458 +void *module_alloc(unsigned long size)
17459 +{
17460 +
17461 +#ifdef CONFIG_PAX_KERNEXEC
17462 + return __module_alloc(size, PAGE_KERNEL);
17463 +#else
17464 + return __module_alloc(size, PAGE_KERNEL_EXEC);
17465 +#endif
17466 +
17467 +}
17468 +
17469 +#ifdef CONFIG_PAX_KERNEXEC
17470 +#ifdef CONFIG_X86_32
17471 +void *module_alloc_exec(unsigned long size)
17472 +{
17473 + struct vm_struct *area;
17474 +
17475 + if (size == 0)
17476 + return NULL;
17477 +
17478 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
17479 + return area ? area->addr : NULL;
17480 +}
17481 +EXPORT_SYMBOL(module_alloc_exec);
17482 +
17483 +void module_free_exec(struct module *mod, void *module_region)
17484 +{
17485 + vunmap(module_region);
17486 +}
17487 +EXPORT_SYMBOL(module_free_exec);
17488 +#else
17489 +void module_free_exec(struct module *mod, void *module_region)
17490 +{
17491 + module_free(mod, module_region);
17492 +}
17493 +EXPORT_SYMBOL(module_free_exec);
17494 +
17495 +void *module_alloc_exec(unsigned long size)
17496 +{
17497 + return __module_alloc(size, PAGE_KERNEL_RX);
17498 +}
17499 +EXPORT_SYMBOL(module_alloc_exec);
17500 +#endif
17501 +#endif
17502 +
17503 #ifdef CONFIG_X86_32
17504 int apply_relocate(Elf32_Shdr *sechdrs,
17505 const char *strtab,
17506 @@ -55,14 +100,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
17507 unsigned int i;
17508 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
17509 Elf32_Sym *sym;
17510 - uint32_t *location;
17511 + uint32_t *plocation, location;
17512
17513 DEBUGP("Applying relocate section %u to %u\n", relsec,
17514 sechdrs[relsec].sh_info);
17515 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
17516 /* This is where to make the change */
17517 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
17518 - + rel[i].r_offset;
17519 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
17520 + location = (uint32_t)plocation;
17521 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
17522 + plocation = ktla_ktva((void *)plocation);
17523 /* This is the symbol it is referring to. Note that all
17524 undefined symbols have been resolved. */
17525 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
17526 @@ -71,11 +118,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
17527 switch (ELF32_R_TYPE(rel[i].r_info)) {
17528 case R_386_32:
17529 /* We add the value into the location given */
17530 - *location += sym->st_value;
17531 + pax_open_kernel();
17532 + *plocation += sym->st_value;
17533 + pax_close_kernel();
17534 break;
17535 case R_386_PC32:
17536 /* Add the value, subtract its postition */
17537 - *location += sym->st_value - (uint32_t)location;
17538 + pax_open_kernel();
17539 + *plocation += sym->st_value - location;
17540 + pax_close_kernel();
17541 break;
17542 default:
17543 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
17544 @@ -120,21 +171,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
17545 case R_X86_64_NONE:
17546 break;
17547 case R_X86_64_64:
17548 + pax_open_kernel();
17549 *(u64 *)loc = val;
17550 + pax_close_kernel();
17551 break;
17552 case R_X86_64_32:
17553 + pax_open_kernel();
17554 *(u32 *)loc = val;
17555 + pax_close_kernel();
17556 if (val != *(u32 *)loc)
17557 goto overflow;
17558 break;
17559 case R_X86_64_32S:
17560 + pax_open_kernel();
17561 *(s32 *)loc = val;
17562 + pax_close_kernel();
17563 if ((s64)val != *(s32 *)loc)
17564 goto overflow;
17565 break;
17566 case R_X86_64_PC32:
17567 val -= (u64)loc;
17568 + pax_open_kernel();
17569 *(u32 *)loc = val;
17570 + pax_close_kernel();
17571 +
17572 #if 0
17573 if ((s64)val != *(s32 *)loc)
17574 goto overflow;
17575 diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
17576 index 47acaf3..ec48ab6 100644
17577 --- a/arch/x86/kernel/nmi.c
17578 +++ b/arch/x86/kernel/nmi.c
17579 @@ -505,6 +505,17 @@ static inline void nmi_nesting_postprocess(void)
17580 dotraplinkage notrace __kprobes void
17581 do_nmi(struct pt_regs *regs, long error_code)
17582 {
17583 +
17584 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17585 + if (!user_mode(regs)) {
17586 + unsigned long cs = regs->cs & 0xFFFF;
17587 + unsigned long ip = ktva_ktla(regs->ip);
17588 +
17589 + if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
17590 + regs->ip = ip;
17591 + }
17592 +#endif
17593 +
17594 nmi_nesting_preprocess(regs);
17595
17596 nmi_enter();
17597 diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
17598 index 676b8c7..870ba04 100644
17599 --- a/arch/x86/kernel/paravirt-spinlocks.c
17600 +++ b/arch/x86/kernel/paravirt-spinlocks.c
17601 @@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
17602 arch_spin_lock(lock);
17603 }
17604
17605 -struct pv_lock_ops pv_lock_ops = {
17606 +struct pv_lock_ops pv_lock_ops __read_only = {
17607 #ifdef CONFIG_SMP
17608 .spin_is_locked = __ticket_spin_is_locked,
17609 .spin_is_contended = __ticket_spin_is_contended,
17610 diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
17611 index d90272e..6bb013b 100644
17612 --- a/arch/x86/kernel/paravirt.c
17613 +++ b/arch/x86/kernel/paravirt.c
17614 @@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
17615 {
17616 return x;
17617 }
17618 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
17619 +PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
17620 +#endif
17621
17622 void __init default_banner(void)
17623 {
17624 @@ -145,15 +148,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
17625 if (opfunc == NULL)
17626 /* If there's no function, patch it with a ud2a (BUG) */
17627 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
17628 - else if (opfunc == _paravirt_nop)
17629 + else if (opfunc == (void *)_paravirt_nop)
17630 /* If the operation is a nop, then nop the callsite */
17631 ret = paravirt_patch_nop();
17632
17633 /* identity functions just return their single argument */
17634 - else if (opfunc == _paravirt_ident_32)
17635 + else if (opfunc == (void *)_paravirt_ident_32)
17636 ret = paravirt_patch_ident_32(insnbuf, len);
17637 - else if (opfunc == _paravirt_ident_64)
17638 + else if (opfunc == (void *)_paravirt_ident_64)
17639 ret = paravirt_patch_ident_64(insnbuf, len);
17640 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
17641 + else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
17642 + ret = paravirt_patch_ident_64(insnbuf, len);
17643 +#endif
17644
17645 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
17646 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
17647 @@ -178,7 +185,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
17648 if (insn_len > len || start == NULL)
17649 insn_len = len;
17650 else
17651 - memcpy(insnbuf, start, insn_len);
17652 + memcpy(insnbuf, ktla_ktva(start), insn_len);
17653
17654 return insn_len;
17655 }
17656 @@ -302,7 +309,7 @@ void arch_flush_lazy_mmu_mode(void)
17657 preempt_enable();
17658 }
17659
17660 -struct pv_info pv_info = {
17661 +struct pv_info pv_info __read_only = {
17662 .name = "bare hardware",
17663 .paravirt_enabled = 0,
17664 .kernel_rpl = 0,
17665 @@ -313,16 +320,16 @@ struct pv_info pv_info = {
17666 #endif
17667 };
17668
17669 -struct pv_init_ops pv_init_ops = {
17670 +struct pv_init_ops pv_init_ops __read_only = {
17671 .patch = native_patch,
17672 };
17673
17674 -struct pv_time_ops pv_time_ops = {
17675 +struct pv_time_ops pv_time_ops __read_only = {
17676 .sched_clock = native_sched_clock,
17677 .steal_clock = native_steal_clock,
17678 };
17679
17680 -struct pv_irq_ops pv_irq_ops = {
17681 +struct pv_irq_ops pv_irq_ops __read_only = {
17682 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
17683 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
17684 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
17685 @@ -334,7 +341,7 @@ struct pv_irq_ops pv_irq_ops = {
17686 #endif
17687 };
17688
17689 -struct pv_cpu_ops pv_cpu_ops = {
17690 +struct pv_cpu_ops pv_cpu_ops __read_only = {
17691 .cpuid = native_cpuid,
17692 .get_debugreg = native_get_debugreg,
17693 .set_debugreg = native_set_debugreg,
17694 @@ -395,21 +402,26 @@ struct pv_cpu_ops pv_cpu_ops = {
17695 .end_context_switch = paravirt_nop,
17696 };
17697
17698 -struct pv_apic_ops pv_apic_ops = {
17699 +struct pv_apic_ops pv_apic_ops __read_only = {
17700 #ifdef CONFIG_X86_LOCAL_APIC
17701 .startup_ipi_hook = paravirt_nop,
17702 #endif
17703 };
17704
17705 -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
17706 +#ifdef CONFIG_X86_32
17707 +#ifdef CONFIG_X86_PAE
17708 +/* 64-bit pagetable entries */
17709 +#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
17710 +#else
17711 /* 32-bit pagetable entries */
17712 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
17713 +#endif
17714 #else
17715 /* 64-bit pagetable entries */
17716 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
17717 #endif
17718
17719 -struct pv_mmu_ops pv_mmu_ops = {
17720 +struct pv_mmu_ops pv_mmu_ops __read_only = {
17721
17722 .read_cr2 = native_read_cr2,
17723 .write_cr2 = native_write_cr2,
17724 @@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
17725 .make_pud = PTE_IDENT,
17726
17727 .set_pgd = native_set_pgd,
17728 + .set_pgd_batched = native_set_pgd_batched,
17729 #endif
17730 #endif /* PAGETABLE_LEVELS >= 3 */
17731
17732 @@ -478,6 +491,12 @@ struct pv_mmu_ops pv_mmu_ops = {
17733 },
17734
17735 .set_fixmap = native_set_fixmap,
17736 +
17737 +#ifdef CONFIG_PAX_KERNEXEC
17738 + .pax_open_kernel = native_pax_open_kernel,
17739 + .pax_close_kernel = native_pax_close_kernel,
17740 +#endif
17741 +
17742 };
17743
17744 EXPORT_SYMBOL_GPL(pv_time_ops);
17745 diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
17746 index 35ccf75..7a15747 100644
17747 --- a/arch/x86/kernel/pci-iommu_table.c
17748 +++ b/arch/x86/kernel/pci-iommu_table.c
17749 @@ -2,7 +2,7 @@
17750 #include <asm/iommu_table.h>
17751 #include <linux/string.h>
17752 #include <linux/kallsyms.h>
17753 -
17754 +#include <linux/sched.h>
17755
17756 #define DEBUG 1
17757
17758 diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
17759 index 15763af..da59ada 100644
17760 --- a/arch/x86/kernel/process.c
17761 +++ b/arch/x86/kernel/process.c
17762 @@ -48,16 +48,33 @@ void free_thread_xstate(struct task_struct *tsk)
17763
17764 void free_thread_info(struct thread_info *ti)
17765 {
17766 - free_thread_xstate(ti->task);
17767 free_pages((unsigned long)ti, THREAD_ORDER);
17768 }
17769
17770 +static struct kmem_cache *task_struct_cachep;
17771 +
17772 void arch_task_cache_init(void)
17773 {
17774 - task_xstate_cachep =
17775 - kmem_cache_create("task_xstate", xstate_size,
17776 + /* create a slab on which task_structs can be allocated */
17777 + task_struct_cachep =
17778 + kmem_cache_create("task_struct", sizeof(struct task_struct),
17779 + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
17780 +
17781 + task_xstate_cachep =
17782 + kmem_cache_create("task_xstate", xstate_size,
17783 __alignof__(union thread_xstate),
17784 - SLAB_PANIC | SLAB_NOTRACK, NULL);
17785 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
17786 +}
17787 +
17788 +struct task_struct *alloc_task_struct_node(int node)
17789 +{
17790 + return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
17791 +}
17792 +
17793 +void free_task_struct(struct task_struct *task)
17794 +{
17795 + free_thread_xstate(task);
17796 + kmem_cache_free(task_struct_cachep, task);
17797 }
17798
17799 /*
17800 @@ -70,7 +87,7 @@ void exit_thread(void)
17801 unsigned long *bp = t->io_bitmap_ptr;
17802
17803 if (bp) {
17804 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
17805 + struct tss_struct *tss = init_tss + get_cpu();
17806
17807 t->io_bitmap_ptr = NULL;
17808 clear_thread_flag(TIF_IO_BITMAP);
17809 @@ -106,7 +123,7 @@ void show_regs_common(void)
17810
17811 printk(KERN_CONT "\n");
17812 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
17813 - current->pid, current->comm, print_tainted(),
17814 + task_pid_nr(current), current->comm, print_tainted(),
17815 init_utsname()->release,
17816 (int)strcspn(init_utsname()->version, " "),
17817 init_utsname()->version);
17818 @@ -120,6 +137,9 @@ void flush_thread(void)
17819 {
17820 struct task_struct *tsk = current;
17821
17822 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
17823 + loadsegment(gs, 0);
17824 +#endif
17825 flush_ptrace_hw_breakpoint(tsk);
17826 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
17827 /*
17828 @@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
17829 regs.di = (unsigned long) arg;
17830
17831 #ifdef CONFIG_X86_32
17832 - regs.ds = __USER_DS;
17833 - regs.es = __USER_DS;
17834 + regs.ds = __KERNEL_DS;
17835 + regs.es = __KERNEL_DS;
17836 regs.fs = __KERNEL_PERCPU;
17837 - regs.gs = __KERNEL_STACK_CANARY;
17838 + savesegment(gs, regs.gs);
17839 #else
17840 regs.ss = __KERNEL_DS;
17841 #endif
17842 @@ -411,7 +431,7 @@ bool set_pm_idle_to_default(void)
17843
17844 return ret;
17845 }
17846 -void stop_this_cpu(void *dummy)
17847 +__noreturn void stop_this_cpu(void *dummy)
17848 {
17849 local_irq_disable();
17850 /*
17851 @@ -653,16 +673,37 @@ static int __init idle_setup(char *str)
17852 }
17853 early_param("idle", idle_setup);
17854
17855 -unsigned long arch_align_stack(unsigned long sp)
17856 +#ifdef CONFIG_PAX_RANDKSTACK
17857 +void pax_randomize_kstack(struct pt_regs *regs)
17858 {
17859 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
17860 - sp -= get_random_int() % 8192;
17861 - return sp & ~0xf;
17862 -}
17863 + struct thread_struct *thread = &current->thread;
17864 + unsigned long time;
17865
17866 -unsigned long arch_randomize_brk(struct mm_struct *mm)
17867 -{
17868 - unsigned long range_end = mm->brk + 0x02000000;
17869 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
17870 -}
17871 + if (!randomize_va_space)
17872 + return;
17873 +
17874 + if (v8086_mode(regs))
17875 + return;
17876
17877 + rdtscl(time);
17878 +
17879 + /* P4 seems to return a 0 LSB, ignore it */
17880 +#ifdef CONFIG_MPENTIUM4
17881 + time &= 0x3EUL;
17882 + time <<= 2;
17883 +#elif defined(CONFIG_X86_64)
17884 + time &= 0xFUL;
17885 + time <<= 4;
17886 +#else
17887 + time &= 0x1FUL;
17888 + time <<= 3;
17889 +#endif
17890 +
17891 + thread->sp0 ^= time;
17892 + load_sp0(init_tss + smp_processor_id(), thread);
17893 +
17894 +#ifdef CONFIG_X86_64
17895 + percpu_write(kernel_stack, thread->sp0);
17896 +#endif
17897 +}
17898 +#endif
17899 diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
17900 index c08d1ff..6ae1c81 100644
17901 --- a/arch/x86/kernel/process_32.c
17902 +++ b/arch/x86/kernel/process_32.c
17903 @@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
17904 unsigned long thread_saved_pc(struct task_struct *tsk)
17905 {
17906 return ((unsigned long *)tsk->thread.sp)[3];
17907 +//XXX return tsk->thread.eip;
17908 }
17909
17910 #ifndef CONFIG_SMP
17911 @@ -132,15 +133,14 @@ void __show_regs(struct pt_regs *regs, int all)
17912 unsigned long sp;
17913 unsigned short ss, gs;
17914
17915 - if (user_mode_vm(regs)) {
17916 + if (user_mode(regs)) {
17917 sp = regs->sp;
17918 ss = regs->ss & 0xffff;
17919 - gs = get_user_gs(regs);
17920 } else {
17921 sp = kernel_stack_pointer(regs);
17922 savesegment(ss, ss);
17923 - savesegment(gs, gs);
17924 }
17925 + gs = get_user_gs(regs);
17926
17927 show_regs_common();
17928
17929 @@ -202,13 +202,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
17930 struct task_struct *tsk;
17931 int err;
17932
17933 - childregs = task_pt_regs(p);
17934 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
17935 *childregs = *regs;
17936 childregs->ax = 0;
17937 childregs->sp = sp;
17938
17939 p->thread.sp = (unsigned long) childregs;
17940 p->thread.sp0 = (unsigned long) (childregs+1);
17941 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
17942
17943 p->thread.ip = (unsigned long) ret_from_fork;
17944
17945 @@ -299,7 +300,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
17946 struct thread_struct *prev = &prev_p->thread,
17947 *next = &next_p->thread;
17948 int cpu = smp_processor_id();
17949 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
17950 + struct tss_struct *tss = init_tss + cpu;
17951 fpu_switch_t fpu;
17952
17953 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
17954 @@ -323,6 +324,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
17955 */
17956 lazy_save_gs(prev->gs);
17957
17958 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17959 + __set_fs(task_thread_info(next_p)->addr_limit);
17960 +#endif
17961 +
17962 /*
17963 * Load the per-thread Thread-Local Storage descriptor.
17964 */
17965 @@ -353,6 +358,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
17966 */
17967 arch_end_context_switch(next_p);
17968
17969 + percpu_write(current_task, next_p);
17970 + percpu_write(current_tinfo, &next_p->tinfo);
17971 +
17972 /*
17973 * Restore %gs if needed (which is common)
17974 */
17975 @@ -361,8 +369,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
17976
17977 switch_fpu_finish(next_p, fpu);
17978
17979 - percpu_write(current_task, next_p);
17980 -
17981 return prev_p;
17982 }
17983
17984 @@ -392,4 +398,3 @@ unsigned long get_wchan(struct task_struct *p)
17985 } while (count++ < 16);
17986 return 0;
17987 }
17988 -
17989 diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
17990 index cfa5c90..4facd28 100644
17991 --- a/arch/x86/kernel/process_64.c
17992 +++ b/arch/x86/kernel/process_64.c
17993 @@ -89,7 +89,7 @@ static void __exit_idle(void)
17994 void exit_idle(void)
17995 {
17996 /* idle loop has pid 0 */
17997 - if (current->pid)
17998 + if (task_pid_nr(current))
17999 return;
18000 __exit_idle();
18001 }
18002 @@ -270,8 +270,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18003 struct pt_regs *childregs;
18004 struct task_struct *me = current;
18005
18006 - childregs = ((struct pt_regs *)
18007 - (THREAD_SIZE + task_stack_page(p))) - 1;
18008 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
18009 *childregs = *regs;
18010
18011 childregs->ax = 0;
18012 @@ -283,6 +282,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18013 p->thread.sp = (unsigned long) childregs;
18014 p->thread.sp0 = (unsigned long) (childregs+1);
18015 p->thread.usersp = me->thread.usersp;
18016 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18017
18018 set_tsk_thread_flag(p, TIF_FORK);
18019
18020 @@ -385,7 +385,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18021 struct thread_struct *prev = &prev_p->thread;
18022 struct thread_struct *next = &next_p->thread;
18023 int cpu = smp_processor_id();
18024 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
18025 + struct tss_struct *tss = init_tss + cpu;
18026 unsigned fsindex, gsindex;
18027 fpu_switch_t fpu;
18028
18029 @@ -467,10 +467,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18030 prev->usersp = percpu_read(old_rsp);
18031 percpu_write(old_rsp, next->usersp);
18032 percpu_write(current_task, next_p);
18033 + percpu_write(current_tinfo, &next_p->tinfo);
18034
18035 - percpu_write(kernel_stack,
18036 - (unsigned long)task_stack_page(next_p) +
18037 - THREAD_SIZE - KERNEL_STACK_OFFSET);
18038 + percpu_write(kernel_stack, next->sp0);
18039
18040 /*
18041 * Now maybe reload the debug registers and handle I/O bitmaps
18042 @@ -525,12 +524,11 @@ unsigned long get_wchan(struct task_struct *p)
18043 if (!p || p == current || p->state == TASK_RUNNING)
18044 return 0;
18045 stack = (unsigned long)task_stack_page(p);
18046 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
18047 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
18048 return 0;
18049 fp = *(u64 *)(p->thread.sp);
18050 do {
18051 - if (fp < (unsigned long)stack ||
18052 - fp >= (unsigned long)stack+THREAD_SIZE)
18053 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
18054 return 0;
18055 ip = *(u64 *)(fp+8);
18056 if (!in_sched_functions(ip))
18057 diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
18058 index 5026738..9e6d6dc 100644
18059 --- a/arch/x86/kernel/ptrace.c
18060 +++ b/arch/x86/kernel/ptrace.c
18061 @@ -823,7 +823,7 @@ long arch_ptrace(struct task_struct *child, long request,
18062 unsigned long addr, unsigned long data)
18063 {
18064 int ret;
18065 - unsigned long __user *datap = (unsigned long __user *)data;
18066 + unsigned long __user *datap = (__force unsigned long __user *)data;
18067
18068 switch (request) {
18069 /* read the word at location addr in the USER area. */
18070 @@ -908,14 +908,14 @@ long arch_ptrace(struct task_struct *child, long request,
18071 if ((int) addr < 0)
18072 return -EIO;
18073 ret = do_get_thread_area(child, addr,
18074 - (struct user_desc __user *)data);
18075 + (__force struct user_desc __user *) data);
18076 break;
18077
18078 case PTRACE_SET_THREAD_AREA:
18079 if ((int) addr < 0)
18080 return -EIO;
18081 ret = do_set_thread_area(child, addr,
18082 - (struct user_desc __user *)data, 0);
18083 + (__force struct user_desc __user *) data, 0);
18084 break;
18085 #endif
18086
18087 @@ -1332,7 +1332,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
18088 memset(info, 0, sizeof(*info));
18089 info->si_signo = SIGTRAP;
18090 info->si_code = si_code;
18091 - info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
18092 + info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
18093 }
18094
18095 void user_single_step_siginfo(struct task_struct *tsk,
18096 diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
18097 index 42eb330..139955c 100644
18098 --- a/arch/x86/kernel/pvclock.c
18099 +++ b/arch/x86/kernel/pvclock.c
18100 @@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
18101 return pv_tsc_khz;
18102 }
18103
18104 -static atomic64_t last_value = ATOMIC64_INIT(0);
18105 +static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
18106
18107 void pvclock_resume(void)
18108 {
18109 - atomic64_set(&last_value, 0);
18110 + atomic64_set_unchecked(&last_value, 0);
18111 }
18112
18113 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
18114 @@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
18115 * updating at the same time, and one of them could be slightly behind,
18116 * making the assumption that last_value always go forward fail to hold.
18117 */
18118 - last = atomic64_read(&last_value);
18119 + last = atomic64_read_unchecked(&last_value);
18120 do {
18121 if (ret < last)
18122 return last;
18123 - last = atomic64_cmpxchg(&last_value, last, ret);
18124 + last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
18125 } while (unlikely(last != ret));
18126
18127 return ret;
18128 diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
18129 index d840e69..98e9581 100644
18130 --- a/arch/x86/kernel/reboot.c
18131 +++ b/arch/x86/kernel/reboot.c
18132 @@ -35,7 +35,7 @@ void (*pm_power_off)(void);
18133 EXPORT_SYMBOL(pm_power_off);
18134
18135 static const struct desc_ptr no_idt = {};
18136 -static int reboot_mode;
18137 +static unsigned short reboot_mode;
18138 enum reboot_type reboot_type = BOOT_ACPI;
18139 int reboot_force;
18140
18141 @@ -335,13 +335,17 @@ core_initcall(reboot_init);
18142 extern const unsigned char machine_real_restart_asm[];
18143 extern const u64 machine_real_restart_gdt[3];
18144
18145 -void machine_real_restart(unsigned int type)
18146 +__noreturn void machine_real_restart(unsigned int type)
18147 {
18148 void *restart_va;
18149 unsigned long restart_pa;
18150 - void (*restart_lowmem)(unsigned int);
18151 + void (* __noreturn restart_lowmem)(unsigned int);
18152 u64 *lowmem_gdt;
18153
18154 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
18155 + struct desc_struct *gdt;
18156 +#endif
18157 +
18158 local_irq_disable();
18159
18160 /* Write zero to CMOS register number 0x0f, which the BIOS POST
18161 @@ -367,14 +371,14 @@ void machine_real_restart(unsigned int type)
18162 boot)". This seems like a fairly standard thing that gets set by
18163 REBOOT.COM programs, and the previous reset routine did this
18164 too. */
18165 - *((unsigned short *)0x472) = reboot_mode;
18166 + *(unsigned short *)(__va(0x472)) = reboot_mode;
18167
18168 /* Patch the GDT in the low memory trampoline */
18169 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
18170
18171 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
18172 restart_pa = virt_to_phys(restart_va);
18173 - restart_lowmem = (void (*)(unsigned int))restart_pa;
18174 + restart_lowmem = (void *)restart_pa;
18175
18176 /* GDT[0]: GDT self-pointer */
18177 lowmem_gdt[0] =
18178 @@ -385,7 +389,33 @@ void machine_real_restart(unsigned int type)
18179 GDT_ENTRY(0x009b, restart_pa, 0xffff);
18180
18181 /* Jump to the identity-mapped low memory code */
18182 +
18183 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
18184 + gdt = get_cpu_gdt_table(smp_processor_id());
18185 + pax_open_kernel();
18186 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18187 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
18188 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
18189 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
18190 +#endif
18191 +#ifdef CONFIG_PAX_KERNEXEC
18192 + gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
18193 + gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
18194 + gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
18195 + gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
18196 + gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
18197 + gdt[GDT_ENTRY_KERNEL_CS].g = 1;
18198 +#endif
18199 + pax_close_kernel();
18200 +#endif
18201 +
18202 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18203 + asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
18204 + unreachable();
18205 +#else
18206 restart_lowmem(type);
18207 +#endif
18208 +
18209 }
18210 #ifdef CONFIG_APM_MODULE
18211 EXPORT_SYMBOL(machine_real_restart);
18212 @@ -556,7 +586,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
18213 * try to force a triple fault and then cycle between hitting the keyboard
18214 * controller and doing that
18215 */
18216 -static void native_machine_emergency_restart(void)
18217 +__noreturn static void native_machine_emergency_restart(void)
18218 {
18219 int i;
18220 int attempt = 0;
18221 @@ -680,13 +710,13 @@ void native_machine_shutdown(void)
18222 #endif
18223 }
18224
18225 -static void __machine_emergency_restart(int emergency)
18226 +static __noreturn void __machine_emergency_restart(int emergency)
18227 {
18228 reboot_emergency = emergency;
18229 machine_ops.emergency_restart();
18230 }
18231
18232 -static void native_machine_restart(char *__unused)
18233 +static __noreturn void native_machine_restart(char *__unused)
18234 {
18235 printk("machine restart\n");
18236
18237 @@ -695,7 +725,7 @@ static void native_machine_restart(char *__unused)
18238 __machine_emergency_restart(0);
18239 }
18240
18241 -static void native_machine_halt(void)
18242 +static __noreturn void native_machine_halt(void)
18243 {
18244 /* stop other cpus and apics */
18245 machine_shutdown();
18246 @@ -706,7 +736,7 @@ static void native_machine_halt(void)
18247 stop_this_cpu(NULL);
18248 }
18249
18250 -static void native_machine_power_off(void)
18251 +__noreturn static void native_machine_power_off(void)
18252 {
18253 if (pm_power_off) {
18254 if (!reboot_force)
18255 @@ -715,6 +745,7 @@ static void native_machine_power_off(void)
18256 }
18257 /* a fallback in case there is no PM info available */
18258 tboot_shutdown(TB_SHUTDOWN_HALT);
18259 + unreachable();
18260 }
18261
18262 struct machine_ops machine_ops = {
18263 diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
18264 index 7a6f3b3..bed145d7 100644
18265 --- a/arch/x86/kernel/relocate_kernel_64.S
18266 +++ b/arch/x86/kernel/relocate_kernel_64.S
18267 @@ -11,6 +11,7 @@
18268 #include <asm/kexec.h>
18269 #include <asm/processor-flags.h>
18270 #include <asm/pgtable_types.h>
18271 +#include <asm/alternative-asm.h>
18272
18273 /*
18274 * Must be relocatable PIC code callable as a C function
18275 @@ -160,13 +161,14 @@ identity_mapped:
18276 xorq %rbp, %rbp
18277 xorq %r8, %r8
18278 xorq %r9, %r9
18279 - xorq %r10, %r9
18280 + xorq %r10, %r10
18281 xorq %r11, %r11
18282 xorq %r12, %r12
18283 xorq %r13, %r13
18284 xorq %r14, %r14
18285 xorq %r15, %r15
18286
18287 + pax_force_retaddr 0, 1
18288 ret
18289
18290 1:
18291 diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
18292 index d7d5099..28555d0 100644
18293 --- a/arch/x86/kernel/setup.c
18294 +++ b/arch/x86/kernel/setup.c
18295 @@ -448,7 +448,7 @@ static void __init parse_setup_data(void)
18296
18297 switch (data->type) {
18298 case SETUP_E820_EXT:
18299 - parse_e820_ext(data);
18300 + parse_e820_ext((struct setup_data __force_kernel *)data);
18301 break;
18302 case SETUP_DTB:
18303 add_dtb(pa_data);
18304 @@ -649,7 +649,7 @@ static void __init trim_bios_range(void)
18305 * area (640->1Mb) as ram even though it is not.
18306 * take them out.
18307 */
18308 - e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
18309 + e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
18310 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
18311 }
18312
18313 @@ -767,14 +767,14 @@ void __init setup_arch(char **cmdline_p)
18314
18315 if (!boot_params.hdr.root_flags)
18316 root_mountflags &= ~MS_RDONLY;
18317 - init_mm.start_code = (unsigned long) _text;
18318 - init_mm.end_code = (unsigned long) _etext;
18319 + init_mm.start_code = ktla_ktva((unsigned long) _text);
18320 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
18321 init_mm.end_data = (unsigned long) _edata;
18322 init_mm.brk = _brk_end;
18323
18324 - code_resource.start = virt_to_phys(_text);
18325 - code_resource.end = virt_to_phys(_etext)-1;
18326 - data_resource.start = virt_to_phys(_etext);
18327 + code_resource.start = virt_to_phys(ktla_ktva(_text));
18328 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
18329 + data_resource.start = virt_to_phys(_sdata);
18330 data_resource.end = virt_to_phys(_edata)-1;
18331 bss_resource.start = virt_to_phys(&__bss_start);
18332 bss_resource.end = virt_to_phys(&__bss_stop)-1;
18333 diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
18334 index 71f4727..16dc9f7 100644
18335 --- a/arch/x86/kernel/setup_percpu.c
18336 +++ b/arch/x86/kernel/setup_percpu.c
18337 @@ -21,19 +21,17 @@
18338 #include <asm/cpu.h>
18339 #include <asm/stackprotector.h>
18340
18341 -DEFINE_PER_CPU(int, cpu_number);
18342 +#ifdef CONFIG_SMP
18343 +DEFINE_PER_CPU(unsigned int, cpu_number);
18344 EXPORT_PER_CPU_SYMBOL(cpu_number);
18345 +#endif
18346
18347 -#ifdef CONFIG_X86_64
18348 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
18349 -#else
18350 -#define BOOT_PERCPU_OFFSET 0
18351 -#endif
18352
18353 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
18354 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
18355
18356 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
18357 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
18358 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
18359 };
18360 EXPORT_SYMBOL(__per_cpu_offset);
18361 @@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
18362 {
18363 #ifdef CONFIG_X86_32
18364 struct desc_struct gdt;
18365 + unsigned long base = per_cpu_offset(cpu);
18366
18367 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
18368 - 0x2 | DESCTYPE_S, 0x8);
18369 - gdt.s = 1;
18370 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
18371 + 0x83 | DESCTYPE_S, 0xC);
18372 write_gdt_entry(get_cpu_gdt_table(cpu),
18373 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
18374 #endif
18375 @@ -207,6 +205,11 @@ void __init setup_per_cpu_areas(void)
18376 /* alrighty, percpu areas up and running */
18377 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
18378 for_each_possible_cpu(cpu) {
18379 +#ifdef CONFIG_CC_STACKPROTECTOR
18380 +#ifdef CONFIG_X86_32
18381 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
18382 +#endif
18383 +#endif
18384 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
18385 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
18386 per_cpu(cpu_number, cpu) = cpu;
18387 @@ -247,6 +250,12 @@ void __init setup_per_cpu_areas(void)
18388 */
18389 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
18390 #endif
18391 +#ifdef CONFIG_CC_STACKPROTECTOR
18392 +#ifdef CONFIG_X86_32
18393 + if (!cpu)
18394 + per_cpu(stack_canary.canary, cpu) = canary;
18395 +#endif
18396 +#endif
18397 /*
18398 * Up to this point, the boot CPU has been using .init.data
18399 * area. Reload any changed state for the boot CPU.
18400 diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
18401 index 46a01bd..2e88e6d 100644
18402 --- a/arch/x86/kernel/signal.c
18403 +++ b/arch/x86/kernel/signal.c
18404 @@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsigned long sp)
18405 * Align the stack pointer according to the i386 ABI,
18406 * i.e. so that on function entry ((sp + 4) & 15) == 0.
18407 */
18408 - sp = ((sp + 4) & -16ul) - 4;
18409 + sp = ((sp - 12) & -16ul) - 4;
18410 #else /* !CONFIG_X86_32 */
18411 sp = round_down(sp, 16) - 8;
18412 #endif
18413 @@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
18414 * Return an always-bogus address instead so we will die with SIGSEGV.
18415 */
18416 if (onsigstack && !likely(on_sig_stack(sp)))
18417 - return (void __user *)-1L;
18418 + return (__force void __user *)-1L;
18419
18420 /* save i387 state */
18421 if (used_math() && save_i387_xstate(*fpstate) < 0)
18422 - return (void __user *)-1L;
18423 + return (__force void __user *)-1L;
18424
18425 return (void __user *)sp;
18426 }
18427 @@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
18428 }
18429
18430 if (current->mm->context.vdso)
18431 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
18432 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
18433 else
18434 - restorer = &frame->retcode;
18435 + restorer = (void __user *)&frame->retcode;
18436 if (ka->sa.sa_flags & SA_RESTORER)
18437 restorer = ka->sa.sa_restorer;
18438
18439 @@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
18440 * reasons and because gdb uses it as a signature to notice
18441 * signal handler stack frames.
18442 */
18443 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
18444 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
18445
18446 if (err)
18447 return -EFAULT;
18448 @@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
18449 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
18450
18451 /* Set up to return from userspace. */
18452 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
18453 + if (current->mm->context.vdso)
18454 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
18455 + else
18456 + restorer = (void __user *)&frame->retcode;
18457 if (ka->sa.sa_flags & SA_RESTORER)
18458 restorer = ka->sa.sa_restorer;
18459 put_user_ex(restorer, &frame->pretcode);
18460 @@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
18461 * reasons and because gdb uses it as a signature to notice
18462 * signal handler stack frames.
18463 */
18464 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
18465 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
18466 } put_user_catch(err);
18467
18468 if (err)
18469 @@ -765,7 +768,7 @@ static void do_signal(struct pt_regs *regs)
18470 * X86_32: vm86 regs switched out by assembly code before reaching
18471 * here, so testing against kernel CS suffices.
18472 */
18473 - if (!user_mode(regs))
18474 + if (!user_mode_novm(regs))
18475 return;
18476
18477 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
18478 diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
18479 index 66d250c..f1b10bd 100644
18480 --- a/arch/x86/kernel/smpboot.c
18481 +++ b/arch/x86/kernel/smpboot.c
18482 @@ -715,17 +715,20 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
18483 set_idle_for_cpu(cpu, c_idle.idle);
18484 do_rest:
18485 per_cpu(current_task, cpu) = c_idle.idle;
18486 + per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
18487 #ifdef CONFIG_X86_32
18488 /* Stack for startup_32 can be just as for start_secondary onwards */
18489 irq_ctx_init(cpu);
18490 #else
18491 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
18492 initial_gs = per_cpu_offset(cpu);
18493 - per_cpu(kernel_stack, cpu) =
18494 - (unsigned long)task_stack_page(c_idle.idle) -
18495 - KERNEL_STACK_OFFSET + THREAD_SIZE;
18496 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
18497 #endif
18498 +
18499 + pax_open_kernel();
18500 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
18501 + pax_close_kernel();
18502 +
18503 initial_code = (unsigned long)start_secondary;
18504 stack_start = c_idle.idle->thread.sp;
18505
18506 @@ -868,6 +871,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
18507
18508 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
18509
18510 +#ifdef CONFIG_PAX_PER_CPU_PGD
18511 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
18512 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
18513 + KERNEL_PGD_PTRS);
18514 +#endif
18515 +
18516 err = do_boot_cpu(apicid, cpu);
18517 if (err) {
18518 pr_debug("do_boot_cpu failed %d\n", err);
18519 diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
18520 index c346d11..d43b163 100644
18521 --- a/arch/x86/kernel/step.c
18522 +++ b/arch/x86/kernel/step.c
18523 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
18524 struct desc_struct *desc;
18525 unsigned long base;
18526
18527 - seg &= ~7UL;
18528 + seg >>= 3;
18529
18530 mutex_lock(&child->mm->context.lock);
18531 - if (unlikely((seg >> 3) >= child->mm->context.size))
18532 + if (unlikely(seg >= child->mm->context.size))
18533 addr = -1L; /* bogus selector, access would fault */
18534 else {
18535 desc = child->mm->context.ldt + seg;
18536 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
18537 addr += base;
18538 }
18539 mutex_unlock(&child->mm->context.lock);
18540 - }
18541 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
18542 + addr = ktla_ktva(addr);
18543
18544 return addr;
18545 }
18546 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
18547 unsigned char opcode[15];
18548 unsigned long addr = convert_ip_to_linear(child, regs);
18549
18550 + if (addr == -EINVAL)
18551 + return 0;
18552 +
18553 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
18554 for (i = 0; i < copied; i++) {
18555 switch (opcode[i]) {
18556 diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
18557 index 0b0cb5f..db6b9ed 100644
18558 --- a/arch/x86/kernel/sys_i386_32.c
18559 +++ b/arch/x86/kernel/sys_i386_32.c
18560 @@ -24,17 +24,224 @@
18561
18562 #include <asm/syscalls.h>
18563
18564 -/*
18565 - * Do a system call from kernel instead of calling sys_execve so we
18566 - * end up with proper pt_regs.
18567 - */
18568 -int kernel_execve(const char *filename,
18569 - const char *const argv[],
18570 - const char *const envp[])
18571 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
18572 {
18573 - long __res;
18574 - asm volatile ("int $0x80"
18575 - : "=a" (__res)
18576 - : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
18577 - return __res;
18578 + unsigned long pax_task_size = TASK_SIZE;
18579 +
18580 +#ifdef CONFIG_PAX_SEGMEXEC
18581 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
18582 + pax_task_size = SEGMEXEC_TASK_SIZE;
18583 +#endif
18584 +
18585 + if (len > pax_task_size || addr > pax_task_size - len)
18586 + return -EINVAL;
18587 +
18588 + return 0;
18589 +}
18590 +
18591 +unsigned long
18592 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
18593 + unsigned long len, unsigned long pgoff, unsigned long flags)
18594 +{
18595 + struct mm_struct *mm = current->mm;
18596 + struct vm_area_struct *vma;
18597 + unsigned long start_addr, pax_task_size = TASK_SIZE;
18598 +
18599 +#ifdef CONFIG_PAX_SEGMEXEC
18600 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
18601 + pax_task_size = SEGMEXEC_TASK_SIZE;
18602 +#endif
18603 +
18604 + pax_task_size -= PAGE_SIZE;
18605 +
18606 + if (len > pax_task_size)
18607 + return -ENOMEM;
18608 +
18609 + if (flags & MAP_FIXED)
18610 + return addr;
18611 +
18612 +#ifdef CONFIG_PAX_RANDMMAP
18613 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
18614 +#endif
18615 +
18616 + if (addr) {
18617 + addr = PAGE_ALIGN(addr);
18618 + if (pax_task_size - len >= addr) {
18619 + vma = find_vma(mm, addr);
18620 + if (check_heap_stack_gap(vma, addr, len))
18621 + return addr;
18622 + }
18623 + }
18624 + if (len > mm->cached_hole_size) {
18625 + start_addr = addr = mm->free_area_cache;
18626 + } else {
18627 + start_addr = addr = mm->mmap_base;
18628 + mm->cached_hole_size = 0;
18629 + }
18630 +
18631 +#ifdef CONFIG_PAX_PAGEEXEC
18632 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
18633 + start_addr = 0x00110000UL;
18634 +
18635 +#ifdef CONFIG_PAX_RANDMMAP
18636 + if (mm->pax_flags & MF_PAX_RANDMMAP)
18637 + start_addr += mm->delta_mmap & 0x03FFF000UL;
18638 +#endif
18639 +
18640 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
18641 + start_addr = addr = mm->mmap_base;
18642 + else
18643 + addr = start_addr;
18644 + }
18645 +#endif
18646 +
18647 +full_search:
18648 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
18649 + /* At this point: (!vma || addr < vma->vm_end). */
18650 + if (pax_task_size - len < addr) {
18651 + /*
18652 + * Start a new search - just in case we missed
18653 + * some holes.
18654 + */
18655 + if (start_addr != mm->mmap_base) {
18656 + start_addr = addr = mm->mmap_base;
18657 + mm->cached_hole_size = 0;
18658 + goto full_search;
18659 + }
18660 + return -ENOMEM;
18661 + }
18662 + if (check_heap_stack_gap(vma, addr, len))
18663 + break;
18664 + if (addr + mm->cached_hole_size < vma->vm_start)
18665 + mm->cached_hole_size = vma->vm_start - addr;
18666 + addr = vma->vm_end;
18667 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
18668 + start_addr = addr = mm->mmap_base;
18669 + mm->cached_hole_size = 0;
18670 + goto full_search;
18671 + }
18672 + }
18673 +
18674 + /*
18675 + * Remember the place where we stopped the search:
18676 + */
18677 + mm->free_area_cache = addr + len;
18678 + return addr;
18679 +}
18680 +
18681 +unsigned long
18682 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
18683 + const unsigned long len, const unsigned long pgoff,
18684 + const unsigned long flags)
18685 +{
18686 + struct vm_area_struct *vma;
18687 + struct mm_struct *mm = current->mm;
18688 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
18689 +
18690 +#ifdef CONFIG_PAX_SEGMEXEC
18691 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
18692 + pax_task_size = SEGMEXEC_TASK_SIZE;
18693 +#endif
18694 +
18695 + pax_task_size -= PAGE_SIZE;
18696 +
18697 + /* requested length too big for entire address space */
18698 + if (len > pax_task_size)
18699 + return -ENOMEM;
18700 +
18701 + if (flags & MAP_FIXED)
18702 + return addr;
18703 +
18704 +#ifdef CONFIG_PAX_PAGEEXEC
18705 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
18706 + goto bottomup;
18707 +#endif
18708 +
18709 +#ifdef CONFIG_PAX_RANDMMAP
18710 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
18711 +#endif
18712 +
18713 + /* requesting a specific address */
18714 + if (addr) {
18715 + addr = PAGE_ALIGN(addr);
18716 + if (pax_task_size - len >= addr) {
18717 + vma = find_vma(mm, addr);
18718 + if (check_heap_stack_gap(vma, addr, len))
18719 + return addr;
18720 + }
18721 + }
18722 +
18723 + /* check if free_area_cache is useful for us */
18724 + if (len <= mm->cached_hole_size) {
18725 + mm->cached_hole_size = 0;
18726 + mm->free_area_cache = mm->mmap_base;
18727 + }
18728 +
18729 + /* either no address requested or can't fit in requested address hole */
18730 + addr = mm->free_area_cache;
18731 +
18732 + /* make sure it can fit in the remaining address space */
18733 + if (addr > len) {
18734 + vma = find_vma(mm, addr-len);
18735 + if (check_heap_stack_gap(vma, addr - len, len))
18736 + /* remember the address as a hint for next time */
18737 + return (mm->free_area_cache = addr-len);
18738 + }
18739 +
18740 + if (mm->mmap_base < len)
18741 + goto bottomup;
18742 +
18743 + addr = mm->mmap_base-len;
18744 +
18745 + do {
18746 + /*
18747 + * Lookup failure means no vma is above this address,
18748 + * else if new region fits below vma->vm_start,
18749 + * return with success:
18750 + */
18751 + vma = find_vma(mm, addr);
18752 + if (check_heap_stack_gap(vma, addr, len))
18753 + /* remember the address as a hint for next time */
18754 + return (mm->free_area_cache = addr);
18755 +
18756 + /* remember the largest hole we saw so far */
18757 + if (addr + mm->cached_hole_size < vma->vm_start)
18758 + mm->cached_hole_size = vma->vm_start - addr;
18759 +
18760 + /* try just below the current vma->vm_start */
18761 + addr = skip_heap_stack_gap(vma, len);
18762 + } while (!IS_ERR_VALUE(addr));
18763 +
18764 +bottomup:
18765 + /*
18766 + * A failed mmap() very likely causes application failure,
18767 + * so fall back to the bottom-up function here. This scenario
18768 + * can happen with large stack limits and large mmap()
18769 + * allocations.
18770 + */
18771 +
18772 +#ifdef CONFIG_PAX_SEGMEXEC
18773 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
18774 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
18775 + else
18776 +#endif
18777 +
18778 + mm->mmap_base = TASK_UNMAPPED_BASE;
18779 +
18780 +#ifdef CONFIG_PAX_RANDMMAP
18781 + if (mm->pax_flags & MF_PAX_RANDMMAP)
18782 + mm->mmap_base += mm->delta_mmap;
18783 +#endif
18784 +
18785 + mm->free_area_cache = mm->mmap_base;
18786 + mm->cached_hole_size = ~0UL;
18787 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
18788 + /*
18789 + * Restore the topdown base:
18790 + */
18791 + mm->mmap_base = base;
18792 + mm->free_area_cache = base;
18793 + mm->cached_hole_size = ~0UL;
18794 +
18795 + return addr;
18796 }
18797 diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
18798 index 0514890..3dbebce 100644
18799 --- a/arch/x86/kernel/sys_x86_64.c
18800 +++ b/arch/x86/kernel/sys_x86_64.c
18801 @@ -95,8 +95,8 @@ out:
18802 return error;
18803 }
18804
18805 -static void find_start_end(unsigned long flags, unsigned long *begin,
18806 - unsigned long *end)
18807 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
18808 + unsigned long *begin, unsigned long *end)
18809 {
18810 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
18811 unsigned long new_begin;
18812 @@ -115,7 +115,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
18813 *begin = new_begin;
18814 }
18815 } else {
18816 - *begin = TASK_UNMAPPED_BASE;
18817 + *begin = mm->mmap_base;
18818 *end = TASK_SIZE;
18819 }
18820 }
18821 @@ -132,16 +132,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
18822 if (flags & MAP_FIXED)
18823 return addr;
18824
18825 - find_start_end(flags, &begin, &end);
18826 + find_start_end(mm, flags, &begin, &end);
18827
18828 if (len > end)
18829 return -ENOMEM;
18830
18831 +#ifdef CONFIG_PAX_RANDMMAP
18832 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
18833 +#endif
18834 +
18835 if (addr) {
18836 addr = PAGE_ALIGN(addr);
18837 vma = find_vma(mm, addr);
18838 - if (end - len >= addr &&
18839 - (!vma || addr + len <= vma->vm_start))
18840 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
18841 return addr;
18842 }
18843 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
18844 @@ -172,7 +175,7 @@ full_search:
18845 }
18846 return -ENOMEM;
18847 }
18848 - if (!vma || addr + len <= vma->vm_start) {
18849 + if (check_heap_stack_gap(vma, addr, len)) {
18850 /*
18851 * Remember the place where we stopped the search:
18852 */
18853 @@ -195,7 +198,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
18854 {
18855 struct vm_area_struct *vma;
18856 struct mm_struct *mm = current->mm;
18857 - unsigned long addr = addr0;
18858 + unsigned long base = mm->mmap_base, addr = addr0;
18859
18860 /* requested length too big for entire address space */
18861 if (len > TASK_SIZE)
18862 @@ -208,13 +211,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
18863 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
18864 goto bottomup;
18865
18866 +#ifdef CONFIG_PAX_RANDMMAP
18867 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
18868 +#endif
18869 +
18870 /* requesting a specific address */
18871 if (addr) {
18872 addr = PAGE_ALIGN(addr);
18873 - vma = find_vma(mm, addr);
18874 - if (TASK_SIZE - len >= addr &&
18875 - (!vma || addr + len <= vma->vm_start))
18876 - return addr;
18877 + if (TASK_SIZE - len >= addr) {
18878 + vma = find_vma(mm, addr);
18879 + if (check_heap_stack_gap(vma, addr, len))
18880 + return addr;
18881 + }
18882 }
18883
18884 /* check if free_area_cache is useful for us */
18885 @@ -232,7 +240,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
18886 ALIGN_TOPDOWN);
18887
18888 vma = find_vma(mm, tmp_addr);
18889 - if (!vma || tmp_addr + len <= vma->vm_start)
18890 + if (check_heap_stack_gap(vma, tmp_addr, len))
18891 /* remember the address as a hint for next time */
18892 return mm->free_area_cache = tmp_addr;
18893 }
18894 @@ -251,7 +259,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
18895 * return with success:
18896 */
18897 vma = find_vma(mm, addr);
18898 - if (!vma || addr+len <= vma->vm_start)
18899 + if (check_heap_stack_gap(vma, addr, len))
18900 /* remember the address as a hint for next time */
18901 return mm->free_area_cache = addr;
18902
18903 @@ -260,8 +268,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
18904 mm->cached_hole_size = vma->vm_start - addr;
18905
18906 /* try just below the current vma->vm_start */
18907 - addr = vma->vm_start-len;
18908 - } while (len < vma->vm_start);
18909 + addr = skip_heap_stack_gap(vma, len);
18910 + } while (!IS_ERR_VALUE(addr));
18911
18912 bottomup:
18913 /*
18914 @@ -270,13 +278,21 @@ bottomup:
18915 * can happen with large stack limits and large mmap()
18916 * allocations.
18917 */
18918 + mm->mmap_base = TASK_UNMAPPED_BASE;
18919 +
18920 +#ifdef CONFIG_PAX_RANDMMAP
18921 + if (mm->pax_flags & MF_PAX_RANDMMAP)
18922 + mm->mmap_base += mm->delta_mmap;
18923 +#endif
18924 +
18925 + mm->free_area_cache = mm->mmap_base;
18926 mm->cached_hole_size = ~0UL;
18927 - mm->free_area_cache = TASK_UNMAPPED_BASE;
18928 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
18929 /*
18930 * Restore the topdown base:
18931 */
18932 - mm->free_area_cache = mm->mmap_base;
18933 + mm->mmap_base = base;
18934 + mm->free_area_cache = base;
18935 mm->cached_hole_size = ~0UL;
18936
18937 return addr;
18938 diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
18939 index e2410e2..4fe3fbc 100644
18940 --- a/arch/x86/kernel/tboot.c
18941 +++ b/arch/x86/kernel/tboot.c
18942 @@ -219,7 +219,7 @@ static int tboot_setup_sleep(void)
18943
18944 void tboot_shutdown(u32 shutdown_type)
18945 {
18946 - void (*shutdown)(void);
18947 + void (* __noreturn shutdown)(void);
18948
18949 if (!tboot_enabled())
18950 return;
18951 @@ -241,7 +241,7 @@ void tboot_shutdown(u32 shutdown_type)
18952
18953 switch_to_tboot_pt();
18954
18955 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
18956 + shutdown = (void *)tboot->shutdown_entry;
18957 shutdown();
18958
18959 /* should not reach here */
18960 @@ -298,7 +298,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
18961 tboot_shutdown(acpi_shutdown_map[sleep_state]);
18962 }
18963
18964 -static atomic_t ap_wfs_count;
18965 +static atomic_unchecked_t ap_wfs_count;
18966
18967 static int tboot_wait_for_aps(int num_aps)
18968 {
18969 @@ -322,9 +322,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
18970 {
18971 switch (action) {
18972 case CPU_DYING:
18973 - atomic_inc(&ap_wfs_count);
18974 + atomic_inc_unchecked(&ap_wfs_count);
18975 if (num_online_cpus() == 1)
18976 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
18977 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
18978 return NOTIFY_BAD;
18979 break;
18980 }
18981 @@ -343,7 +343,7 @@ static __init int tboot_late_init(void)
18982
18983 tboot_create_trampoline();
18984
18985 - atomic_set(&ap_wfs_count, 0);
18986 + atomic_set_unchecked(&ap_wfs_count, 0);
18987 register_hotcpu_notifier(&tboot_cpu_notifier);
18988 return 0;
18989 }
18990 diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
18991 index dd5fbf4..b7f2232 100644
18992 --- a/arch/x86/kernel/time.c
18993 +++ b/arch/x86/kernel/time.c
18994 @@ -31,9 +31,9 @@ unsigned long profile_pc(struct pt_regs *regs)
18995 {
18996 unsigned long pc = instruction_pointer(regs);
18997
18998 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
18999 + if (!user_mode(regs) && in_lock_functions(pc)) {
19000 #ifdef CONFIG_FRAME_POINTER
19001 - return *(unsigned long *)(regs->bp + sizeof(long));
19002 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
19003 #else
19004 unsigned long *sp =
19005 (unsigned long *)kernel_stack_pointer(regs);
19006 @@ -42,11 +42,17 @@ unsigned long profile_pc(struct pt_regs *regs)
19007 * or above a saved flags. Eflags has bits 22-31 zero,
19008 * kernel addresses don't.
19009 */
19010 +
19011 +#ifdef CONFIG_PAX_KERNEXEC
19012 + return ktla_ktva(sp[0]);
19013 +#else
19014 if (sp[0] >> 22)
19015 return sp[0];
19016 if (sp[1] >> 22)
19017 return sp[1];
19018 #endif
19019 +
19020 +#endif
19021 }
19022 return pc;
19023 }
19024 diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
19025 index 6bb7b85..8f88b4a 100644
19026 --- a/arch/x86/kernel/tls.c
19027 +++ b/arch/x86/kernel/tls.c
19028 @@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
19029 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
19030 return -EINVAL;
19031
19032 +#ifdef CONFIG_PAX_SEGMEXEC
19033 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
19034 + return -EINVAL;
19035 +#endif
19036 +
19037 set_tls_desc(p, idx, &info, 1);
19038
19039 return 0;
19040 @@ -163,7 +168,7 @@ int regset_tls_get(struct task_struct *target, const struct user_regset *regset,
19041 {
19042 const struct desc_struct *tls;
19043
19044 - if (pos > GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) ||
19045 + if (pos >= GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) ||
19046 (pos % sizeof(struct user_desc)) != 0 ||
19047 (count % sizeof(struct user_desc)) != 0)
19048 return -EINVAL;
19049 @@ -198,7 +203,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
19050 struct user_desc infobuf[GDT_ENTRY_TLS_ENTRIES];
19051 const struct user_desc *info;
19052
19053 - if (pos > GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) ||
19054 + if (pos >= GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) ||
19055 (pos % sizeof(struct user_desc)) != 0 ||
19056 (count % sizeof(struct user_desc)) != 0)
19057 return -EINVAL;
19058 diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
19059 index 451c0a7..e57f551 100644
19060 --- a/arch/x86/kernel/trampoline_32.S
19061 +++ b/arch/x86/kernel/trampoline_32.S
19062 @@ -32,6 +32,12 @@
19063 #include <asm/segment.h>
19064 #include <asm/page_types.h>
19065
19066 +#ifdef CONFIG_PAX_KERNEXEC
19067 +#define ta(X) (X)
19068 +#else
19069 +#define ta(X) ((X) - __PAGE_OFFSET)
19070 +#endif
19071 +
19072 #ifdef CONFIG_SMP
19073
19074 .section ".x86_trampoline","a"
19075 @@ -62,7 +68,7 @@ r_base = .
19076 inc %ax # protected mode (PE) bit
19077 lmsw %ax # into protected mode
19078 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
19079 - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
19080 + ljmpl $__BOOT_CS, $ta(startup_32_smp)
19081
19082 # These need to be in the same 64K segment as the above;
19083 # hence we don't use the boot_gdt_descr defined in head.S
19084 diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
19085 index 09ff517..df19fbff 100644
19086 --- a/arch/x86/kernel/trampoline_64.S
19087 +++ b/arch/x86/kernel/trampoline_64.S
19088 @@ -90,7 +90,7 @@ startup_32:
19089 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
19090 movl %eax, %ds
19091
19092 - movl $X86_CR4_PAE, %eax
19093 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
19094 movl %eax, %cr4 # Enable PAE mode
19095
19096 # Setup trampoline 4 level pagetables
19097 @@ -138,7 +138,7 @@ tidt:
19098 # so the kernel can live anywhere
19099 .balign 4
19100 tgdt:
19101 - .short tgdt_end - tgdt # gdt limit
19102 + .short tgdt_end - tgdt - 1 # gdt limit
19103 .long tgdt - r_base
19104 .short 0
19105 .quad 0x00cf9b000000ffff # __KERNEL32_CS
19106 diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
19107 index 4bbe04d..41d0943 100644
19108 --- a/arch/x86/kernel/traps.c
19109 +++ b/arch/x86/kernel/traps.c
19110 @@ -70,12 +70,6 @@ asmlinkage int system_call(void);
19111
19112 /* Do we ignore FPU interrupts ? */
19113 char ignore_fpu_irq;
19114 -
19115 -/*
19116 - * The IDT has to be page-aligned to simplify the Pentium
19117 - * F0 0F bug workaround.
19118 - */
19119 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
19120 #endif
19121
19122 DECLARE_BITMAP(used_vectors, NR_VECTORS);
19123 @@ -108,13 +102,13 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
19124 }
19125
19126 static void __kprobes
19127 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
19128 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
19129 long error_code, siginfo_t *info)
19130 {
19131 struct task_struct *tsk = current;
19132
19133 #ifdef CONFIG_X86_32
19134 - if (regs->flags & X86_VM_MASK) {
19135 + if (v8086_mode(regs)) {
19136 /*
19137 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
19138 * On nmi (interrupt 2), do_trap should not be called.
19139 @@ -125,7 +119,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
19140 }
19141 #endif
19142
19143 - if (!user_mode(regs))
19144 + if (!user_mode_novm(regs))
19145 goto kernel_trap;
19146
19147 #ifdef CONFIG_X86_32
19148 @@ -148,7 +142,7 @@ trap_signal:
19149 printk_ratelimit()) {
19150 printk(KERN_INFO
19151 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
19152 - tsk->comm, tsk->pid, str,
19153 + tsk->comm, task_pid_nr(tsk), str,
19154 regs->ip, regs->sp, error_code);
19155 print_vma_addr(" in ", regs->ip);
19156 printk("\n");
19157 @@ -165,8 +159,20 @@ kernel_trap:
19158 if (!fixup_exception(regs)) {
19159 tsk->thread.error_code = error_code;
19160 tsk->thread.trap_no = trapnr;
19161 +
19162 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19163 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
19164 + str = "PAX: suspicious stack segment fault";
19165 +#endif
19166 +
19167 die(str, regs, error_code);
19168 }
19169 +
19170 +#ifdef CONFIG_PAX_REFCOUNT
19171 + if (trapnr == 4)
19172 + pax_report_refcount_overflow(regs);
19173 +#endif
19174 +
19175 return;
19176
19177 #ifdef CONFIG_X86_32
19178 @@ -255,14 +261,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
19179 conditional_sti(regs);
19180
19181 #ifdef CONFIG_X86_32
19182 - if (regs->flags & X86_VM_MASK)
19183 + if (v8086_mode(regs))
19184 goto gp_in_vm86;
19185 #endif
19186
19187 tsk = current;
19188 - if (!user_mode(regs))
19189 + if (!user_mode_novm(regs))
19190 goto gp_in_kernel;
19191
19192 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
19193 + if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
19194 + struct mm_struct *mm = tsk->mm;
19195 + unsigned long limit;
19196 +
19197 + down_write(&mm->mmap_sem);
19198 + limit = mm->context.user_cs_limit;
19199 + if (limit < TASK_SIZE) {
19200 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
19201 + up_write(&mm->mmap_sem);
19202 + return;
19203 + }
19204 + up_write(&mm->mmap_sem);
19205 + }
19206 +#endif
19207 +
19208 tsk->thread.error_code = error_code;
19209 tsk->thread.trap_no = 13;
19210
19211 @@ -295,6 +317,13 @@ gp_in_kernel:
19212 if (notify_die(DIE_GPF, "general protection fault", regs,
19213 error_code, 13, SIGSEGV) == NOTIFY_STOP)
19214 return;
19215 +
19216 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19217 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
19218 + die("PAX: suspicious general protection fault", regs, error_code);
19219 + else
19220 +#endif
19221 +
19222 die("general protection fault", regs, error_code);
19223 }
19224
19225 @@ -421,7 +450,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
19226 /* It's safe to allow irq's after DR6 has been saved */
19227 preempt_conditional_sti(regs);
19228
19229 - if (regs->flags & X86_VM_MASK) {
19230 + if (v8086_mode(regs)) {
19231 handle_vm86_trap((struct kernel_vm86_regs *) regs,
19232 error_code, 1);
19233 preempt_conditional_cli(regs);
19234 @@ -436,7 +465,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
19235 * We already checked v86 mode above, so we can check for kernel mode
19236 * by just checking the CPL of CS.
19237 */
19238 - if ((dr6 & DR_STEP) && !user_mode(regs)) {
19239 + if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
19240 tsk->thread.debugreg6 &= ~DR_STEP;
19241 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
19242 regs->flags &= ~X86_EFLAGS_TF;
19243 @@ -466,7 +495,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
19244 return;
19245 conditional_sti(regs);
19246
19247 - if (!user_mode_vm(regs))
19248 + if (!user_mode(regs))
19249 {
19250 if (!fixup_exception(regs)) {
19251 task->thread.error_code = error_code;
19252 diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
19253 index b9242ba..50c5edd 100644
19254 --- a/arch/x86/kernel/verify_cpu.S
19255 +++ b/arch/x86/kernel/verify_cpu.S
19256 @@ -20,6 +20,7 @@
19257 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
19258 * arch/x86/kernel/trampoline_64.S: secondary processor verification
19259 * arch/x86/kernel/head_32.S: processor startup
19260 + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
19261 *
19262 * verify_cpu, returns the status of longmode and SSE in register %eax.
19263 * 0: Success 1: Failure
19264 diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
19265 index b466cab..a0df083 100644
19266 --- a/arch/x86/kernel/vm86_32.c
19267 +++ b/arch/x86/kernel/vm86_32.c
19268 @@ -41,6 +41,7 @@
19269 #include <linux/ptrace.h>
19270 #include <linux/audit.h>
19271 #include <linux/stddef.h>
19272 +#include <linux/grsecurity.h>
19273
19274 #include <asm/uaccess.h>
19275 #include <asm/io.h>
19276 @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
19277 do_exit(SIGSEGV);
19278 }
19279
19280 - tss = &per_cpu(init_tss, get_cpu());
19281 + tss = init_tss + get_cpu();
19282 current->thread.sp0 = current->thread.saved_sp0;
19283 current->thread.sysenter_cs = __KERNEL_CS;
19284 load_sp0(tss, &current->thread);
19285 @@ -208,6 +209,13 @@ int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
19286 struct task_struct *tsk;
19287 int tmp, ret = -EPERM;
19288
19289 +#ifdef CONFIG_GRKERNSEC_VM86
19290 + if (!capable(CAP_SYS_RAWIO)) {
19291 + gr_handle_vm86();
19292 + goto out;
19293 + }
19294 +#endif
19295 +
19296 tsk = current;
19297 if (tsk->thread.saved_sp0)
19298 goto out;
19299 @@ -238,6 +246,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
19300 int tmp, ret;
19301 struct vm86plus_struct __user *v86;
19302
19303 +#ifdef CONFIG_GRKERNSEC_VM86
19304 + if (!capable(CAP_SYS_RAWIO)) {
19305 + gr_handle_vm86();
19306 + ret = -EPERM;
19307 + goto out;
19308 + }
19309 +#endif
19310 +
19311 tsk = current;
19312 switch (cmd) {
19313 case VM86_REQUEST_IRQ:
19314 @@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
19315 tsk->thread.saved_fs = info->regs32->fs;
19316 tsk->thread.saved_gs = get_user_gs(info->regs32);
19317
19318 - tss = &per_cpu(init_tss, get_cpu());
19319 + tss = init_tss + get_cpu();
19320 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
19321 if (cpu_has_sep)
19322 tsk->thread.sysenter_cs = 0;
19323 @@ -531,7 +547,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
19324 goto cannot_handle;
19325 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
19326 goto cannot_handle;
19327 - intr_ptr = (unsigned long __user *) (i << 2);
19328 + intr_ptr = (__force unsigned long __user *) (i << 2);
19329 if (get_user(segoffs, intr_ptr))
19330 goto cannot_handle;
19331 if ((segoffs >> 16) == BIOSSEG)
19332 diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
19333 index 0f703f1..9e15f64 100644
19334 --- a/arch/x86/kernel/vmlinux.lds.S
19335 +++ b/arch/x86/kernel/vmlinux.lds.S
19336 @@ -26,6 +26,13 @@
19337 #include <asm/page_types.h>
19338 #include <asm/cache.h>
19339 #include <asm/boot.h>
19340 +#include <asm/segment.h>
19341 +
19342 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19343 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
19344 +#else
19345 +#define __KERNEL_TEXT_OFFSET 0
19346 +#endif
19347
19348 #undef i386 /* in case the preprocessor is a 32bit one */
19349
19350 @@ -69,30 +76,43 @@ jiffies_64 = jiffies;
19351
19352 PHDRS {
19353 text PT_LOAD FLAGS(5); /* R_E */
19354 +#ifdef CONFIG_X86_32
19355 + module PT_LOAD FLAGS(5); /* R_E */
19356 +#endif
19357 +#ifdef CONFIG_XEN
19358 + rodata PT_LOAD FLAGS(5); /* R_E */
19359 +#else
19360 + rodata PT_LOAD FLAGS(4); /* R__ */
19361 +#endif
19362 data PT_LOAD FLAGS(6); /* RW_ */
19363 -#ifdef CONFIG_X86_64
19364 + init.begin PT_LOAD FLAGS(6); /* RW_ */
19365 #ifdef CONFIG_SMP
19366 percpu PT_LOAD FLAGS(6); /* RW_ */
19367 #endif
19368 + text.init PT_LOAD FLAGS(5); /* R_E */
19369 + text.exit PT_LOAD FLAGS(5); /* R_E */
19370 init PT_LOAD FLAGS(7); /* RWE */
19371 -#endif
19372 note PT_NOTE FLAGS(0); /* ___ */
19373 }
19374
19375 SECTIONS
19376 {
19377 #ifdef CONFIG_X86_32
19378 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
19379 - phys_startup_32 = startup_32 - LOAD_OFFSET;
19380 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
19381 #else
19382 - . = __START_KERNEL;
19383 - phys_startup_64 = startup_64 - LOAD_OFFSET;
19384 + . = __START_KERNEL;
19385 #endif
19386
19387 /* Text and read-only data */
19388 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
19389 - _text = .;
19390 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
19391 /* bootstrapping code */
19392 +#ifdef CONFIG_X86_32
19393 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
19394 +#else
19395 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
19396 +#endif
19397 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
19398 + _text = .;
19399 HEAD_TEXT
19400 #ifdef CONFIG_X86_32
19401 . = ALIGN(PAGE_SIZE);
19402 @@ -108,13 +128,47 @@ SECTIONS
19403 IRQENTRY_TEXT
19404 *(.fixup)
19405 *(.gnu.warning)
19406 - /* End of text section */
19407 - _etext = .;
19408 } :text = 0x9090
19409
19410 - NOTES :text :note
19411 + . += __KERNEL_TEXT_OFFSET;
19412
19413 - EXCEPTION_TABLE(16) :text = 0x9090
19414 +#ifdef CONFIG_X86_32
19415 + . = ALIGN(PAGE_SIZE);
19416 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
19417 +
19418 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
19419 + MODULES_EXEC_VADDR = .;
19420 + BYTE(0)
19421 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
19422 + . = ALIGN(HPAGE_SIZE);
19423 + MODULES_EXEC_END = . - 1;
19424 +#endif
19425 +
19426 + } :module
19427 +#endif
19428 +
19429 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
19430 + /* End of text section */
19431 + _etext = . - __KERNEL_TEXT_OFFSET;
19432 + }
19433 +
19434 +#ifdef CONFIG_X86_32
19435 + . = ALIGN(PAGE_SIZE);
19436 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
19437 + *(.idt)
19438 + . = ALIGN(PAGE_SIZE);
19439 + *(.empty_zero_page)
19440 + *(.initial_pg_fixmap)
19441 + *(.initial_pg_pmd)
19442 + *(.initial_page_table)
19443 + *(.swapper_pg_dir)
19444 + } :rodata
19445 +#endif
19446 +
19447 + . = ALIGN(PAGE_SIZE);
19448 + NOTES :rodata :note
19449 +
19450 + EXCEPTION_TABLE(16) :rodata
19451
19452 #if defined(CONFIG_DEBUG_RODATA)
19453 /* .text should occupy whole number of pages */
19454 @@ -126,16 +180,20 @@ SECTIONS
19455
19456 /* Data */
19457 .data : AT(ADDR(.data) - LOAD_OFFSET) {
19458 +
19459 +#ifdef CONFIG_PAX_KERNEXEC
19460 + . = ALIGN(HPAGE_SIZE);
19461 +#else
19462 + . = ALIGN(PAGE_SIZE);
19463 +#endif
19464 +
19465 /* Start of data section */
19466 _sdata = .;
19467
19468 /* init_task */
19469 INIT_TASK_DATA(THREAD_SIZE)
19470
19471 -#ifdef CONFIG_X86_32
19472 - /* 32 bit has nosave before _edata */
19473 NOSAVE_DATA
19474 -#endif
19475
19476 PAGE_ALIGNED_DATA(PAGE_SIZE)
19477
19478 @@ -176,12 +234,19 @@ SECTIONS
19479 #endif /* CONFIG_X86_64 */
19480
19481 /* Init code and data - will be freed after init */
19482 - . = ALIGN(PAGE_SIZE);
19483 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
19484 + BYTE(0)
19485 +
19486 +#ifdef CONFIG_PAX_KERNEXEC
19487 + . = ALIGN(HPAGE_SIZE);
19488 +#else
19489 + . = ALIGN(PAGE_SIZE);
19490 +#endif
19491 +
19492 __init_begin = .; /* paired with __init_end */
19493 - }
19494 + } :init.begin
19495
19496 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
19497 +#ifdef CONFIG_SMP
19498 /*
19499 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
19500 * output PHDR, so the next output section - .init.text - should
19501 @@ -190,12 +255,27 @@ SECTIONS
19502 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
19503 #endif
19504
19505 - INIT_TEXT_SECTION(PAGE_SIZE)
19506 -#ifdef CONFIG_X86_64
19507 - :init
19508 -#endif
19509 + . = ALIGN(PAGE_SIZE);
19510 + init_begin = .;
19511 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
19512 + VMLINUX_SYMBOL(_sinittext) = .;
19513 + INIT_TEXT
19514 + VMLINUX_SYMBOL(_einittext) = .;
19515 + . = ALIGN(PAGE_SIZE);
19516 + } :text.init
19517
19518 - INIT_DATA_SECTION(16)
19519 + /*
19520 + * .exit.text is discard at runtime, not link time, to deal with
19521 + * references from .altinstructions and .eh_frame
19522 + */
19523 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
19524 + EXIT_TEXT
19525 + . = ALIGN(16);
19526 + } :text.exit
19527 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
19528 +
19529 + . = ALIGN(PAGE_SIZE);
19530 + INIT_DATA_SECTION(16) :init
19531
19532 /*
19533 * Code and data for a variety of lowlevel trampolines, to be
19534 @@ -269,19 +349,12 @@ SECTIONS
19535 }
19536
19537 . = ALIGN(8);
19538 - /*
19539 - * .exit.text is discard at runtime, not link time, to deal with
19540 - * references from .altinstructions and .eh_frame
19541 - */
19542 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
19543 - EXIT_TEXT
19544 - }
19545
19546 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
19547 EXIT_DATA
19548 }
19549
19550 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
19551 +#ifndef CONFIG_SMP
19552 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
19553 #endif
19554
19555 @@ -300,16 +373,10 @@ SECTIONS
19556 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
19557 __smp_locks = .;
19558 *(.smp_locks)
19559 - . = ALIGN(PAGE_SIZE);
19560 __smp_locks_end = .;
19561 + . = ALIGN(PAGE_SIZE);
19562 }
19563
19564 -#ifdef CONFIG_X86_64
19565 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
19566 - NOSAVE_DATA
19567 - }
19568 -#endif
19569 -
19570 /* BSS */
19571 . = ALIGN(PAGE_SIZE);
19572 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
19573 @@ -325,6 +392,7 @@ SECTIONS
19574 __brk_base = .;
19575 . += 64 * 1024; /* 64k alignment slop space */
19576 *(.brk_reservation) /* areas brk users have reserved */
19577 + . = ALIGN(HPAGE_SIZE);
19578 __brk_limit = .;
19579 }
19580
19581 @@ -351,13 +419,12 @@ SECTIONS
19582 * for the boot processor.
19583 */
19584 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
19585 -INIT_PER_CPU(gdt_page);
19586 INIT_PER_CPU(irq_stack_union);
19587
19588 /*
19589 * Build-time check on the image size:
19590 */
19591 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
19592 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
19593 "kernel image bigger than KERNEL_IMAGE_SIZE");
19594
19595 #ifdef CONFIG_SMP
19596 diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
19597 index b07ba93..a212969 100644
19598 --- a/arch/x86/kernel/vsyscall_64.c
19599 +++ b/arch/x86/kernel/vsyscall_64.c
19600 @@ -57,15 +57,13 @@ DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
19601 .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
19602 };
19603
19604 -static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
19605 +static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
19606
19607 static int __init vsyscall_setup(char *str)
19608 {
19609 if (str) {
19610 if (!strcmp("emulate", str))
19611 vsyscall_mode = EMULATE;
19612 - else if (!strcmp("native", str))
19613 - vsyscall_mode = NATIVE;
19614 else if (!strcmp("none", str))
19615 vsyscall_mode = NONE;
19616 else
19617 @@ -207,7 +205,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
19618
19619 tsk = current;
19620 if (seccomp_mode(&tsk->seccomp))
19621 - do_exit(SIGKILL);
19622 + do_group_exit(SIGKILL);
19623
19624 /*
19625 * With a real vsyscall, page faults cause SIGSEGV. We want to
19626 @@ -279,8 +277,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
19627 return true;
19628
19629 sigsegv:
19630 - force_sig(SIGSEGV, current);
19631 - return true;
19632 + do_group_exit(SIGKILL);
19633 }
19634
19635 /*
19636 @@ -333,10 +330,7 @@ void __init map_vsyscall(void)
19637 extern char __vvar_page;
19638 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
19639
19640 - __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
19641 - vsyscall_mode == NATIVE
19642 - ? PAGE_KERNEL_VSYSCALL
19643 - : PAGE_KERNEL_VVAR);
19644 + __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
19645 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
19646 (unsigned long)VSYSCALL_START);
19647
19648 diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
19649 index 9796c2f..f686fbf 100644
19650 --- a/arch/x86/kernel/x8664_ksyms_64.c
19651 +++ b/arch/x86/kernel/x8664_ksyms_64.c
19652 @@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
19653 EXPORT_SYMBOL(copy_user_generic_string);
19654 EXPORT_SYMBOL(copy_user_generic_unrolled);
19655 EXPORT_SYMBOL(__copy_user_nocache);
19656 -EXPORT_SYMBOL(_copy_from_user);
19657 -EXPORT_SYMBOL(_copy_to_user);
19658
19659 EXPORT_SYMBOL(copy_page);
19660 EXPORT_SYMBOL(clear_page);
19661 diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
19662 index 7110911..e8cdee5 100644
19663 --- a/arch/x86/kernel/xsave.c
19664 +++ b/arch/x86/kernel/xsave.c
19665 @@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
19666 fx_sw_user->xstate_size > fx_sw_user->extended_size)
19667 return -EINVAL;
19668
19669 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
19670 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
19671 fx_sw_user->extended_size -
19672 FP_XSTATE_MAGIC2_SIZE));
19673 if (err)
19674 @@ -266,7 +266,7 @@ fx_only:
19675 * the other extended state.
19676 */
19677 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
19678 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
19679 + return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
19680 }
19681
19682 /*
19683 @@ -295,7 +295,7 @@ int restore_i387_xstate(void __user *buf)
19684 if (use_xsave())
19685 err = restore_user_xstate(buf);
19686 else
19687 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
19688 + err = fxrstor_checking((struct i387_fxsave_struct __force_kernel *)
19689 buf);
19690 if (unlikely(err)) {
19691 /*
19692 diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
19693 index 89b02bf..0f6511d 100644
19694 --- a/arch/x86/kvm/cpuid.c
19695 +++ b/arch/x86/kvm/cpuid.c
19696 @@ -124,15 +124,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
19697 struct kvm_cpuid2 *cpuid,
19698 struct kvm_cpuid_entry2 __user *entries)
19699 {
19700 - int r;
19701 + int r, i;
19702
19703 r = -E2BIG;
19704 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
19705 goto out;
19706 r = -EFAULT;
19707 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
19708 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
19709 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
19710 goto out;
19711 + for (i = 0; i < cpuid->nent; ++i) {
19712 + struct kvm_cpuid_entry2 cpuid_entry;
19713 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
19714 + goto out;
19715 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
19716 + }
19717 vcpu->arch.cpuid_nent = cpuid->nent;
19718 kvm_apic_set_version(vcpu);
19719 kvm_x86_ops->cpuid_update(vcpu);
19720 @@ -147,15 +152,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
19721 struct kvm_cpuid2 *cpuid,
19722 struct kvm_cpuid_entry2 __user *entries)
19723 {
19724 - int r;
19725 + int r, i;
19726
19727 r = -E2BIG;
19728 if (cpuid->nent < vcpu->arch.cpuid_nent)
19729 goto out;
19730 r = -EFAULT;
19731 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
19732 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
19733 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
19734 goto out;
19735 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
19736 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
19737 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
19738 + goto out;
19739 + }
19740 return 0;
19741
19742 out:
19743 diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
19744 index 0982507..7f6d72f 100644
19745 --- a/arch/x86/kvm/emulate.c
19746 +++ b/arch/x86/kvm/emulate.c
19747 @@ -250,6 +250,7 @@ struct gprefix {
19748
19749 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
19750 do { \
19751 + unsigned long _tmp; \
19752 __asm__ __volatile__ ( \
19753 _PRE_EFLAGS("0", "4", "2") \
19754 _op _suffix " %"_x"3,%1; " \
19755 @@ -264,8 +265,6 @@ struct gprefix {
19756 /* Raw emulation: instruction has two explicit operands. */
19757 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
19758 do { \
19759 - unsigned long _tmp; \
19760 - \
19761 switch ((ctxt)->dst.bytes) { \
19762 case 2: \
19763 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
19764 @@ -281,7 +280,6 @@ struct gprefix {
19765
19766 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
19767 do { \
19768 - unsigned long _tmp; \
19769 switch ((ctxt)->dst.bytes) { \
19770 case 1: \
19771 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
19772 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
19773 index cfdc6e0..ab92e84 100644
19774 --- a/arch/x86/kvm/lapic.c
19775 +++ b/arch/x86/kvm/lapic.c
19776 @@ -54,7 +54,7 @@
19777 #define APIC_BUS_CYCLE_NS 1
19778
19779 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
19780 -#define apic_debug(fmt, arg...)
19781 +#define apic_debug(fmt, arg...) do {} while (0)
19782
19783 #define APIC_LVT_NUM 6
19784 /* 14 is the version for Xeon and Pentium 8.4.8*/
19785 diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
19786 index 1561028..0ed7f14 100644
19787 --- a/arch/x86/kvm/paging_tmpl.h
19788 +++ b/arch/x86/kvm/paging_tmpl.h
19789 @@ -197,7 +197,7 @@ retry_walk:
19790 if (unlikely(kvm_is_error_hva(host_addr)))
19791 goto error;
19792
19793 - ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
19794 + ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
19795 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
19796 goto error;
19797
19798 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
19799 index e385214..f8df033 100644
19800 --- a/arch/x86/kvm/svm.c
19801 +++ b/arch/x86/kvm/svm.c
19802 @@ -3420,7 +3420,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
19803 int cpu = raw_smp_processor_id();
19804
19805 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
19806 +
19807 + pax_open_kernel();
19808 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
19809 + pax_close_kernel();
19810 +
19811 load_TR_desc();
19812 }
19813
19814 @@ -3798,6 +3802,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
19815 #endif
19816 #endif
19817
19818 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19819 + __set_fs(current_thread_info()->addr_limit);
19820 +#endif
19821 +
19822 reload_tss(vcpu);
19823
19824 local_irq_disable();
19825 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
19826 index 3b4c8d8..f457b63 100644
19827 --- a/arch/x86/kvm/vmx.c
19828 +++ b/arch/x86/kvm/vmx.c
19829 @@ -1306,7 +1306,11 @@ static void reload_tss(void)
19830 struct desc_struct *descs;
19831
19832 descs = (void *)gdt->address;
19833 +
19834 + pax_open_kernel();
19835 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
19836 + pax_close_kernel();
19837 +
19838 load_TR_desc();
19839 }
19840
19841 @@ -2631,8 +2635,11 @@ static __init int hardware_setup(void)
19842 if (!cpu_has_vmx_flexpriority())
19843 flexpriority_enabled = 0;
19844
19845 - if (!cpu_has_vmx_tpr_shadow())
19846 - kvm_x86_ops->update_cr8_intercept = NULL;
19847 + if (!cpu_has_vmx_tpr_shadow()) {
19848 + pax_open_kernel();
19849 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
19850 + pax_close_kernel();
19851 + }
19852
19853 if (enable_ept && !cpu_has_vmx_ept_2m_page())
19854 kvm_disable_largepages();
19855 @@ -3648,7 +3655,7 @@ static void vmx_set_constant_host_state(void)
19856 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
19857
19858 asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
19859 - vmcs_writel(HOST_RIP, tmpl); /* 22.2.5 */
19860 + vmcs_writel(HOST_RIP, ktla_ktva(tmpl)); /* 22.2.5 */
19861
19862 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
19863 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
19864 @@ -6184,6 +6191,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
19865 "jmp .Lkvm_vmx_return \n\t"
19866 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
19867 ".Lkvm_vmx_return: "
19868 +
19869 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19870 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
19871 + ".Lkvm_vmx_return2: "
19872 +#endif
19873 +
19874 /* Save guest registers, load host registers, keep flags */
19875 "mov %0, %c[wordsize](%%"R"sp) \n\t"
19876 "pop %0 \n\t"
19877 @@ -6232,6 +6245,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
19878 #endif
19879 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
19880 [wordsize]"i"(sizeof(ulong))
19881 +
19882 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19883 + ,[cs]"i"(__KERNEL_CS)
19884 +#endif
19885 +
19886 : "cc", "memory"
19887 , R"ax", R"bx", R"di", R"si"
19888 #ifdef CONFIG_X86_64
19889 @@ -6260,7 +6278,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
19890 }
19891 }
19892
19893 - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
19894 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
19895 +
19896 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19897 + loadsegment(fs, __KERNEL_PERCPU);
19898 +#endif
19899 +
19900 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19901 + __set_fs(current_thread_info()->addr_limit);
19902 +#endif
19903 +
19904 vmx->loaded_vmcs->launched = 1;
19905
19906 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
19907 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
19908 index 9cbfc06..7ddc9fa 100644
19909 --- a/arch/x86/kvm/x86.c
19910 +++ b/arch/x86/kvm/x86.c
19911 @@ -1311,8 +1311,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
19912 {
19913 struct kvm *kvm = vcpu->kvm;
19914 int lm = is_long_mode(vcpu);
19915 - u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
19916 - : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
19917 + u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
19918 + : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
19919 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
19920 : kvm->arch.xen_hvm_config.blob_size_32;
19921 u32 page_num = data & ~PAGE_MASK;
19922 @@ -2145,6 +2145,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
19923 if (n < msr_list.nmsrs)
19924 goto out;
19925 r = -EFAULT;
19926 + if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
19927 + goto out;
19928 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
19929 num_msrs_to_save * sizeof(u32)))
19930 goto out;
19931 @@ -2266,7 +2268,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
19932 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
19933 struct kvm_interrupt *irq)
19934 {
19935 - if (irq->irq < 0 || irq->irq >= 256)
19936 + if (irq->irq >= 256)
19937 return -EINVAL;
19938 if (irqchip_in_kernel(vcpu->kvm))
19939 return -ENXIO;
19940 @@ -4780,7 +4782,7 @@ static void kvm_set_mmio_spte_mask(void)
19941 kvm_mmu_set_mmio_spte_mask(mask);
19942 }
19943
19944 -int kvm_arch_init(void *opaque)
19945 +int kvm_arch_init(const void *opaque)
19946 {
19947 int r;
19948 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
19949 diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
19950 index 642d880..44e0f3f 100644
19951 --- a/arch/x86/lguest/boot.c
19952 +++ b/arch/x86/lguest/boot.c
19953 @@ -1200,9 +1200,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
19954 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
19955 * Launcher to reboot us.
19956 */
19957 -static void lguest_restart(char *reason)
19958 +static __noreturn void lguest_restart(char *reason)
19959 {
19960 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
19961 + BUG();
19962 }
19963
19964 /*G:050
19965 diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c
19966 index 042f682..c92afb6 100644
19967 --- a/arch/x86/lib/atomic64_32.c
19968 +++ b/arch/x86/lib/atomic64_32.c
19969 @@ -8,18 +8,30 @@
19970
19971 long long atomic64_read_cx8(long long, const atomic64_t *v);
19972 EXPORT_SYMBOL(atomic64_read_cx8);
19973 +long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
19974 +EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
19975 long long atomic64_set_cx8(long long, const atomic64_t *v);
19976 EXPORT_SYMBOL(atomic64_set_cx8);
19977 +long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
19978 +EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
19979 long long atomic64_xchg_cx8(long long, unsigned high);
19980 EXPORT_SYMBOL(atomic64_xchg_cx8);
19981 long long atomic64_add_return_cx8(long long a, atomic64_t *v);
19982 EXPORT_SYMBOL(atomic64_add_return_cx8);
19983 +long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
19984 +EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
19985 long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
19986 EXPORT_SYMBOL(atomic64_sub_return_cx8);
19987 +long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
19988 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
19989 long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
19990 EXPORT_SYMBOL(atomic64_inc_return_cx8);
19991 +long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
19992 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
19993 long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
19994 EXPORT_SYMBOL(atomic64_dec_return_cx8);
19995 +long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
19996 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
19997 long long atomic64_dec_if_positive_cx8(atomic64_t *v);
19998 EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
19999 int atomic64_inc_not_zero_cx8(atomic64_t *v);
20000 @@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
20001 #ifndef CONFIG_X86_CMPXCHG64
20002 long long atomic64_read_386(long long, const atomic64_t *v);
20003 EXPORT_SYMBOL(atomic64_read_386);
20004 +long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
20005 +EXPORT_SYMBOL(atomic64_read_unchecked_386);
20006 long long atomic64_set_386(long long, const atomic64_t *v);
20007 EXPORT_SYMBOL(atomic64_set_386);
20008 +long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
20009 +EXPORT_SYMBOL(atomic64_set_unchecked_386);
20010 long long atomic64_xchg_386(long long, unsigned high);
20011 EXPORT_SYMBOL(atomic64_xchg_386);
20012 long long atomic64_add_return_386(long long a, atomic64_t *v);
20013 EXPORT_SYMBOL(atomic64_add_return_386);
20014 +long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
20015 +EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
20016 long long atomic64_sub_return_386(long long a, atomic64_t *v);
20017 EXPORT_SYMBOL(atomic64_sub_return_386);
20018 +long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
20019 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
20020 long long atomic64_inc_return_386(long long a, atomic64_t *v);
20021 EXPORT_SYMBOL(atomic64_inc_return_386);
20022 +long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
20023 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
20024 long long atomic64_dec_return_386(long long a, atomic64_t *v);
20025 EXPORT_SYMBOL(atomic64_dec_return_386);
20026 +long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
20027 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
20028 long long atomic64_add_386(long long a, atomic64_t *v);
20029 EXPORT_SYMBOL(atomic64_add_386);
20030 +long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
20031 +EXPORT_SYMBOL(atomic64_add_unchecked_386);
20032 long long atomic64_sub_386(long long a, atomic64_t *v);
20033 EXPORT_SYMBOL(atomic64_sub_386);
20034 +long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
20035 +EXPORT_SYMBOL(atomic64_sub_unchecked_386);
20036 long long atomic64_inc_386(long long a, atomic64_t *v);
20037 EXPORT_SYMBOL(atomic64_inc_386);
20038 +long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
20039 +EXPORT_SYMBOL(atomic64_inc_unchecked_386);
20040 long long atomic64_dec_386(long long a, atomic64_t *v);
20041 EXPORT_SYMBOL(atomic64_dec_386);
20042 +long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
20043 +EXPORT_SYMBOL(atomic64_dec_unchecked_386);
20044 long long atomic64_dec_if_positive_386(atomic64_t *v);
20045 EXPORT_SYMBOL(atomic64_dec_if_positive_386);
20046 int atomic64_inc_not_zero_386(atomic64_t *v);
20047 diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
20048 index e8e7e0d..56fd1b0 100644
20049 --- a/arch/x86/lib/atomic64_386_32.S
20050 +++ b/arch/x86/lib/atomic64_386_32.S
20051 @@ -48,6 +48,10 @@ BEGIN(read)
20052 movl (v), %eax
20053 movl 4(v), %edx
20054 RET_ENDP
20055 +BEGIN(read_unchecked)
20056 + movl (v), %eax
20057 + movl 4(v), %edx
20058 +RET_ENDP
20059 #undef v
20060
20061 #define v %esi
20062 @@ -55,6 +59,10 @@ BEGIN(set)
20063 movl %ebx, (v)
20064 movl %ecx, 4(v)
20065 RET_ENDP
20066 +BEGIN(set_unchecked)
20067 + movl %ebx, (v)
20068 + movl %ecx, 4(v)
20069 +RET_ENDP
20070 #undef v
20071
20072 #define v %esi
20073 @@ -70,6 +78,20 @@ RET_ENDP
20074 BEGIN(add)
20075 addl %eax, (v)
20076 adcl %edx, 4(v)
20077 +
20078 +#ifdef CONFIG_PAX_REFCOUNT
20079 + jno 0f
20080 + subl %eax, (v)
20081 + sbbl %edx, 4(v)
20082 + int $4
20083 +0:
20084 + _ASM_EXTABLE(0b, 0b)
20085 +#endif
20086 +
20087 +RET_ENDP
20088 +BEGIN(add_unchecked)
20089 + addl %eax, (v)
20090 + adcl %edx, 4(v)
20091 RET_ENDP
20092 #undef v
20093
20094 @@ -77,6 +99,24 @@ RET_ENDP
20095 BEGIN(add_return)
20096 addl (v), %eax
20097 adcl 4(v), %edx
20098 +
20099 +#ifdef CONFIG_PAX_REFCOUNT
20100 + into
20101 +1234:
20102 + _ASM_EXTABLE(1234b, 2f)
20103 +#endif
20104 +
20105 + movl %eax, (v)
20106 + movl %edx, 4(v)
20107 +
20108 +#ifdef CONFIG_PAX_REFCOUNT
20109 +2:
20110 +#endif
20111 +
20112 +RET_ENDP
20113 +BEGIN(add_return_unchecked)
20114 + addl (v), %eax
20115 + adcl 4(v), %edx
20116 movl %eax, (v)
20117 movl %edx, 4(v)
20118 RET_ENDP
20119 @@ -86,6 +126,20 @@ RET_ENDP
20120 BEGIN(sub)
20121 subl %eax, (v)
20122 sbbl %edx, 4(v)
20123 +
20124 +#ifdef CONFIG_PAX_REFCOUNT
20125 + jno 0f
20126 + addl %eax, (v)
20127 + adcl %edx, 4(v)
20128 + int $4
20129 +0:
20130 + _ASM_EXTABLE(0b, 0b)
20131 +#endif
20132 +
20133 +RET_ENDP
20134 +BEGIN(sub_unchecked)
20135 + subl %eax, (v)
20136 + sbbl %edx, 4(v)
20137 RET_ENDP
20138 #undef v
20139
20140 @@ -96,6 +150,27 @@ BEGIN(sub_return)
20141 sbbl $0, %edx
20142 addl (v), %eax
20143 adcl 4(v), %edx
20144 +
20145 +#ifdef CONFIG_PAX_REFCOUNT
20146 + into
20147 +1234:
20148 + _ASM_EXTABLE(1234b, 2f)
20149 +#endif
20150 +
20151 + movl %eax, (v)
20152 + movl %edx, 4(v)
20153 +
20154 +#ifdef CONFIG_PAX_REFCOUNT
20155 +2:
20156 +#endif
20157 +
20158 +RET_ENDP
20159 +BEGIN(sub_return_unchecked)
20160 + negl %edx
20161 + negl %eax
20162 + sbbl $0, %edx
20163 + addl (v), %eax
20164 + adcl 4(v), %edx
20165 movl %eax, (v)
20166 movl %edx, 4(v)
20167 RET_ENDP
20168 @@ -105,6 +180,20 @@ RET_ENDP
20169 BEGIN(inc)
20170 addl $1, (v)
20171 adcl $0, 4(v)
20172 +
20173 +#ifdef CONFIG_PAX_REFCOUNT
20174 + jno 0f
20175 + subl $1, (v)
20176 + sbbl $0, 4(v)
20177 + int $4
20178 +0:
20179 + _ASM_EXTABLE(0b, 0b)
20180 +#endif
20181 +
20182 +RET_ENDP
20183 +BEGIN(inc_unchecked)
20184 + addl $1, (v)
20185 + adcl $0, 4(v)
20186 RET_ENDP
20187 #undef v
20188
20189 @@ -114,6 +203,26 @@ BEGIN(inc_return)
20190 movl 4(v), %edx
20191 addl $1, %eax
20192 adcl $0, %edx
20193 +
20194 +#ifdef CONFIG_PAX_REFCOUNT
20195 + into
20196 +1234:
20197 + _ASM_EXTABLE(1234b, 2f)
20198 +#endif
20199 +
20200 + movl %eax, (v)
20201 + movl %edx, 4(v)
20202 +
20203 +#ifdef CONFIG_PAX_REFCOUNT
20204 +2:
20205 +#endif
20206 +
20207 +RET_ENDP
20208 +BEGIN(inc_return_unchecked)
20209 + movl (v), %eax
20210 + movl 4(v), %edx
20211 + addl $1, %eax
20212 + adcl $0, %edx
20213 movl %eax, (v)
20214 movl %edx, 4(v)
20215 RET_ENDP
20216 @@ -123,6 +232,20 @@ RET_ENDP
20217 BEGIN(dec)
20218 subl $1, (v)
20219 sbbl $0, 4(v)
20220 +
20221 +#ifdef CONFIG_PAX_REFCOUNT
20222 + jno 0f
20223 + addl $1, (v)
20224 + adcl $0, 4(v)
20225 + int $4
20226 +0:
20227 + _ASM_EXTABLE(0b, 0b)
20228 +#endif
20229 +
20230 +RET_ENDP
20231 +BEGIN(dec_unchecked)
20232 + subl $1, (v)
20233 + sbbl $0, 4(v)
20234 RET_ENDP
20235 #undef v
20236
20237 @@ -132,6 +255,26 @@ BEGIN(dec_return)
20238 movl 4(v), %edx
20239 subl $1, %eax
20240 sbbl $0, %edx
20241 +
20242 +#ifdef CONFIG_PAX_REFCOUNT
20243 + into
20244 +1234:
20245 + _ASM_EXTABLE(1234b, 2f)
20246 +#endif
20247 +
20248 + movl %eax, (v)
20249 + movl %edx, 4(v)
20250 +
20251 +#ifdef CONFIG_PAX_REFCOUNT
20252 +2:
20253 +#endif
20254 +
20255 +RET_ENDP
20256 +BEGIN(dec_return_unchecked)
20257 + movl (v), %eax
20258 + movl 4(v), %edx
20259 + subl $1, %eax
20260 + sbbl $0, %edx
20261 movl %eax, (v)
20262 movl %edx, 4(v)
20263 RET_ENDP
20264 @@ -143,6 +286,13 @@ BEGIN(add_unless)
20265 adcl %edx, %edi
20266 addl (v), %eax
20267 adcl 4(v), %edx
20268 +
20269 +#ifdef CONFIG_PAX_REFCOUNT
20270 + into
20271 +1234:
20272 + _ASM_EXTABLE(1234b, 2f)
20273 +#endif
20274 +
20275 cmpl %eax, %esi
20276 je 3f
20277 1:
20278 @@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
20279 1:
20280 addl $1, %eax
20281 adcl $0, %edx
20282 +
20283 +#ifdef CONFIG_PAX_REFCOUNT
20284 + into
20285 +1234:
20286 + _ASM_EXTABLE(1234b, 2f)
20287 +#endif
20288 +
20289 movl %eax, (v)
20290 movl %edx, 4(v)
20291 movl $1, %eax
20292 @@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
20293 movl 4(v), %edx
20294 subl $1, %eax
20295 sbbl $0, %edx
20296 +
20297 +#ifdef CONFIG_PAX_REFCOUNT
20298 + into
20299 +1234:
20300 + _ASM_EXTABLE(1234b, 1f)
20301 +#endif
20302 +
20303 js 1f
20304 movl %eax, (v)
20305 movl %edx, 4(v)
20306 diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
20307 index 391a083..d658e9f 100644
20308 --- a/arch/x86/lib/atomic64_cx8_32.S
20309 +++ b/arch/x86/lib/atomic64_cx8_32.S
20310 @@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
20311 CFI_STARTPROC
20312
20313 read64 %ecx
20314 + pax_force_retaddr
20315 ret
20316 CFI_ENDPROC
20317 ENDPROC(atomic64_read_cx8)
20318
20319 +ENTRY(atomic64_read_unchecked_cx8)
20320 + CFI_STARTPROC
20321 +
20322 + read64 %ecx
20323 + pax_force_retaddr
20324 + ret
20325 + CFI_ENDPROC
20326 +ENDPROC(atomic64_read_unchecked_cx8)
20327 +
20328 ENTRY(atomic64_set_cx8)
20329 CFI_STARTPROC
20330
20331 @@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
20332 cmpxchg8b (%esi)
20333 jne 1b
20334
20335 + pax_force_retaddr
20336 ret
20337 CFI_ENDPROC
20338 ENDPROC(atomic64_set_cx8)
20339
20340 +ENTRY(atomic64_set_unchecked_cx8)
20341 + CFI_STARTPROC
20342 +
20343 +1:
20344 +/* we don't need LOCK_PREFIX since aligned 64-bit writes
20345 + * are atomic on 586 and newer */
20346 + cmpxchg8b (%esi)
20347 + jne 1b
20348 +
20349 + pax_force_retaddr
20350 + ret
20351 + CFI_ENDPROC
20352 +ENDPROC(atomic64_set_unchecked_cx8)
20353 +
20354 ENTRY(atomic64_xchg_cx8)
20355 CFI_STARTPROC
20356
20357 @@ -62,12 +87,13 @@ ENTRY(atomic64_xchg_cx8)
20358 cmpxchg8b (%esi)
20359 jne 1b
20360
20361 + pax_force_retaddr
20362 ret
20363 CFI_ENDPROC
20364 ENDPROC(atomic64_xchg_cx8)
20365
20366 -.macro addsub_return func ins insc
20367 -ENTRY(atomic64_\func\()_return_cx8)
20368 +.macro addsub_return func ins insc unchecked=""
20369 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
20370 CFI_STARTPROC
20371 SAVE ebp
20372 SAVE ebx
20373 @@ -84,27 +110,44 @@ ENTRY(atomic64_\func\()_return_cx8)
20374 movl %edx, %ecx
20375 \ins\()l %esi, %ebx
20376 \insc\()l %edi, %ecx
20377 +
20378 +.ifb \unchecked
20379 +#ifdef CONFIG_PAX_REFCOUNT
20380 + into
20381 +2:
20382 + _ASM_EXTABLE(2b, 3f)
20383 +#endif
20384 +.endif
20385 +
20386 LOCK_PREFIX
20387 cmpxchg8b (%ebp)
20388 jne 1b
20389 -
20390 -10:
20391 movl %ebx, %eax
20392 movl %ecx, %edx
20393 +
20394 +.ifb \unchecked
20395 +#ifdef CONFIG_PAX_REFCOUNT
20396 +3:
20397 +#endif
20398 +.endif
20399 +
20400 RESTORE edi
20401 RESTORE esi
20402 RESTORE ebx
20403 RESTORE ebp
20404 + pax_force_retaddr
20405 ret
20406 CFI_ENDPROC
20407 -ENDPROC(atomic64_\func\()_return_cx8)
20408 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
20409 .endm
20410
20411 addsub_return add add adc
20412 addsub_return sub sub sbb
20413 +addsub_return add add adc _unchecked
20414 +addsub_return sub sub sbb _unchecked
20415
20416 -.macro incdec_return func ins insc
20417 -ENTRY(atomic64_\func\()_return_cx8)
20418 +.macro incdec_return func ins insc unchecked
20419 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
20420 CFI_STARTPROC
20421 SAVE ebx
20422
20423 @@ -114,21 +157,39 @@ ENTRY(atomic64_\func\()_return_cx8)
20424 movl %edx, %ecx
20425 \ins\()l $1, %ebx
20426 \insc\()l $0, %ecx
20427 +
20428 +.ifb \unchecked
20429 +#ifdef CONFIG_PAX_REFCOUNT
20430 + into
20431 +2:
20432 + _ASM_EXTABLE(2b, 3f)
20433 +#endif
20434 +.endif
20435 +
20436 LOCK_PREFIX
20437 cmpxchg8b (%esi)
20438 jne 1b
20439
20440 -10:
20441 movl %ebx, %eax
20442 movl %ecx, %edx
20443 +
20444 +.ifb \unchecked
20445 +#ifdef CONFIG_PAX_REFCOUNT
20446 +3:
20447 +#endif
20448 +.endif
20449 +
20450 RESTORE ebx
20451 + pax_force_retaddr
20452 ret
20453 CFI_ENDPROC
20454 -ENDPROC(atomic64_\func\()_return_cx8)
20455 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
20456 .endm
20457
20458 incdec_return inc add adc
20459 incdec_return dec sub sbb
20460 +incdec_return inc add adc _unchecked
20461 +incdec_return dec sub sbb _unchecked
20462
20463 ENTRY(atomic64_dec_if_positive_cx8)
20464 CFI_STARTPROC
20465 @@ -140,6 +201,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
20466 movl %edx, %ecx
20467 subl $1, %ebx
20468 sbb $0, %ecx
20469 +
20470 +#ifdef CONFIG_PAX_REFCOUNT
20471 + into
20472 +1234:
20473 + _ASM_EXTABLE(1234b, 2f)
20474 +#endif
20475 +
20476 js 2f
20477 LOCK_PREFIX
20478 cmpxchg8b (%esi)
20479 @@ -149,6 +217,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
20480 movl %ebx, %eax
20481 movl %ecx, %edx
20482 RESTORE ebx
20483 + pax_force_retaddr
20484 ret
20485 CFI_ENDPROC
20486 ENDPROC(atomic64_dec_if_positive_cx8)
20487 @@ -174,6 +243,13 @@ ENTRY(atomic64_add_unless_cx8)
20488 movl %edx, %ecx
20489 addl %esi, %ebx
20490 adcl %edi, %ecx
20491 +
20492 +#ifdef CONFIG_PAX_REFCOUNT
20493 + into
20494 +1234:
20495 + _ASM_EXTABLE(1234b, 3f)
20496 +#endif
20497 +
20498 LOCK_PREFIX
20499 cmpxchg8b (%ebp)
20500 jne 1b
20501 @@ -184,6 +260,7 @@ ENTRY(atomic64_add_unless_cx8)
20502 CFI_ADJUST_CFA_OFFSET -8
20503 RESTORE ebx
20504 RESTORE ebp
20505 + pax_force_retaddr
20506 ret
20507 4:
20508 cmpl %edx, 4(%esp)
20509 @@ -206,6 +283,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
20510 movl %edx, %ecx
20511 addl $1, %ebx
20512 adcl $0, %ecx
20513 +
20514 +#ifdef CONFIG_PAX_REFCOUNT
20515 + into
20516 +1234:
20517 + _ASM_EXTABLE(1234b, 3f)
20518 +#endif
20519 +
20520 LOCK_PREFIX
20521 cmpxchg8b (%esi)
20522 jne 1b
20523 @@ -213,6 +297,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
20524 movl $1, %eax
20525 3:
20526 RESTORE ebx
20527 + pax_force_retaddr
20528 ret
20529 4:
20530 testl %edx, %edx
20531 diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
20532 index 78d16a5..fbcf666 100644
20533 --- a/arch/x86/lib/checksum_32.S
20534 +++ b/arch/x86/lib/checksum_32.S
20535 @@ -28,7 +28,8 @@
20536 #include <linux/linkage.h>
20537 #include <asm/dwarf2.h>
20538 #include <asm/errno.h>
20539 -
20540 +#include <asm/segment.h>
20541 +
20542 /*
20543 * computes a partial checksum, e.g. for TCP/UDP fragments
20544 */
20545 @@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
20546
20547 #define ARGBASE 16
20548 #define FP 12
20549 -
20550 -ENTRY(csum_partial_copy_generic)
20551 +
20552 +ENTRY(csum_partial_copy_generic_to_user)
20553 CFI_STARTPROC
20554 +
20555 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20556 + pushl_cfi %gs
20557 + popl_cfi %es
20558 + jmp csum_partial_copy_generic
20559 +#endif
20560 +
20561 +ENTRY(csum_partial_copy_generic_from_user)
20562 +
20563 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20564 + pushl_cfi %gs
20565 + popl_cfi %ds
20566 +#endif
20567 +
20568 +ENTRY(csum_partial_copy_generic)
20569 subl $4,%esp
20570 CFI_ADJUST_CFA_OFFSET 4
20571 pushl_cfi %edi
20572 @@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
20573 jmp 4f
20574 SRC(1: movw (%esi), %bx )
20575 addl $2, %esi
20576 -DST( movw %bx, (%edi) )
20577 +DST( movw %bx, %es:(%edi) )
20578 addl $2, %edi
20579 addw %bx, %ax
20580 adcl $0, %eax
20581 @@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
20582 SRC(1: movl (%esi), %ebx )
20583 SRC( movl 4(%esi), %edx )
20584 adcl %ebx, %eax
20585 -DST( movl %ebx, (%edi) )
20586 +DST( movl %ebx, %es:(%edi) )
20587 adcl %edx, %eax
20588 -DST( movl %edx, 4(%edi) )
20589 +DST( movl %edx, %es:4(%edi) )
20590
20591 SRC( movl 8(%esi), %ebx )
20592 SRC( movl 12(%esi), %edx )
20593 adcl %ebx, %eax
20594 -DST( movl %ebx, 8(%edi) )
20595 +DST( movl %ebx, %es:8(%edi) )
20596 adcl %edx, %eax
20597 -DST( movl %edx, 12(%edi) )
20598 +DST( movl %edx, %es:12(%edi) )
20599
20600 SRC( movl 16(%esi), %ebx )
20601 SRC( movl 20(%esi), %edx )
20602 adcl %ebx, %eax
20603 -DST( movl %ebx, 16(%edi) )
20604 +DST( movl %ebx, %es:16(%edi) )
20605 adcl %edx, %eax
20606 -DST( movl %edx, 20(%edi) )
20607 +DST( movl %edx, %es:20(%edi) )
20608
20609 SRC( movl 24(%esi), %ebx )
20610 SRC( movl 28(%esi), %edx )
20611 adcl %ebx, %eax
20612 -DST( movl %ebx, 24(%edi) )
20613 +DST( movl %ebx, %es:24(%edi) )
20614 adcl %edx, %eax
20615 -DST( movl %edx, 28(%edi) )
20616 +DST( movl %edx, %es:28(%edi) )
20617
20618 lea 32(%esi), %esi
20619 lea 32(%edi), %edi
20620 @@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
20621 shrl $2, %edx # This clears CF
20622 SRC(3: movl (%esi), %ebx )
20623 adcl %ebx, %eax
20624 -DST( movl %ebx, (%edi) )
20625 +DST( movl %ebx, %es:(%edi) )
20626 lea 4(%esi), %esi
20627 lea 4(%edi), %edi
20628 dec %edx
20629 @@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
20630 jb 5f
20631 SRC( movw (%esi), %cx )
20632 leal 2(%esi), %esi
20633 -DST( movw %cx, (%edi) )
20634 +DST( movw %cx, %es:(%edi) )
20635 leal 2(%edi), %edi
20636 je 6f
20637 shll $16,%ecx
20638 SRC(5: movb (%esi), %cl )
20639 -DST( movb %cl, (%edi) )
20640 +DST( movb %cl, %es:(%edi) )
20641 6: addl %ecx, %eax
20642 adcl $0, %eax
20643 7:
20644 @@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
20645
20646 6001:
20647 movl ARGBASE+20(%esp), %ebx # src_err_ptr
20648 - movl $-EFAULT, (%ebx)
20649 + movl $-EFAULT, %ss:(%ebx)
20650
20651 # zero the complete destination - computing the rest
20652 # is too much work
20653 @@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
20654
20655 6002:
20656 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
20657 - movl $-EFAULT,(%ebx)
20658 + movl $-EFAULT,%ss:(%ebx)
20659 jmp 5000b
20660
20661 .previous
20662
20663 + pushl_cfi %ss
20664 + popl_cfi %ds
20665 + pushl_cfi %ss
20666 + popl_cfi %es
20667 popl_cfi %ebx
20668 CFI_RESTORE ebx
20669 popl_cfi %esi
20670 @@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
20671 popl_cfi %ecx # equivalent to addl $4,%esp
20672 ret
20673 CFI_ENDPROC
20674 -ENDPROC(csum_partial_copy_generic)
20675 +ENDPROC(csum_partial_copy_generic_to_user)
20676
20677 #else
20678
20679 /* Version for PentiumII/PPro */
20680
20681 #define ROUND1(x) \
20682 + nop; nop; nop; \
20683 SRC(movl x(%esi), %ebx ) ; \
20684 addl %ebx, %eax ; \
20685 - DST(movl %ebx, x(%edi) ) ;
20686 + DST(movl %ebx, %es:x(%edi)) ;
20687
20688 #define ROUND(x) \
20689 + nop; nop; nop; \
20690 SRC(movl x(%esi), %ebx ) ; \
20691 adcl %ebx, %eax ; \
20692 - DST(movl %ebx, x(%edi) ) ;
20693 + DST(movl %ebx, %es:x(%edi)) ;
20694
20695 #define ARGBASE 12
20696 -
20697 -ENTRY(csum_partial_copy_generic)
20698 +
20699 +ENTRY(csum_partial_copy_generic_to_user)
20700 CFI_STARTPROC
20701 +
20702 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20703 + pushl_cfi %gs
20704 + popl_cfi %es
20705 + jmp csum_partial_copy_generic
20706 +#endif
20707 +
20708 +ENTRY(csum_partial_copy_generic_from_user)
20709 +
20710 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20711 + pushl_cfi %gs
20712 + popl_cfi %ds
20713 +#endif
20714 +
20715 +ENTRY(csum_partial_copy_generic)
20716 pushl_cfi %ebx
20717 CFI_REL_OFFSET ebx, 0
20718 pushl_cfi %edi
20719 @@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
20720 subl %ebx, %edi
20721 lea -1(%esi),%edx
20722 andl $-32,%edx
20723 - lea 3f(%ebx,%ebx), %ebx
20724 + lea 3f(%ebx,%ebx,2), %ebx
20725 testl %esi, %esi
20726 jmp *%ebx
20727 1: addl $64,%esi
20728 @@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
20729 jb 5f
20730 SRC( movw (%esi), %dx )
20731 leal 2(%esi), %esi
20732 -DST( movw %dx, (%edi) )
20733 +DST( movw %dx, %es:(%edi) )
20734 leal 2(%edi), %edi
20735 je 6f
20736 shll $16,%edx
20737 5:
20738 SRC( movb (%esi), %dl )
20739 -DST( movb %dl, (%edi) )
20740 +DST( movb %dl, %es:(%edi) )
20741 6: addl %edx, %eax
20742 adcl $0, %eax
20743 7:
20744 .section .fixup, "ax"
20745 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
20746 - movl $-EFAULT, (%ebx)
20747 + movl $-EFAULT, %ss:(%ebx)
20748 # zero the complete destination (computing the rest is too much work)
20749 movl ARGBASE+8(%esp),%edi # dst
20750 movl ARGBASE+12(%esp),%ecx # len
20751 @@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
20752 rep; stosb
20753 jmp 7b
20754 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
20755 - movl $-EFAULT, (%ebx)
20756 + movl $-EFAULT, %ss:(%ebx)
20757 jmp 7b
20758 .previous
20759
20760 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20761 + pushl_cfi %ss
20762 + popl_cfi %ds
20763 + pushl_cfi %ss
20764 + popl_cfi %es
20765 +#endif
20766 +
20767 popl_cfi %esi
20768 CFI_RESTORE esi
20769 popl_cfi %edi
20770 @@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
20771 CFI_RESTORE ebx
20772 ret
20773 CFI_ENDPROC
20774 -ENDPROC(csum_partial_copy_generic)
20775 +ENDPROC(csum_partial_copy_generic_to_user)
20776
20777 #undef ROUND
20778 #undef ROUND1
20779 diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
20780 index f2145cf..cea889d 100644
20781 --- a/arch/x86/lib/clear_page_64.S
20782 +++ b/arch/x86/lib/clear_page_64.S
20783 @@ -11,6 +11,7 @@ ENTRY(clear_page_c)
20784 movl $4096/8,%ecx
20785 xorl %eax,%eax
20786 rep stosq
20787 + pax_force_retaddr
20788 ret
20789 CFI_ENDPROC
20790 ENDPROC(clear_page_c)
20791 @@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
20792 movl $4096,%ecx
20793 xorl %eax,%eax
20794 rep stosb
20795 + pax_force_retaddr
20796 ret
20797 CFI_ENDPROC
20798 ENDPROC(clear_page_c_e)
20799 @@ -43,6 +45,7 @@ ENTRY(clear_page)
20800 leaq 64(%rdi),%rdi
20801 jnz .Lloop
20802 nop
20803 + pax_force_retaddr
20804 ret
20805 CFI_ENDPROC
20806 .Lclear_page_end:
20807 @@ -58,7 +61,7 @@ ENDPROC(clear_page)
20808
20809 #include <asm/cpufeature.h>
20810
20811 - .section .altinstr_replacement,"ax"
20812 + .section .altinstr_replacement,"a"
20813 1: .byte 0xeb /* jmp <disp8> */
20814 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
20815 2: .byte 0xeb /* jmp <disp8> */
20816 diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
20817 index 1e572c5..2a162cd 100644
20818 --- a/arch/x86/lib/cmpxchg16b_emu.S
20819 +++ b/arch/x86/lib/cmpxchg16b_emu.S
20820 @@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
20821
20822 popf
20823 mov $1, %al
20824 + pax_force_retaddr
20825 ret
20826
20827 not_same:
20828 popf
20829 xor %al,%al
20830 + pax_force_retaddr
20831 ret
20832
20833 CFI_ENDPROC
20834 diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
20835 index 01c805b..dccb07f 100644
20836 --- a/arch/x86/lib/copy_page_64.S
20837 +++ b/arch/x86/lib/copy_page_64.S
20838 @@ -9,6 +9,7 @@ copy_page_c:
20839 CFI_STARTPROC
20840 movl $4096/8,%ecx
20841 rep movsq
20842 + pax_force_retaddr
20843 ret
20844 CFI_ENDPROC
20845 ENDPROC(copy_page_c)
20846 @@ -39,7 +40,7 @@ ENTRY(copy_page)
20847 movq 16 (%rsi), %rdx
20848 movq 24 (%rsi), %r8
20849 movq 32 (%rsi), %r9
20850 - movq 40 (%rsi), %r10
20851 + movq 40 (%rsi), %r13
20852 movq 48 (%rsi), %r11
20853 movq 56 (%rsi), %r12
20854
20855 @@ -50,7 +51,7 @@ ENTRY(copy_page)
20856 movq %rdx, 16 (%rdi)
20857 movq %r8, 24 (%rdi)
20858 movq %r9, 32 (%rdi)
20859 - movq %r10, 40 (%rdi)
20860 + movq %r13, 40 (%rdi)
20861 movq %r11, 48 (%rdi)
20862 movq %r12, 56 (%rdi)
20863
20864 @@ -69,7 +70,7 @@ ENTRY(copy_page)
20865 movq 16 (%rsi), %rdx
20866 movq 24 (%rsi), %r8
20867 movq 32 (%rsi), %r9
20868 - movq 40 (%rsi), %r10
20869 + movq 40 (%rsi), %r13
20870 movq 48 (%rsi), %r11
20871 movq 56 (%rsi), %r12
20872
20873 @@ -78,7 +79,7 @@ ENTRY(copy_page)
20874 movq %rdx, 16 (%rdi)
20875 movq %r8, 24 (%rdi)
20876 movq %r9, 32 (%rdi)
20877 - movq %r10, 40 (%rdi)
20878 + movq %r13, 40 (%rdi)
20879 movq %r11, 48 (%rdi)
20880 movq %r12, 56 (%rdi)
20881
20882 @@ -95,6 +96,7 @@ ENTRY(copy_page)
20883 CFI_RESTORE r13
20884 addq $3*8,%rsp
20885 CFI_ADJUST_CFA_OFFSET -3*8
20886 + pax_force_retaddr
20887 ret
20888 .Lcopy_page_end:
20889 CFI_ENDPROC
20890 @@ -105,7 +107,7 @@ ENDPROC(copy_page)
20891
20892 #include <asm/cpufeature.h>
20893
20894 - .section .altinstr_replacement,"ax"
20895 + .section .altinstr_replacement,"a"
20896 1: .byte 0xeb /* jmp <disp8> */
20897 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
20898 2:
20899 diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
20900 index 0248402..821c786 100644
20901 --- a/arch/x86/lib/copy_user_64.S
20902 +++ b/arch/x86/lib/copy_user_64.S
20903 @@ -16,6 +16,7 @@
20904 #include <asm/thread_info.h>
20905 #include <asm/cpufeature.h>
20906 #include <asm/alternative-asm.h>
20907 +#include <asm/pgtable.h>
20908
20909 /*
20910 * By placing feature2 after feature1 in altinstructions section, we logically
20911 @@ -29,7 +30,7 @@
20912 .byte 0xe9 /* 32bit jump */
20913 .long \orig-1f /* by default jump to orig */
20914 1:
20915 - .section .altinstr_replacement,"ax"
20916 + .section .altinstr_replacement,"a"
20917 2: .byte 0xe9 /* near jump with 32bit immediate */
20918 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
20919 3: .byte 0xe9 /* near jump with 32bit immediate */
20920 @@ -71,47 +72,20 @@
20921 #endif
20922 .endm
20923
20924 -/* Standard copy_to_user with segment limit checking */
20925 -ENTRY(_copy_to_user)
20926 - CFI_STARTPROC
20927 - GET_THREAD_INFO(%rax)
20928 - movq %rdi,%rcx
20929 - addq %rdx,%rcx
20930 - jc bad_to_user
20931 - cmpq TI_addr_limit(%rax),%rcx
20932 - ja bad_to_user
20933 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
20934 - copy_user_generic_unrolled,copy_user_generic_string, \
20935 - copy_user_enhanced_fast_string
20936 - CFI_ENDPROC
20937 -ENDPROC(_copy_to_user)
20938 -
20939 -/* Standard copy_from_user with segment limit checking */
20940 -ENTRY(_copy_from_user)
20941 - CFI_STARTPROC
20942 - GET_THREAD_INFO(%rax)
20943 - movq %rsi,%rcx
20944 - addq %rdx,%rcx
20945 - jc bad_from_user
20946 - cmpq TI_addr_limit(%rax),%rcx
20947 - ja bad_from_user
20948 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
20949 - copy_user_generic_unrolled,copy_user_generic_string, \
20950 - copy_user_enhanced_fast_string
20951 - CFI_ENDPROC
20952 -ENDPROC(_copy_from_user)
20953 -
20954 .section .fixup,"ax"
20955 /* must zero dest */
20956 ENTRY(bad_from_user)
20957 bad_from_user:
20958 CFI_STARTPROC
20959 + testl %edx,%edx
20960 + js bad_to_user
20961 movl %edx,%ecx
20962 xorl %eax,%eax
20963 rep
20964 stosb
20965 bad_to_user:
20966 movl %edx,%eax
20967 + pax_force_retaddr
20968 ret
20969 CFI_ENDPROC
20970 ENDPROC(bad_from_user)
20971 @@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
20972 jz 17f
20973 1: movq (%rsi),%r8
20974 2: movq 1*8(%rsi),%r9
20975 -3: movq 2*8(%rsi),%r10
20976 +3: movq 2*8(%rsi),%rax
20977 4: movq 3*8(%rsi),%r11
20978 5: movq %r8,(%rdi)
20979 6: movq %r9,1*8(%rdi)
20980 -7: movq %r10,2*8(%rdi)
20981 +7: movq %rax,2*8(%rdi)
20982 8: movq %r11,3*8(%rdi)
20983 9: movq 4*8(%rsi),%r8
20984 10: movq 5*8(%rsi),%r9
20985 -11: movq 6*8(%rsi),%r10
20986 +11: movq 6*8(%rsi),%rax
20987 12: movq 7*8(%rsi),%r11
20988 13: movq %r8,4*8(%rdi)
20989 14: movq %r9,5*8(%rdi)
20990 -15: movq %r10,6*8(%rdi)
20991 +15: movq %rax,6*8(%rdi)
20992 16: movq %r11,7*8(%rdi)
20993 leaq 64(%rsi),%rsi
20994 leaq 64(%rdi),%rdi
20995 @@ -179,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
20996 decl %ecx
20997 jnz 21b
20998 23: xor %eax,%eax
20999 + pax_force_retaddr
21000 ret
21001
21002 .section .fixup,"ax"
21003 @@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
21004 3: rep
21005 movsb
21006 4: xorl %eax,%eax
21007 + pax_force_retaddr
21008 ret
21009
21010 .section .fixup,"ax"
21011 @@ -287,6 +263,7 @@ ENTRY(copy_user_enhanced_fast_string)
21012 1: rep
21013 movsb
21014 2: xorl %eax,%eax
21015 + pax_force_retaddr
21016 ret
21017
21018 .section .fixup,"ax"
21019 diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
21020 index cb0c112..e3a6895 100644
21021 --- a/arch/x86/lib/copy_user_nocache_64.S
21022 +++ b/arch/x86/lib/copy_user_nocache_64.S
21023 @@ -8,12 +8,14 @@
21024
21025 #include <linux/linkage.h>
21026 #include <asm/dwarf2.h>
21027 +#include <asm/alternative-asm.h>
21028
21029 #define FIX_ALIGNMENT 1
21030
21031 #include <asm/current.h>
21032 #include <asm/asm-offsets.h>
21033 #include <asm/thread_info.h>
21034 +#include <asm/pgtable.h>
21035
21036 .macro ALIGN_DESTINATION
21037 #ifdef FIX_ALIGNMENT
21038 @@ -50,6 +52,15 @@
21039 */
21040 ENTRY(__copy_user_nocache)
21041 CFI_STARTPROC
21042 +
21043 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21044 + mov $PAX_USER_SHADOW_BASE,%rcx
21045 + cmp %rcx,%rsi
21046 + jae 1f
21047 + add %rcx,%rsi
21048 +1:
21049 +#endif
21050 +
21051 cmpl $8,%edx
21052 jb 20f /* less then 8 bytes, go to byte copy loop */
21053 ALIGN_DESTINATION
21054 @@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
21055 jz 17f
21056 1: movq (%rsi),%r8
21057 2: movq 1*8(%rsi),%r9
21058 -3: movq 2*8(%rsi),%r10
21059 +3: movq 2*8(%rsi),%rax
21060 4: movq 3*8(%rsi),%r11
21061 5: movnti %r8,(%rdi)
21062 6: movnti %r9,1*8(%rdi)
21063 -7: movnti %r10,2*8(%rdi)
21064 +7: movnti %rax,2*8(%rdi)
21065 8: movnti %r11,3*8(%rdi)
21066 9: movq 4*8(%rsi),%r8
21067 10: movq 5*8(%rsi),%r9
21068 -11: movq 6*8(%rsi),%r10
21069 +11: movq 6*8(%rsi),%rax
21070 12: movq 7*8(%rsi),%r11
21071 13: movnti %r8,4*8(%rdi)
21072 14: movnti %r9,5*8(%rdi)
21073 -15: movnti %r10,6*8(%rdi)
21074 +15: movnti %rax,6*8(%rdi)
21075 16: movnti %r11,7*8(%rdi)
21076 leaq 64(%rsi),%rsi
21077 leaq 64(%rdi),%rdi
21078 @@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
21079 jnz 21b
21080 23: xorl %eax,%eax
21081 sfence
21082 + pax_force_retaddr
21083 ret
21084
21085 .section .fixup,"ax"
21086 diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
21087 index fb903b7..c92b7f7 100644
21088 --- a/arch/x86/lib/csum-copy_64.S
21089 +++ b/arch/x86/lib/csum-copy_64.S
21090 @@ -8,6 +8,7 @@
21091 #include <linux/linkage.h>
21092 #include <asm/dwarf2.h>
21093 #include <asm/errno.h>
21094 +#include <asm/alternative-asm.h>
21095
21096 /*
21097 * Checksum copy with exception handling.
21098 @@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
21099 CFI_RESTORE rbp
21100 addq $7*8, %rsp
21101 CFI_ADJUST_CFA_OFFSET -7*8
21102 + pax_force_retaddr 0, 1
21103 ret
21104 CFI_RESTORE_STATE
21105
21106 diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
21107 index 459b58a..9570bc7 100644
21108 --- a/arch/x86/lib/csum-wrappers_64.c
21109 +++ b/arch/x86/lib/csum-wrappers_64.c
21110 @@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
21111 len -= 2;
21112 }
21113 }
21114 - isum = csum_partial_copy_generic((__force const void *)src,
21115 +
21116 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21117 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
21118 + src += PAX_USER_SHADOW_BASE;
21119 +#endif
21120 +
21121 + isum = csum_partial_copy_generic((const void __force_kernel *)src,
21122 dst, len, isum, errp, NULL);
21123 if (unlikely(*errp))
21124 goto out_err;
21125 @@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
21126 }
21127
21128 *errp = 0;
21129 - return csum_partial_copy_generic(src, (void __force *)dst,
21130 +
21131 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21132 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
21133 + dst += PAX_USER_SHADOW_BASE;
21134 +#endif
21135 +
21136 + return csum_partial_copy_generic(src, (void __force_kernel *)dst,
21137 len, isum, NULL, errp);
21138 }
21139 EXPORT_SYMBOL(csum_partial_copy_to_user);
21140 diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
21141 index 51f1504..ddac4c1 100644
21142 --- a/arch/x86/lib/getuser.S
21143 +++ b/arch/x86/lib/getuser.S
21144 @@ -33,15 +33,38 @@
21145 #include <asm/asm-offsets.h>
21146 #include <asm/thread_info.h>
21147 #include <asm/asm.h>
21148 +#include <asm/segment.h>
21149 +#include <asm/pgtable.h>
21150 +#include <asm/alternative-asm.h>
21151 +
21152 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
21153 +#define __copyuser_seg gs;
21154 +#else
21155 +#define __copyuser_seg
21156 +#endif
21157
21158 .text
21159 ENTRY(__get_user_1)
21160 CFI_STARTPROC
21161 +
21162 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21163 GET_THREAD_INFO(%_ASM_DX)
21164 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21165 jae bad_get_user
21166 -1: movzb (%_ASM_AX),%edx
21167 +
21168 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21169 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21170 + cmp %_ASM_DX,%_ASM_AX
21171 + jae 1234f
21172 + add %_ASM_DX,%_ASM_AX
21173 +1234:
21174 +#endif
21175 +
21176 +#endif
21177 +
21178 +1: __copyuser_seg movzb (%_ASM_AX),%edx
21179 xor %eax,%eax
21180 + pax_force_retaddr
21181 ret
21182 CFI_ENDPROC
21183 ENDPROC(__get_user_1)
21184 @@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
21185 ENTRY(__get_user_2)
21186 CFI_STARTPROC
21187 add $1,%_ASM_AX
21188 +
21189 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21190 jc bad_get_user
21191 GET_THREAD_INFO(%_ASM_DX)
21192 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21193 jae bad_get_user
21194 -2: movzwl -1(%_ASM_AX),%edx
21195 +
21196 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21197 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21198 + cmp %_ASM_DX,%_ASM_AX
21199 + jae 1234f
21200 + add %_ASM_DX,%_ASM_AX
21201 +1234:
21202 +#endif
21203 +
21204 +#endif
21205 +
21206 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
21207 xor %eax,%eax
21208 + pax_force_retaddr
21209 ret
21210 CFI_ENDPROC
21211 ENDPROC(__get_user_2)
21212 @@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
21213 ENTRY(__get_user_4)
21214 CFI_STARTPROC
21215 add $3,%_ASM_AX
21216 +
21217 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21218 jc bad_get_user
21219 GET_THREAD_INFO(%_ASM_DX)
21220 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21221 jae bad_get_user
21222 -3: mov -3(%_ASM_AX),%edx
21223 +
21224 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21225 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21226 + cmp %_ASM_DX,%_ASM_AX
21227 + jae 1234f
21228 + add %_ASM_DX,%_ASM_AX
21229 +1234:
21230 +#endif
21231 +
21232 +#endif
21233 +
21234 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
21235 xor %eax,%eax
21236 + pax_force_retaddr
21237 ret
21238 CFI_ENDPROC
21239 ENDPROC(__get_user_4)
21240 @@ -80,8 +131,18 @@ ENTRY(__get_user_8)
21241 GET_THREAD_INFO(%_ASM_DX)
21242 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21243 jae bad_get_user
21244 +
21245 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21246 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21247 + cmp %_ASM_DX,%_ASM_AX
21248 + jae 1234f
21249 + add %_ASM_DX,%_ASM_AX
21250 +1234:
21251 +#endif
21252 +
21253 4: movq -7(%_ASM_AX),%_ASM_DX
21254 xor %eax,%eax
21255 + pax_force_retaddr
21256 ret
21257 CFI_ENDPROC
21258 ENDPROC(__get_user_8)
21259 @@ -91,6 +152,7 @@ bad_get_user:
21260 CFI_STARTPROC
21261 xor %edx,%edx
21262 mov $(-EFAULT),%_ASM_AX
21263 + pax_force_retaddr
21264 ret
21265 CFI_ENDPROC
21266 END(bad_get_user)
21267 diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
21268 index 5a1f9f3..ba9f577 100644
21269 --- a/arch/x86/lib/insn.c
21270 +++ b/arch/x86/lib/insn.c
21271 @@ -21,6 +21,11 @@
21272 #include <linux/string.h>
21273 #include <asm/inat.h>
21274 #include <asm/insn.h>
21275 +#ifdef __KERNEL__
21276 +#include <asm/pgtable_types.h>
21277 +#else
21278 +#define ktla_ktva(addr) addr
21279 +#endif
21280
21281 /* Verify next sizeof(t) bytes can be on the same instruction */
21282 #define validate_next(t, insn, n) \
21283 @@ -49,8 +54,8 @@
21284 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
21285 {
21286 memset(insn, 0, sizeof(*insn));
21287 - insn->kaddr = kaddr;
21288 - insn->next_byte = kaddr;
21289 + insn->kaddr = ktla_ktva(kaddr);
21290 + insn->next_byte = ktla_ktva(kaddr);
21291 insn->x86_64 = x86_64 ? 1 : 0;
21292 insn->opnd_bytes = 4;
21293 if (x86_64)
21294 diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
21295 index 05a95e7..326f2fa 100644
21296 --- a/arch/x86/lib/iomap_copy_64.S
21297 +++ b/arch/x86/lib/iomap_copy_64.S
21298 @@ -17,6 +17,7 @@
21299
21300 #include <linux/linkage.h>
21301 #include <asm/dwarf2.h>
21302 +#include <asm/alternative-asm.h>
21303
21304 /*
21305 * override generic version in lib/iomap_copy.c
21306 @@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
21307 CFI_STARTPROC
21308 movl %edx,%ecx
21309 rep movsd
21310 + pax_force_retaddr
21311 ret
21312 CFI_ENDPROC
21313 ENDPROC(__iowrite32_copy)
21314 diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
21315 index efbf2a0..8893637 100644
21316 --- a/arch/x86/lib/memcpy_64.S
21317 +++ b/arch/x86/lib/memcpy_64.S
21318 @@ -34,6 +34,7 @@
21319 rep movsq
21320 movl %edx, %ecx
21321 rep movsb
21322 + pax_force_retaddr
21323 ret
21324 .Lmemcpy_e:
21325 .previous
21326 @@ -51,6 +52,7 @@
21327
21328 movl %edx, %ecx
21329 rep movsb
21330 + pax_force_retaddr
21331 ret
21332 .Lmemcpy_e_e:
21333 .previous
21334 @@ -81,13 +83,13 @@ ENTRY(memcpy)
21335 */
21336 movq 0*8(%rsi), %r8
21337 movq 1*8(%rsi), %r9
21338 - movq 2*8(%rsi), %r10
21339 + movq 2*8(%rsi), %rcx
21340 movq 3*8(%rsi), %r11
21341 leaq 4*8(%rsi), %rsi
21342
21343 movq %r8, 0*8(%rdi)
21344 movq %r9, 1*8(%rdi)
21345 - movq %r10, 2*8(%rdi)
21346 + movq %rcx, 2*8(%rdi)
21347 movq %r11, 3*8(%rdi)
21348 leaq 4*8(%rdi), %rdi
21349 jae .Lcopy_forward_loop
21350 @@ -110,12 +112,12 @@ ENTRY(memcpy)
21351 subq $0x20, %rdx
21352 movq -1*8(%rsi), %r8
21353 movq -2*8(%rsi), %r9
21354 - movq -3*8(%rsi), %r10
21355 + movq -3*8(%rsi), %rcx
21356 movq -4*8(%rsi), %r11
21357 leaq -4*8(%rsi), %rsi
21358 movq %r8, -1*8(%rdi)
21359 movq %r9, -2*8(%rdi)
21360 - movq %r10, -3*8(%rdi)
21361 + movq %rcx, -3*8(%rdi)
21362 movq %r11, -4*8(%rdi)
21363 leaq -4*8(%rdi), %rdi
21364 jae .Lcopy_backward_loop
21365 @@ -135,12 +137,13 @@ ENTRY(memcpy)
21366 */
21367 movq 0*8(%rsi), %r8
21368 movq 1*8(%rsi), %r9
21369 - movq -2*8(%rsi, %rdx), %r10
21370 + movq -2*8(%rsi, %rdx), %rcx
21371 movq -1*8(%rsi, %rdx), %r11
21372 movq %r8, 0*8(%rdi)
21373 movq %r9, 1*8(%rdi)
21374 - movq %r10, -2*8(%rdi, %rdx)
21375 + movq %rcx, -2*8(%rdi, %rdx)
21376 movq %r11, -1*8(%rdi, %rdx)
21377 + pax_force_retaddr
21378 retq
21379 .p2align 4
21380 .Lless_16bytes:
21381 @@ -153,6 +156,7 @@ ENTRY(memcpy)
21382 movq -1*8(%rsi, %rdx), %r9
21383 movq %r8, 0*8(%rdi)
21384 movq %r9, -1*8(%rdi, %rdx)
21385 + pax_force_retaddr
21386 retq
21387 .p2align 4
21388 .Lless_8bytes:
21389 @@ -166,6 +170,7 @@ ENTRY(memcpy)
21390 movl -4(%rsi, %rdx), %r8d
21391 movl %ecx, (%rdi)
21392 movl %r8d, -4(%rdi, %rdx)
21393 + pax_force_retaddr
21394 retq
21395 .p2align 4
21396 .Lless_3bytes:
21397 @@ -183,6 +188,7 @@ ENTRY(memcpy)
21398 jnz .Lloop_1
21399
21400 .Lend:
21401 + pax_force_retaddr
21402 retq
21403 CFI_ENDPROC
21404 ENDPROC(memcpy)
21405 diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
21406 index ee16461..c39c199 100644
21407 --- a/arch/x86/lib/memmove_64.S
21408 +++ b/arch/x86/lib/memmove_64.S
21409 @@ -61,13 +61,13 @@ ENTRY(memmove)
21410 5:
21411 sub $0x20, %rdx
21412 movq 0*8(%rsi), %r11
21413 - movq 1*8(%rsi), %r10
21414 + movq 1*8(%rsi), %rcx
21415 movq 2*8(%rsi), %r9
21416 movq 3*8(%rsi), %r8
21417 leaq 4*8(%rsi), %rsi
21418
21419 movq %r11, 0*8(%rdi)
21420 - movq %r10, 1*8(%rdi)
21421 + movq %rcx, 1*8(%rdi)
21422 movq %r9, 2*8(%rdi)
21423 movq %r8, 3*8(%rdi)
21424 leaq 4*8(%rdi), %rdi
21425 @@ -81,10 +81,10 @@ ENTRY(memmove)
21426 4:
21427 movq %rdx, %rcx
21428 movq -8(%rsi, %rdx), %r11
21429 - lea -8(%rdi, %rdx), %r10
21430 + lea -8(%rdi, %rdx), %r9
21431 shrq $3, %rcx
21432 rep movsq
21433 - movq %r11, (%r10)
21434 + movq %r11, (%r9)
21435 jmp 13f
21436 .Lmemmove_end_forward:
21437
21438 @@ -95,14 +95,14 @@ ENTRY(memmove)
21439 7:
21440 movq %rdx, %rcx
21441 movq (%rsi), %r11
21442 - movq %rdi, %r10
21443 + movq %rdi, %r9
21444 leaq -8(%rsi, %rdx), %rsi
21445 leaq -8(%rdi, %rdx), %rdi
21446 shrq $3, %rcx
21447 std
21448 rep movsq
21449 cld
21450 - movq %r11, (%r10)
21451 + movq %r11, (%r9)
21452 jmp 13f
21453
21454 /*
21455 @@ -127,13 +127,13 @@ ENTRY(memmove)
21456 8:
21457 subq $0x20, %rdx
21458 movq -1*8(%rsi), %r11
21459 - movq -2*8(%rsi), %r10
21460 + movq -2*8(%rsi), %rcx
21461 movq -3*8(%rsi), %r9
21462 movq -4*8(%rsi), %r8
21463 leaq -4*8(%rsi), %rsi
21464
21465 movq %r11, -1*8(%rdi)
21466 - movq %r10, -2*8(%rdi)
21467 + movq %rcx, -2*8(%rdi)
21468 movq %r9, -3*8(%rdi)
21469 movq %r8, -4*8(%rdi)
21470 leaq -4*8(%rdi), %rdi
21471 @@ -151,11 +151,11 @@ ENTRY(memmove)
21472 * Move data from 16 bytes to 31 bytes.
21473 */
21474 movq 0*8(%rsi), %r11
21475 - movq 1*8(%rsi), %r10
21476 + movq 1*8(%rsi), %rcx
21477 movq -2*8(%rsi, %rdx), %r9
21478 movq -1*8(%rsi, %rdx), %r8
21479 movq %r11, 0*8(%rdi)
21480 - movq %r10, 1*8(%rdi)
21481 + movq %rcx, 1*8(%rdi)
21482 movq %r9, -2*8(%rdi, %rdx)
21483 movq %r8, -1*8(%rdi, %rdx)
21484 jmp 13f
21485 @@ -167,9 +167,9 @@ ENTRY(memmove)
21486 * Move data from 8 bytes to 15 bytes.
21487 */
21488 movq 0*8(%rsi), %r11
21489 - movq -1*8(%rsi, %rdx), %r10
21490 + movq -1*8(%rsi, %rdx), %r9
21491 movq %r11, 0*8(%rdi)
21492 - movq %r10, -1*8(%rdi, %rdx)
21493 + movq %r9, -1*8(%rdi, %rdx)
21494 jmp 13f
21495 10:
21496 cmpq $4, %rdx
21497 @@ -178,9 +178,9 @@ ENTRY(memmove)
21498 * Move data from 4 bytes to 7 bytes.
21499 */
21500 movl (%rsi), %r11d
21501 - movl -4(%rsi, %rdx), %r10d
21502 + movl -4(%rsi, %rdx), %r9d
21503 movl %r11d, (%rdi)
21504 - movl %r10d, -4(%rdi, %rdx)
21505 + movl %r9d, -4(%rdi, %rdx)
21506 jmp 13f
21507 11:
21508 cmp $2, %rdx
21509 @@ -189,9 +189,9 @@ ENTRY(memmove)
21510 * Move data from 2 bytes to 3 bytes.
21511 */
21512 movw (%rsi), %r11w
21513 - movw -2(%rsi, %rdx), %r10w
21514 + movw -2(%rsi, %rdx), %r9w
21515 movw %r11w, (%rdi)
21516 - movw %r10w, -2(%rdi, %rdx)
21517 + movw %r9w, -2(%rdi, %rdx)
21518 jmp 13f
21519 12:
21520 cmp $1, %rdx
21521 @@ -202,6 +202,7 @@ ENTRY(memmove)
21522 movb (%rsi), %r11b
21523 movb %r11b, (%rdi)
21524 13:
21525 + pax_force_retaddr
21526 retq
21527 CFI_ENDPROC
21528
21529 @@ -210,6 +211,7 @@ ENTRY(memmove)
21530 /* Forward moving data. */
21531 movq %rdx, %rcx
21532 rep movsb
21533 + pax_force_retaddr
21534 retq
21535 .Lmemmove_end_forward_efs:
21536 .previous
21537 diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
21538 index 79bd454..dff325a 100644
21539 --- a/arch/x86/lib/memset_64.S
21540 +++ b/arch/x86/lib/memset_64.S
21541 @@ -31,6 +31,7 @@
21542 movl %r8d,%ecx
21543 rep stosb
21544 movq %r9,%rax
21545 + pax_force_retaddr
21546 ret
21547 .Lmemset_e:
21548 .previous
21549 @@ -53,6 +54,7 @@
21550 movl %edx,%ecx
21551 rep stosb
21552 movq %r9,%rax
21553 + pax_force_retaddr
21554 ret
21555 .Lmemset_e_e:
21556 .previous
21557 @@ -60,13 +62,13 @@
21558 ENTRY(memset)
21559 ENTRY(__memset)
21560 CFI_STARTPROC
21561 - movq %rdi,%r10
21562 movq %rdx,%r11
21563
21564 /* expand byte value */
21565 movzbl %sil,%ecx
21566 movabs $0x0101010101010101,%rax
21567 mul %rcx /* with rax, clobbers rdx */
21568 + movq %rdi,%rdx
21569
21570 /* align dst */
21571 movl %edi,%r9d
21572 @@ -120,7 +122,8 @@ ENTRY(__memset)
21573 jnz .Lloop_1
21574
21575 .Lende:
21576 - movq %r10,%rax
21577 + movq %rdx,%rax
21578 + pax_force_retaddr
21579 ret
21580
21581 CFI_RESTORE_STATE
21582 diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
21583 index c9f2d9b..e7fd2c0 100644
21584 --- a/arch/x86/lib/mmx_32.c
21585 +++ b/arch/x86/lib/mmx_32.c
21586 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
21587 {
21588 void *p;
21589 int i;
21590 + unsigned long cr0;
21591
21592 if (unlikely(in_interrupt()))
21593 return __memcpy(to, from, len);
21594 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
21595 kernel_fpu_begin();
21596
21597 __asm__ __volatile__ (
21598 - "1: prefetch (%0)\n" /* This set is 28 bytes */
21599 - " prefetch 64(%0)\n"
21600 - " prefetch 128(%0)\n"
21601 - " prefetch 192(%0)\n"
21602 - " prefetch 256(%0)\n"
21603 + "1: prefetch (%1)\n" /* This set is 28 bytes */
21604 + " prefetch 64(%1)\n"
21605 + " prefetch 128(%1)\n"
21606 + " prefetch 192(%1)\n"
21607 + " prefetch 256(%1)\n"
21608 "2: \n"
21609 ".section .fixup, \"ax\"\n"
21610 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
21611 + "3: \n"
21612 +
21613 +#ifdef CONFIG_PAX_KERNEXEC
21614 + " movl %%cr0, %0\n"
21615 + " movl %0, %%eax\n"
21616 + " andl $0xFFFEFFFF, %%eax\n"
21617 + " movl %%eax, %%cr0\n"
21618 +#endif
21619 +
21620 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
21621 +
21622 +#ifdef CONFIG_PAX_KERNEXEC
21623 + " movl %0, %%cr0\n"
21624 +#endif
21625 +
21626 " jmp 2b\n"
21627 ".previous\n"
21628 _ASM_EXTABLE(1b, 3b)
21629 - : : "r" (from));
21630 + : "=&r" (cr0) : "r" (from) : "ax");
21631
21632 for ( ; i > 5; i--) {
21633 __asm__ __volatile__ (
21634 - "1: prefetch 320(%0)\n"
21635 - "2: movq (%0), %%mm0\n"
21636 - " movq 8(%0), %%mm1\n"
21637 - " movq 16(%0), %%mm2\n"
21638 - " movq 24(%0), %%mm3\n"
21639 - " movq %%mm0, (%1)\n"
21640 - " movq %%mm1, 8(%1)\n"
21641 - " movq %%mm2, 16(%1)\n"
21642 - " movq %%mm3, 24(%1)\n"
21643 - " movq 32(%0), %%mm0\n"
21644 - " movq 40(%0), %%mm1\n"
21645 - " movq 48(%0), %%mm2\n"
21646 - " movq 56(%0), %%mm3\n"
21647 - " movq %%mm0, 32(%1)\n"
21648 - " movq %%mm1, 40(%1)\n"
21649 - " movq %%mm2, 48(%1)\n"
21650 - " movq %%mm3, 56(%1)\n"
21651 + "1: prefetch 320(%1)\n"
21652 + "2: movq (%1), %%mm0\n"
21653 + " movq 8(%1), %%mm1\n"
21654 + " movq 16(%1), %%mm2\n"
21655 + " movq 24(%1), %%mm3\n"
21656 + " movq %%mm0, (%2)\n"
21657 + " movq %%mm1, 8(%2)\n"
21658 + " movq %%mm2, 16(%2)\n"
21659 + " movq %%mm3, 24(%2)\n"
21660 + " movq 32(%1), %%mm0\n"
21661 + " movq 40(%1), %%mm1\n"
21662 + " movq 48(%1), %%mm2\n"
21663 + " movq 56(%1), %%mm3\n"
21664 + " movq %%mm0, 32(%2)\n"
21665 + " movq %%mm1, 40(%2)\n"
21666 + " movq %%mm2, 48(%2)\n"
21667 + " movq %%mm3, 56(%2)\n"
21668 ".section .fixup, \"ax\"\n"
21669 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
21670 + "3:\n"
21671 +
21672 +#ifdef CONFIG_PAX_KERNEXEC
21673 + " movl %%cr0, %0\n"
21674 + " movl %0, %%eax\n"
21675 + " andl $0xFFFEFFFF, %%eax\n"
21676 + " movl %%eax, %%cr0\n"
21677 +#endif
21678 +
21679 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
21680 +
21681 +#ifdef CONFIG_PAX_KERNEXEC
21682 + " movl %0, %%cr0\n"
21683 +#endif
21684 +
21685 " jmp 2b\n"
21686 ".previous\n"
21687 _ASM_EXTABLE(1b, 3b)
21688 - : : "r" (from), "r" (to) : "memory");
21689 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
21690
21691 from += 64;
21692 to += 64;
21693 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
21694 static void fast_copy_page(void *to, void *from)
21695 {
21696 int i;
21697 + unsigned long cr0;
21698
21699 kernel_fpu_begin();
21700
21701 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
21702 * but that is for later. -AV
21703 */
21704 __asm__ __volatile__(
21705 - "1: prefetch (%0)\n"
21706 - " prefetch 64(%0)\n"
21707 - " prefetch 128(%0)\n"
21708 - " prefetch 192(%0)\n"
21709 - " prefetch 256(%0)\n"
21710 + "1: prefetch (%1)\n"
21711 + " prefetch 64(%1)\n"
21712 + " prefetch 128(%1)\n"
21713 + " prefetch 192(%1)\n"
21714 + " prefetch 256(%1)\n"
21715 "2: \n"
21716 ".section .fixup, \"ax\"\n"
21717 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
21718 + "3: \n"
21719 +
21720 +#ifdef CONFIG_PAX_KERNEXEC
21721 + " movl %%cr0, %0\n"
21722 + " movl %0, %%eax\n"
21723 + " andl $0xFFFEFFFF, %%eax\n"
21724 + " movl %%eax, %%cr0\n"
21725 +#endif
21726 +
21727 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
21728 +
21729 +#ifdef CONFIG_PAX_KERNEXEC
21730 + " movl %0, %%cr0\n"
21731 +#endif
21732 +
21733 " jmp 2b\n"
21734 ".previous\n"
21735 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
21736 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
21737
21738 for (i = 0; i < (4096-320)/64; i++) {
21739 __asm__ __volatile__ (
21740 - "1: prefetch 320(%0)\n"
21741 - "2: movq (%0), %%mm0\n"
21742 - " movntq %%mm0, (%1)\n"
21743 - " movq 8(%0), %%mm1\n"
21744 - " movntq %%mm1, 8(%1)\n"
21745 - " movq 16(%0), %%mm2\n"
21746 - " movntq %%mm2, 16(%1)\n"
21747 - " movq 24(%0), %%mm3\n"
21748 - " movntq %%mm3, 24(%1)\n"
21749 - " movq 32(%0), %%mm4\n"
21750 - " movntq %%mm4, 32(%1)\n"
21751 - " movq 40(%0), %%mm5\n"
21752 - " movntq %%mm5, 40(%1)\n"
21753 - " movq 48(%0), %%mm6\n"
21754 - " movntq %%mm6, 48(%1)\n"
21755 - " movq 56(%0), %%mm7\n"
21756 - " movntq %%mm7, 56(%1)\n"
21757 + "1: prefetch 320(%1)\n"
21758 + "2: movq (%1), %%mm0\n"
21759 + " movntq %%mm0, (%2)\n"
21760 + " movq 8(%1), %%mm1\n"
21761 + " movntq %%mm1, 8(%2)\n"
21762 + " movq 16(%1), %%mm2\n"
21763 + " movntq %%mm2, 16(%2)\n"
21764 + " movq 24(%1), %%mm3\n"
21765 + " movntq %%mm3, 24(%2)\n"
21766 + " movq 32(%1), %%mm4\n"
21767 + " movntq %%mm4, 32(%2)\n"
21768 + " movq 40(%1), %%mm5\n"
21769 + " movntq %%mm5, 40(%2)\n"
21770 + " movq 48(%1), %%mm6\n"
21771 + " movntq %%mm6, 48(%2)\n"
21772 + " movq 56(%1), %%mm7\n"
21773 + " movntq %%mm7, 56(%2)\n"
21774 ".section .fixup, \"ax\"\n"
21775 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
21776 + "3:\n"
21777 +
21778 +#ifdef CONFIG_PAX_KERNEXEC
21779 + " movl %%cr0, %0\n"
21780 + " movl %0, %%eax\n"
21781 + " andl $0xFFFEFFFF, %%eax\n"
21782 + " movl %%eax, %%cr0\n"
21783 +#endif
21784 +
21785 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
21786 +
21787 +#ifdef CONFIG_PAX_KERNEXEC
21788 + " movl %0, %%cr0\n"
21789 +#endif
21790 +
21791 " jmp 2b\n"
21792 ".previous\n"
21793 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
21794 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
21795
21796 from += 64;
21797 to += 64;
21798 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
21799 static void fast_copy_page(void *to, void *from)
21800 {
21801 int i;
21802 + unsigned long cr0;
21803
21804 kernel_fpu_begin();
21805
21806 __asm__ __volatile__ (
21807 - "1: prefetch (%0)\n"
21808 - " prefetch 64(%0)\n"
21809 - " prefetch 128(%0)\n"
21810 - " prefetch 192(%0)\n"
21811 - " prefetch 256(%0)\n"
21812 + "1: prefetch (%1)\n"
21813 + " prefetch 64(%1)\n"
21814 + " prefetch 128(%1)\n"
21815 + " prefetch 192(%1)\n"
21816 + " prefetch 256(%1)\n"
21817 "2: \n"
21818 ".section .fixup, \"ax\"\n"
21819 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
21820 + "3: \n"
21821 +
21822 +#ifdef CONFIG_PAX_KERNEXEC
21823 + " movl %%cr0, %0\n"
21824 + " movl %0, %%eax\n"
21825 + " andl $0xFFFEFFFF, %%eax\n"
21826 + " movl %%eax, %%cr0\n"
21827 +#endif
21828 +
21829 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
21830 +
21831 +#ifdef CONFIG_PAX_KERNEXEC
21832 + " movl %0, %%cr0\n"
21833 +#endif
21834 +
21835 " jmp 2b\n"
21836 ".previous\n"
21837 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
21838 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
21839
21840 for (i = 0; i < 4096/64; i++) {
21841 __asm__ __volatile__ (
21842 - "1: prefetch 320(%0)\n"
21843 - "2: movq (%0), %%mm0\n"
21844 - " movq 8(%0), %%mm1\n"
21845 - " movq 16(%0), %%mm2\n"
21846 - " movq 24(%0), %%mm3\n"
21847 - " movq %%mm0, (%1)\n"
21848 - " movq %%mm1, 8(%1)\n"
21849 - " movq %%mm2, 16(%1)\n"
21850 - " movq %%mm3, 24(%1)\n"
21851 - " movq 32(%0), %%mm0\n"
21852 - " movq 40(%0), %%mm1\n"
21853 - " movq 48(%0), %%mm2\n"
21854 - " movq 56(%0), %%mm3\n"
21855 - " movq %%mm0, 32(%1)\n"
21856 - " movq %%mm1, 40(%1)\n"
21857 - " movq %%mm2, 48(%1)\n"
21858 - " movq %%mm3, 56(%1)\n"
21859 + "1: prefetch 320(%1)\n"
21860 + "2: movq (%1), %%mm0\n"
21861 + " movq 8(%1), %%mm1\n"
21862 + " movq 16(%1), %%mm2\n"
21863 + " movq 24(%1), %%mm3\n"
21864 + " movq %%mm0, (%2)\n"
21865 + " movq %%mm1, 8(%2)\n"
21866 + " movq %%mm2, 16(%2)\n"
21867 + " movq %%mm3, 24(%2)\n"
21868 + " movq 32(%1), %%mm0\n"
21869 + " movq 40(%1), %%mm1\n"
21870 + " movq 48(%1), %%mm2\n"
21871 + " movq 56(%1), %%mm3\n"
21872 + " movq %%mm0, 32(%2)\n"
21873 + " movq %%mm1, 40(%2)\n"
21874 + " movq %%mm2, 48(%2)\n"
21875 + " movq %%mm3, 56(%2)\n"
21876 ".section .fixup, \"ax\"\n"
21877 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
21878 + "3:\n"
21879 +
21880 +#ifdef CONFIG_PAX_KERNEXEC
21881 + " movl %%cr0, %0\n"
21882 + " movl %0, %%eax\n"
21883 + " andl $0xFFFEFFFF, %%eax\n"
21884 + " movl %%eax, %%cr0\n"
21885 +#endif
21886 +
21887 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
21888 +
21889 +#ifdef CONFIG_PAX_KERNEXEC
21890 + " movl %0, %%cr0\n"
21891 +#endif
21892 +
21893 " jmp 2b\n"
21894 ".previous\n"
21895 _ASM_EXTABLE(1b, 3b)
21896 - : : "r" (from), "r" (to) : "memory");
21897 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
21898
21899 from += 64;
21900 to += 64;
21901 diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
21902 index 69fa106..adda88b 100644
21903 --- a/arch/x86/lib/msr-reg.S
21904 +++ b/arch/x86/lib/msr-reg.S
21905 @@ -3,6 +3,7 @@
21906 #include <asm/dwarf2.h>
21907 #include <asm/asm.h>
21908 #include <asm/msr.h>
21909 +#include <asm/alternative-asm.h>
21910
21911 #ifdef CONFIG_X86_64
21912 /*
21913 @@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
21914 CFI_STARTPROC
21915 pushq_cfi %rbx
21916 pushq_cfi %rbp
21917 - movq %rdi, %r10 /* Save pointer */
21918 + movq %rdi, %r9 /* Save pointer */
21919 xorl %r11d, %r11d /* Return value */
21920 movl (%rdi), %eax
21921 movl 4(%rdi), %ecx
21922 @@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
21923 movl 28(%rdi), %edi
21924 CFI_REMEMBER_STATE
21925 1: \op
21926 -2: movl %eax, (%r10)
21927 +2: movl %eax, (%r9)
21928 movl %r11d, %eax /* Return value */
21929 - movl %ecx, 4(%r10)
21930 - movl %edx, 8(%r10)
21931 - movl %ebx, 12(%r10)
21932 - movl %ebp, 20(%r10)
21933 - movl %esi, 24(%r10)
21934 - movl %edi, 28(%r10)
21935 + movl %ecx, 4(%r9)
21936 + movl %edx, 8(%r9)
21937 + movl %ebx, 12(%r9)
21938 + movl %ebp, 20(%r9)
21939 + movl %esi, 24(%r9)
21940 + movl %edi, 28(%r9)
21941 popq_cfi %rbp
21942 popq_cfi %rbx
21943 + pax_force_retaddr
21944 ret
21945 3:
21946 CFI_RESTORE_STATE
21947 diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
21948 index 36b0d15..d381858 100644
21949 --- a/arch/x86/lib/putuser.S
21950 +++ b/arch/x86/lib/putuser.S
21951 @@ -15,7 +15,9 @@
21952 #include <asm/thread_info.h>
21953 #include <asm/errno.h>
21954 #include <asm/asm.h>
21955 -
21956 +#include <asm/segment.h>
21957 +#include <asm/pgtable.h>
21958 +#include <asm/alternative-asm.h>
21959
21960 /*
21961 * __put_user_X
21962 @@ -29,52 +31,119 @@
21963 * as they get called from within inline assembly.
21964 */
21965
21966 -#define ENTER CFI_STARTPROC ; \
21967 - GET_THREAD_INFO(%_ASM_BX)
21968 -#define EXIT ret ; \
21969 +#define ENTER CFI_STARTPROC
21970 +#define EXIT pax_force_retaddr; ret ; \
21971 CFI_ENDPROC
21972
21973 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21974 +#define _DEST %_ASM_CX,%_ASM_BX
21975 +#else
21976 +#define _DEST %_ASM_CX
21977 +#endif
21978 +
21979 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
21980 +#define __copyuser_seg gs;
21981 +#else
21982 +#define __copyuser_seg
21983 +#endif
21984 +
21985 .text
21986 ENTRY(__put_user_1)
21987 ENTER
21988 +
21989 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21990 + GET_THREAD_INFO(%_ASM_BX)
21991 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
21992 jae bad_put_user
21993 -1: movb %al,(%_ASM_CX)
21994 +
21995 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21996 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
21997 + cmp %_ASM_BX,%_ASM_CX
21998 + jb 1234f
21999 + xor %ebx,%ebx
22000 +1234:
22001 +#endif
22002 +
22003 +#endif
22004 +
22005 +1: __copyuser_seg movb %al,(_DEST)
22006 xor %eax,%eax
22007 EXIT
22008 ENDPROC(__put_user_1)
22009
22010 ENTRY(__put_user_2)
22011 ENTER
22012 +
22013 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22014 + GET_THREAD_INFO(%_ASM_BX)
22015 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22016 sub $1,%_ASM_BX
22017 cmp %_ASM_BX,%_ASM_CX
22018 jae bad_put_user
22019 -2: movw %ax,(%_ASM_CX)
22020 +
22021 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22022 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22023 + cmp %_ASM_BX,%_ASM_CX
22024 + jb 1234f
22025 + xor %ebx,%ebx
22026 +1234:
22027 +#endif
22028 +
22029 +#endif
22030 +
22031 +2: __copyuser_seg movw %ax,(_DEST)
22032 xor %eax,%eax
22033 EXIT
22034 ENDPROC(__put_user_2)
22035
22036 ENTRY(__put_user_4)
22037 ENTER
22038 +
22039 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22040 + GET_THREAD_INFO(%_ASM_BX)
22041 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22042 sub $3,%_ASM_BX
22043 cmp %_ASM_BX,%_ASM_CX
22044 jae bad_put_user
22045 -3: movl %eax,(%_ASM_CX)
22046 +
22047 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22048 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22049 + cmp %_ASM_BX,%_ASM_CX
22050 + jb 1234f
22051 + xor %ebx,%ebx
22052 +1234:
22053 +#endif
22054 +
22055 +#endif
22056 +
22057 +3: __copyuser_seg movl %eax,(_DEST)
22058 xor %eax,%eax
22059 EXIT
22060 ENDPROC(__put_user_4)
22061
22062 ENTRY(__put_user_8)
22063 ENTER
22064 +
22065 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22066 + GET_THREAD_INFO(%_ASM_BX)
22067 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22068 sub $7,%_ASM_BX
22069 cmp %_ASM_BX,%_ASM_CX
22070 jae bad_put_user
22071 -4: mov %_ASM_AX,(%_ASM_CX)
22072 +
22073 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22074 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22075 + cmp %_ASM_BX,%_ASM_CX
22076 + jb 1234f
22077 + xor %ebx,%ebx
22078 +1234:
22079 +#endif
22080 +
22081 +#endif
22082 +
22083 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
22084 #ifdef CONFIG_X86_32
22085 -5: movl %edx,4(%_ASM_CX)
22086 +5: __copyuser_seg movl %edx,4(_DEST)
22087 #endif
22088 xor %eax,%eax
22089 EXIT
22090 diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
22091 index 1cad221..de671ee 100644
22092 --- a/arch/x86/lib/rwlock.S
22093 +++ b/arch/x86/lib/rwlock.S
22094 @@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
22095 FRAME
22096 0: LOCK_PREFIX
22097 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
22098 +
22099 +#ifdef CONFIG_PAX_REFCOUNT
22100 + jno 1234f
22101 + LOCK_PREFIX
22102 + WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
22103 + int $4
22104 +1234:
22105 + _ASM_EXTABLE(1234b, 1234b)
22106 +#endif
22107 +
22108 1: rep; nop
22109 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
22110 jne 1b
22111 LOCK_PREFIX
22112 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
22113 +
22114 +#ifdef CONFIG_PAX_REFCOUNT
22115 + jno 1234f
22116 + LOCK_PREFIX
22117 + WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
22118 + int $4
22119 +1234:
22120 + _ASM_EXTABLE(1234b, 1234b)
22121 +#endif
22122 +
22123 jnz 0b
22124 ENDFRAME
22125 + pax_force_retaddr
22126 ret
22127 CFI_ENDPROC
22128 END(__write_lock_failed)
22129 @@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
22130 FRAME
22131 0: LOCK_PREFIX
22132 READ_LOCK_SIZE(inc) (%__lock_ptr)
22133 +
22134 +#ifdef CONFIG_PAX_REFCOUNT
22135 + jno 1234f
22136 + LOCK_PREFIX
22137 + READ_LOCK_SIZE(dec) (%__lock_ptr)
22138 + int $4
22139 +1234:
22140 + _ASM_EXTABLE(1234b, 1234b)
22141 +#endif
22142 +
22143 1: rep; nop
22144 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
22145 js 1b
22146 LOCK_PREFIX
22147 READ_LOCK_SIZE(dec) (%__lock_ptr)
22148 +
22149 +#ifdef CONFIG_PAX_REFCOUNT
22150 + jno 1234f
22151 + LOCK_PREFIX
22152 + READ_LOCK_SIZE(inc) (%__lock_ptr)
22153 + int $4
22154 +1234:
22155 + _ASM_EXTABLE(1234b, 1234b)
22156 +#endif
22157 +
22158 js 0b
22159 ENDFRAME
22160 + pax_force_retaddr
22161 ret
22162 CFI_ENDPROC
22163 END(__read_lock_failed)
22164 diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
22165 index 5dff5f0..cadebf4 100644
22166 --- a/arch/x86/lib/rwsem.S
22167 +++ b/arch/x86/lib/rwsem.S
22168 @@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
22169 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
22170 CFI_RESTORE __ASM_REG(dx)
22171 restore_common_regs
22172 + pax_force_retaddr
22173 ret
22174 CFI_ENDPROC
22175 ENDPROC(call_rwsem_down_read_failed)
22176 @@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
22177 movq %rax,%rdi
22178 call rwsem_down_write_failed
22179 restore_common_regs
22180 + pax_force_retaddr
22181 ret
22182 CFI_ENDPROC
22183 ENDPROC(call_rwsem_down_write_failed)
22184 @@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
22185 movq %rax,%rdi
22186 call rwsem_wake
22187 restore_common_regs
22188 -1: ret
22189 +1: pax_force_retaddr
22190 + ret
22191 CFI_ENDPROC
22192 ENDPROC(call_rwsem_wake)
22193
22194 @@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
22195 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
22196 CFI_RESTORE __ASM_REG(dx)
22197 restore_common_regs
22198 + pax_force_retaddr
22199 ret
22200 CFI_ENDPROC
22201 ENDPROC(call_rwsem_downgrade_wake)
22202 diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
22203 index a63efd6..ccecad8 100644
22204 --- a/arch/x86/lib/thunk_64.S
22205 +++ b/arch/x86/lib/thunk_64.S
22206 @@ -8,6 +8,7 @@
22207 #include <linux/linkage.h>
22208 #include <asm/dwarf2.h>
22209 #include <asm/calling.h>
22210 +#include <asm/alternative-asm.h>
22211
22212 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
22213 .macro THUNK name, func, put_ret_addr_in_rdi=0
22214 @@ -41,5 +42,6 @@
22215 SAVE_ARGS
22216 restore:
22217 RESTORE_ARGS
22218 + pax_force_retaddr
22219 ret
22220 CFI_ENDPROC
22221 diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
22222 index e218d5d..35679b4 100644
22223 --- a/arch/x86/lib/usercopy_32.c
22224 +++ b/arch/x86/lib/usercopy_32.c
22225 @@ -43,7 +43,7 @@ do { \
22226 __asm__ __volatile__( \
22227 " testl %1,%1\n" \
22228 " jz 2f\n" \
22229 - "0: lodsb\n" \
22230 + "0: "__copyuser_seg"lodsb\n" \
22231 " stosb\n" \
22232 " testb %%al,%%al\n" \
22233 " jz 1f\n" \
22234 @@ -128,10 +128,12 @@ do { \
22235 int __d0; \
22236 might_fault(); \
22237 __asm__ __volatile__( \
22238 + __COPYUSER_SET_ES \
22239 "0: rep; stosl\n" \
22240 " movl %2,%0\n" \
22241 "1: rep; stosb\n" \
22242 "2:\n" \
22243 + __COPYUSER_RESTORE_ES \
22244 ".section .fixup,\"ax\"\n" \
22245 "3: lea 0(%2,%0,4),%0\n" \
22246 " jmp 2b\n" \
22247 @@ -200,6 +202,7 @@ long strnlen_user(const char __user *s, long n)
22248 might_fault();
22249
22250 __asm__ __volatile__(
22251 + __COPYUSER_SET_ES
22252 " testl %0, %0\n"
22253 " jz 3f\n"
22254 " andl %0,%%ecx\n"
22255 @@ -208,6 +211,7 @@ long strnlen_user(const char __user *s, long n)
22256 " subl %%ecx,%0\n"
22257 " addl %0,%%eax\n"
22258 "1:\n"
22259 + __COPYUSER_RESTORE_ES
22260 ".section .fixup,\"ax\"\n"
22261 "2: xorl %%eax,%%eax\n"
22262 " jmp 1b\n"
22263 @@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
22264
22265 #ifdef CONFIG_X86_INTEL_USERCOPY
22266 static unsigned long
22267 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
22268 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
22269 {
22270 int d0, d1;
22271 __asm__ __volatile__(
22272 @@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
22273 " .align 2,0x90\n"
22274 "3: movl 0(%4), %%eax\n"
22275 "4: movl 4(%4), %%edx\n"
22276 - "5: movl %%eax, 0(%3)\n"
22277 - "6: movl %%edx, 4(%3)\n"
22278 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
22279 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
22280 "7: movl 8(%4), %%eax\n"
22281 "8: movl 12(%4),%%edx\n"
22282 - "9: movl %%eax, 8(%3)\n"
22283 - "10: movl %%edx, 12(%3)\n"
22284 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
22285 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
22286 "11: movl 16(%4), %%eax\n"
22287 "12: movl 20(%4), %%edx\n"
22288 - "13: movl %%eax, 16(%3)\n"
22289 - "14: movl %%edx, 20(%3)\n"
22290 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
22291 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
22292 "15: movl 24(%4), %%eax\n"
22293 "16: movl 28(%4), %%edx\n"
22294 - "17: movl %%eax, 24(%3)\n"
22295 - "18: movl %%edx, 28(%3)\n"
22296 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
22297 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
22298 "19: movl 32(%4), %%eax\n"
22299 "20: movl 36(%4), %%edx\n"
22300 - "21: movl %%eax, 32(%3)\n"
22301 - "22: movl %%edx, 36(%3)\n"
22302 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
22303 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
22304 "23: movl 40(%4), %%eax\n"
22305 "24: movl 44(%4), %%edx\n"
22306 - "25: movl %%eax, 40(%3)\n"
22307 - "26: movl %%edx, 44(%3)\n"
22308 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
22309 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
22310 "27: movl 48(%4), %%eax\n"
22311 "28: movl 52(%4), %%edx\n"
22312 - "29: movl %%eax, 48(%3)\n"
22313 - "30: movl %%edx, 52(%3)\n"
22314 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
22315 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
22316 "31: movl 56(%4), %%eax\n"
22317 "32: movl 60(%4), %%edx\n"
22318 - "33: movl %%eax, 56(%3)\n"
22319 - "34: movl %%edx, 60(%3)\n"
22320 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
22321 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
22322 " addl $-64, %0\n"
22323 " addl $64, %4\n"
22324 " addl $64, %3\n"
22325 @@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
22326 " shrl $2, %0\n"
22327 " andl $3, %%eax\n"
22328 " cld\n"
22329 + __COPYUSER_SET_ES
22330 "99: rep; movsl\n"
22331 "36: movl %%eax, %0\n"
22332 "37: rep; movsb\n"
22333 "100:\n"
22334 + __COPYUSER_RESTORE_ES
22335 + ".section .fixup,\"ax\"\n"
22336 + "101: lea 0(%%eax,%0,4),%0\n"
22337 + " jmp 100b\n"
22338 + ".previous\n"
22339 + ".section __ex_table,\"a\"\n"
22340 + " .align 4\n"
22341 + " .long 1b,100b\n"
22342 + " .long 2b,100b\n"
22343 + " .long 3b,100b\n"
22344 + " .long 4b,100b\n"
22345 + " .long 5b,100b\n"
22346 + " .long 6b,100b\n"
22347 + " .long 7b,100b\n"
22348 + " .long 8b,100b\n"
22349 + " .long 9b,100b\n"
22350 + " .long 10b,100b\n"
22351 + " .long 11b,100b\n"
22352 + " .long 12b,100b\n"
22353 + " .long 13b,100b\n"
22354 + " .long 14b,100b\n"
22355 + " .long 15b,100b\n"
22356 + " .long 16b,100b\n"
22357 + " .long 17b,100b\n"
22358 + " .long 18b,100b\n"
22359 + " .long 19b,100b\n"
22360 + " .long 20b,100b\n"
22361 + " .long 21b,100b\n"
22362 + " .long 22b,100b\n"
22363 + " .long 23b,100b\n"
22364 + " .long 24b,100b\n"
22365 + " .long 25b,100b\n"
22366 + " .long 26b,100b\n"
22367 + " .long 27b,100b\n"
22368 + " .long 28b,100b\n"
22369 + " .long 29b,100b\n"
22370 + " .long 30b,100b\n"
22371 + " .long 31b,100b\n"
22372 + " .long 32b,100b\n"
22373 + " .long 33b,100b\n"
22374 + " .long 34b,100b\n"
22375 + " .long 35b,100b\n"
22376 + " .long 36b,100b\n"
22377 + " .long 37b,100b\n"
22378 + " .long 99b,101b\n"
22379 + ".previous"
22380 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
22381 + : "1"(to), "2"(from), "0"(size)
22382 + : "eax", "edx", "memory");
22383 + return size;
22384 +}
22385 +
22386 +static unsigned long
22387 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
22388 +{
22389 + int d0, d1;
22390 + __asm__ __volatile__(
22391 + " .align 2,0x90\n"
22392 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
22393 + " cmpl $67, %0\n"
22394 + " jbe 3f\n"
22395 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
22396 + " .align 2,0x90\n"
22397 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
22398 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
22399 + "5: movl %%eax, 0(%3)\n"
22400 + "6: movl %%edx, 4(%3)\n"
22401 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
22402 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
22403 + "9: movl %%eax, 8(%3)\n"
22404 + "10: movl %%edx, 12(%3)\n"
22405 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
22406 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
22407 + "13: movl %%eax, 16(%3)\n"
22408 + "14: movl %%edx, 20(%3)\n"
22409 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
22410 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
22411 + "17: movl %%eax, 24(%3)\n"
22412 + "18: movl %%edx, 28(%3)\n"
22413 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
22414 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
22415 + "21: movl %%eax, 32(%3)\n"
22416 + "22: movl %%edx, 36(%3)\n"
22417 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
22418 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
22419 + "25: movl %%eax, 40(%3)\n"
22420 + "26: movl %%edx, 44(%3)\n"
22421 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
22422 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
22423 + "29: movl %%eax, 48(%3)\n"
22424 + "30: movl %%edx, 52(%3)\n"
22425 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
22426 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
22427 + "33: movl %%eax, 56(%3)\n"
22428 + "34: movl %%edx, 60(%3)\n"
22429 + " addl $-64, %0\n"
22430 + " addl $64, %4\n"
22431 + " addl $64, %3\n"
22432 + " cmpl $63, %0\n"
22433 + " ja 1b\n"
22434 + "35: movl %0, %%eax\n"
22435 + " shrl $2, %0\n"
22436 + " andl $3, %%eax\n"
22437 + " cld\n"
22438 + "99: rep; "__copyuser_seg" movsl\n"
22439 + "36: movl %%eax, %0\n"
22440 + "37: rep; "__copyuser_seg" movsb\n"
22441 + "100:\n"
22442 ".section .fixup,\"ax\"\n"
22443 "101: lea 0(%%eax,%0,4),%0\n"
22444 " jmp 100b\n"
22445 @@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
22446 int d0, d1;
22447 __asm__ __volatile__(
22448 " .align 2,0x90\n"
22449 - "0: movl 32(%4), %%eax\n"
22450 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
22451 " cmpl $67, %0\n"
22452 " jbe 2f\n"
22453 - "1: movl 64(%4), %%eax\n"
22454 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
22455 " .align 2,0x90\n"
22456 - "2: movl 0(%4), %%eax\n"
22457 - "21: movl 4(%4), %%edx\n"
22458 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
22459 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
22460 " movl %%eax, 0(%3)\n"
22461 " movl %%edx, 4(%3)\n"
22462 - "3: movl 8(%4), %%eax\n"
22463 - "31: movl 12(%4),%%edx\n"
22464 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
22465 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
22466 " movl %%eax, 8(%3)\n"
22467 " movl %%edx, 12(%3)\n"
22468 - "4: movl 16(%4), %%eax\n"
22469 - "41: movl 20(%4), %%edx\n"
22470 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
22471 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
22472 " movl %%eax, 16(%3)\n"
22473 " movl %%edx, 20(%3)\n"
22474 - "10: movl 24(%4), %%eax\n"
22475 - "51: movl 28(%4), %%edx\n"
22476 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
22477 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
22478 " movl %%eax, 24(%3)\n"
22479 " movl %%edx, 28(%3)\n"
22480 - "11: movl 32(%4), %%eax\n"
22481 - "61: movl 36(%4), %%edx\n"
22482 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
22483 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
22484 " movl %%eax, 32(%3)\n"
22485 " movl %%edx, 36(%3)\n"
22486 - "12: movl 40(%4), %%eax\n"
22487 - "71: movl 44(%4), %%edx\n"
22488 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
22489 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
22490 " movl %%eax, 40(%3)\n"
22491 " movl %%edx, 44(%3)\n"
22492 - "13: movl 48(%4), %%eax\n"
22493 - "81: movl 52(%4), %%edx\n"
22494 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
22495 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
22496 " movl %%eax, 48(%3)\n"
22497 " movl %%edx, 52(%3)\n"
22498 - "14: movl 56(%4), %%eax\n"
22499 - "91: movl 60(%4), %%edx\n"
22500 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
22501 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
22502 " movl %%eax, 56(%3)\n"
22503 " movl %%edx, 60(%3)\n"
22504 " addl $-64, %0\n"
22505 @@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
22506 " shrl $2, %0\n"
22507 " andl $3, %%eax\n"
22508 " cld\n"
22509 - "6: rep; movsl\n"
22510 + "6: rep; "__copyuser_seg" movsl\n"
22511 " movl %%eax,%0\n"
22512 - "7: rep; movsb\n"
22513 + "7: rep; "__copyuser_seg" movsb\n"
22514 "8:\n"
22515 ".section .fixup,\"ax\"\n"
22516 "9: lea 0(%%eax,%0,4),%0\n"
22517 @@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
22518
22519 __asm__ __volatile__(
22520 " .align 2,0x90\n"
22521 - "0: movl 32(%4), %%eax\n"
22522 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
22523 " cmpl $67, %0\n"
22524 " jbe 2f\n"
22525 - "1: movl 64(%4), %%eax\n"
22526 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
22527 " .align 2,0x90\n"
22528 - "2: movl 0(%4), %%eax\n"
22529 - "21: movl 4(%4), %%edx\n"
22530 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
22531 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
22532 " movnti %%eax, 0(%3)\n"
22533 " movnti %%edx, 4(%3)\n"
22534 - "3: movl 8(%4), %%eax\n"
22535 - "31: movl 12(%4),%%edx\n"
22536 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
22537 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
22538 " movnti %%eax, 8(%3)\n"
22539 " movnti %%edx, 12(%3)\n"
22540 - "4: movl 16(%4), %%eax\n"
22541 - "41: movl 20(%4), %%edx\n"
22542 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
22543 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
22544 " movnti %%eax, 16(%3)\n"
22545 " movnti %%edx, 20(%3)\n"
22546 - "10: movl 24(%4), %%eax\n"
22547 - "51: movl 28(%4), %%edx\n"
22548 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
22549 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
22550 " movnti %%eax, 24(%3)\n"
22551 " movnti %%edx, 28(%3)\n"
22552 - "11: movl 32(%4), %%eax\n"
22553 - "61: movl 36(%4), %%edx\n"
22554 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
22555 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
22556 " movnti %%eax, 32(%3)\n"
22557 " movnti %%edx, 36(%3)\n"
22558 - "12: movl 40(%4), %%eax\n"
22559 - "71: movl 44(%4), %%edx\n"
22560 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
22561 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
22562 " movnti %%eax, 40(%3)\n"
22563 " movnti %%edx, 44(%3)\n"
22564 - "13: movl 48(%4), %%eax\n"
22565 - "81: movl 52(%4), %%edx\n"
22566 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
22567 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
22568 " movnti %%eax, 48(%3)\n"
22569 " movnti %%edx, 52(%3)\n"
22570 - "14: movl 56(%4), %%eax\n"
22571 - "91: movl 60(%4), %%edx\n"
22572 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
22573 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
22574 " movnti %%eax, 56(%3)\n"
22575 " movnti %%edx, 60(%3)\n"
22576 " addl $-64, %0\n"
22577 @@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
22578 " shrl $2, %0\n"
22579 " andl $3, %%eax\n"
22580 " cld\n"
22581 - "6: rep; movsl\n"
22582 + "6: rep; "__copyuser_seg" movsl\n"
22583 " movl %%eax,%0\n"
22584 - "7: rep; movsb\n"
22585 + "7: rep; "__copyuser_seg" movsb\n"
22586 "8:\n"
22587 ".section .fixup,\"ax\"\n"
22588 "9: lea 0(%%eax,%0,4),%0\n"
22589 @@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
22590
22591 __asm__ __volatile__(
22592 " .align 2,0x90\n"
22593 - "0: movl 32(%4), %%eax\n"
22594 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
22595 " cmpl $67, %0\n"
22596 " jbe 2f\n"
22597 - "1: movl 64(%4), %%eax\n"
22598 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
22599 " .align 2,0x90\n"
22600 - "2: movl 0(%4), %%eax\n"
22601 - "21: movl 4(%4), %%edx\n"
22602 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
22603 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
22604 " movnti %%eax, 0(%3)\n"
22605 " movnti %%edx, 4(%3)\n"
22606 - "3: movl 8(%4), %%eax\n"
22607 - "31: movl 12(%4),%%edx\n"
22608 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
22609 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
22610 " movnti %%eax, 8(%3)\n"
22611 " movnti %%edx, 12(%3)\n"
22612 - "4: movl 16(%4), %%eax\n"
22613 - "41: movl 20(%4), %%edx\n"
22614 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
22615 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
22616 " movnti %%eax, 16(%3)\n"
22617 " movnti %%edx, 20(%3)\n"
22618 - "10: movl 24(%4), %%eax\n"
22619 - "51: movl 28(%4), %%edx\n"
22620 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
22621 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
22622 " movnti %%eax, 24(%3)\n"
22623 " movnti %%edx, 28(%3)\n"
22624 - "11: movl 32(%4), %%eax\n"
22625 - "61: movl 36(%4), %%edx\n"
22626 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
22627 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
22628 " movnti %%eax, 32(%3)\n"
22629 " movnti %%edx, 36(%3)\n"
22630 - "12: movl 40(%4), %%eax\n"
22631 - "71: movl 44(%4), %%edx\n"
22632 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
22633 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
22634 " movnti %%eax, 40(%3)\n"
22635 " movnti %%edx, 44(%3)\n"
22636 - "13: movl 48(%4), %%eax\n"
22637 - "81: movl 52(%4), %%edx\n"
22638 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
22639 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
22640 " movnti %%eax, 48(%3)\n"
22641 " movnti %%edx, 52(%3)\n"
22642 - "14: movl 56(%4), %%eax\n"
22643 - "91: movl 60(%4), %%edx\n"
22644 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
22645 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
22646 " movnti %%eax, 56(%3)\n"
22647 " movnti %%edx, 60(%3)\n"
22648 " addl $-64, %0\n"
22649 @@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
22650 " shrl $2, %0\n"
22651 " andl $3, %%eax\n"
22652 " cld\n"
22653 - "6: rep; movsl\n"
22654 + "6: rep; "__copyuser_seg" movsl\n"
22655 " movl %%eax,%0\n"
22656 - "7: rep; movsb\n"
22657 + "7: rep; "__copyuser_seg" movsb\n"
22658 "8:\n"
22659 ".section .fixup,\"ax\"\n"
22660 "9: lea 0(%%eax,%0,4),%0\n"
22661 @@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
22662 */
22663 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
22664 unsigned long size);
22665 -unsigned long __copy_user_intel(void __user *to, const void *from,
22666 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
22667 + unsigned long size);
22668 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
22669 unsigned long size);
22670 unsigned long __copy_user_zeroing_intel_nocache(void *to,
22671 const void __user *from, unsigned long size);
22672 #endif /* CONFIG_X86_INTEL_USERCOPY */
22673
22674 /* Generic arbitrary sized copy. */
22675 -#define __copy_user(to, from, size) \
22676 +#define __copy_user(to, from, size, prefix, set, restore) \
22677 do { \
22678 int __d0, __d1, __d2; \
22679 __asm__ __volatile__( \
22680 + set \
22681 " cmp $7,%0\n" \
22682 " jbe 1f\n" \
22683 " movl %1,%0\n" \
22684 " negl %0\n" \
22685 " andl $7,%0\n" \
22686 " subl %0,%3\n" \
22687 - "4: rep; movsb\n" \
22688 + "4: rep; "prefix"movsb\n" \
22689 " movl %3,%0\n" \
22690 " shrl $2,%0\n" \
22691 " andl $3,%3\n" \
22692 " .align 2,0x90\n" \
22693 - "0: rep; movsl\n" \
22694 + "0: rep; "prefix"movsl\n" \
22695 " movl %3,%0\n" \
22696 - "1: rep; movsb\n" \
22697 + "1: rep; "prefix"movsb\n" \
22698 "2:\n" \
22699 + restore \
22700 ".section .fixup,\"ax\"\n" \
22701 "5: addl %3,%0\n" \
22702 " jmp 2b\n" \
22703 @@ -682,14 +799,14 @@ do { \
22704 " negl %0\n" \
22705 " andl $7,%0\n" \
22706 " subl %0,%3\n" \
22707 - "4: rep; movsb\n" \
22708 + "4: rep; "__copyuser_seg"movsb\n" \
22709 " movl %3,%0\n" \
22710 " shrl $2,%0\n" \
22711 " andl $3,%3\n" \
22712 " .align 2,0x90\n" \
22713 - "0: rep; movsl\n" \
22714 + "0: rep; "__copyuser_seg"movsl\n" \
22715 " movl %3,%0\n" \
22716 - "1: rep; movsb\n" \
22717 + "1: rep; "__copyuser_seg"movsb\n" \
22718 "2:\n" \
22719 ".section .fixup,\"ax\"\n" \
22720 "5: addl %3,%0\n" \
22721 @@ -775,9 +892,9 @@ survive:
22722 }
22723 #endif
22724 if (movsl_is_ok(to, from, n))
22725 - __copy_user(to, from, n);
22726 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
22727 else
22728 - n = __copy_user_intel(to, from, n);
22729 + n = __generic_copy_to_user_intel(to, from, n);
22730 return n;
22731 }
22732 EXPORT_SYMBOL(__copy_to_user_ll);
22733 @@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
22734 unsigned long n)
22735 {
22736 if (movsl_is_ok(to, from, n))
22737 - __copy_user(to, from, n);
22738 + __copy_user(to, from, n, __copyuser_seg, "", "");
22739 else
22740 - n = __copy_user_intel((void __user *)to,
22741 - (const void *)from, n);
22742 + n = __generic_copy_from_user_intel(to, from, n);
22743 return n;
22744 }
22745 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
22746 @@ -827,65 +943,50 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
22747 if (n > 64 && cpu_has_xmm2)
22748 n = __copy_user_intel_nocache(to, from, n);
22749 else
22750 - __copy_user(to, from, n);
22751 + __copy_user(to, from, n, __copyuser_seg, "", "");
22752 #else
22753 - __copy_user(to, from, n);
22754 + __copy_user(to, from, n, __copyuser_seg, "", "");
22755 #endif
22756 return n;
22757 }
22758 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
22759
22760 -/**
22761 - * copy_to_user: - Copy a block of data into user space.
22762 - * @to: Destination address, in user space.
22763 - * @from: Source address, in kernel space.
22764 - * @n: Number of bytes to copy.
22765 - *
22766 - * Context: User context only. This function may sleep.
22767 - *
22768 - * Copy data from kernel space to user space.
22769 - *
22770 - * Returns number of bytes that could not be copied.
22771 - * On success, this will be zero.
22772 - */
22773 -unsigned long
22774 -copy_to_user(void __user *to, const void *from, unsigned long n)
22775 -{
22776 - if (access_ok(VERIFY_WRITE, to, n))
22777 - n = __copy_to_user(to, from, n);
22778 - return n;
22779 -}
22780 -EXPORT_SYMBOL(copy_to_user);
22781 -
22782 -/**
22783 - * copy_from_user: - Copy a block of data from user space.
22784 - * @to: Destination address, in kernel space.
22785 - * @from: Source address, in user space.
22786 - * @n: Number of bytes to copy.
22787 - *
22788 - * Context: User context only. This function may sleep.
22789 - *
22790 - * Copy data from user space to kernel space.
22791 - *
22792 - * Returns number of bytes that could not be copied.
22793 - * On success, this will be zero.
22794 - *
22795 - * If some data could not be copied, this function will pad the copied
22796 - * data to the requested size using zero bytes.
22797 - */
22798 -unsigned long
22799 -_copy_from_user(void *to, const void __user *from, unsigned long n)
22800 -{
22801 - if (access_ok(VERIFY_READ, from, n))
22802 - n = __copy_from_user(to, from, n);
22803 - else
22804 - memset(to, 0, n);
22805 - return n;
22806 -}
22807 -EXPORT_SYMBOL(_copy_from_user);
22808 -
22809 void copy_from_user_overflow(void)
22810 {
22811 WARN(1, "Buffer overflow detected!\n");
22812 }
22813 EXPORT_SYMBOL(copy_from_user_overflow);
22814 +
22815 +void copy_to_user_overflow(void)
22816 +{
22817 + WARN(1, "Buffer overflow detected!\n");
22818 +}
22819 +EXPORT_SYMBOL(copy_to_user_overflow);
22820 +
22821 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22822 +void __set_fs(mm_segment_t x)
22823 +{
22824 + switch (x.seg) {
22825 + case 0:
22826 + loadsegment(gs, 0);
22827 + break;
22828 + case TASK_SIZE_MAX:
22829 + loadsegment(gs, __USER_DS);
22830 + break;
22831 + case -1UL:
22832 + loadsegment(gs, __KERNEL_DS);
22833 + break;
22834 + default:
22835 + BUG();
22836 + }
22837 + return;
22838 +}
22839 +EXPORT_SYMBOL(__set_fs);
22840 +
22841 +void set_fs(mm_segment_t x)
22842 +{
22843 + current_thread_info()->addr_limit = x;
22844 + __set_fs(x);
22845 +}
22846 +EXPORT_SYMBOL(set_fs);
22847 +#endif
22848 diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
22849 index b7c2849..8633ad8 100644
22850 --- a/arch/x86/lib/usercopy_64.c
22851 +++ b/arch/x86/lib/usercopy_64.c
22852 @@ -42,6 +42,12 @@ long
22853 __strncpy_from_user(char *dst, const char __user *src, long count)
22854 {
22855 long res;
22856 +
22857 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22858 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
22859 + src += PAX_USER_SHADOW_BASE;
22860 +#endif
22861 +
22862 __do_strncpy_from_user(dst, src, count, res);
22863 return res;
22864 }
22865 @@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
22866 {
22867 long __d0;
22868 might_fault();
22869 +
22870 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22871 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
22872 + addr += PAX_USER_SHADOW_BASE;
22873 +#endif
22874 +
22875 /* no memory constraint because it doesn't change any memory gcc knows
22876 about */
22877 asm volatile(
22878 @@ -149,12 +161,20 @@ long strlen_user(const char __user *s)
22879 }
22880 EXPORT_SYMBOL(strlen_user);
22881
22882 -unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
22883 +unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
22884 {
22885 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
22886 - return copy_user_generic((__force void *)to, (__force void *)from, len);
22887 - }
22888 - return len;
22889 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
22890 +
22891 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22892 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
22893 + to += PAX_USER_SHADOW_BASE;
22894 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
22895 + from += PAX_USER_SHADOW_BASE;
22896 +#endif
22897 +
22898 + return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
22899 + }
22900 + return len;
22901 }
22902 EXPORT_SYMBOL(copy_in_user);
22903
22904 @@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
22905 * it is not necessary to optimize tail handling.
22906 */
22907 unsigned long
22908 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
22909 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
22910 {
22911 char c;
22912 unsigned zero_len;
22913 diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
22914 index 1fb85db..8b3540b 100644
22915 --- a/arch/x86/mm/extable.c
22916 +++ b/arch/x86/mm/extable.c
22917 @@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs)
22918 const struct exception_table_entry *fixup;
22919
22920 #ifdef CONFIG_PNPBIOS
22921 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
22922 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
22923 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
22924 extern u32 pnp_bios_is_utter_crap;
22925 pnp_bios_is_utter_crap = 1;
22926 diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
22927 index f0b4caf..d92fd42 100644
22928 --- a/arch/x86/mm/fault.c
22929 +++ b/arch/x86/mm/fault.c
22930 @@ -13,11 +13,18 @@
22931 #include <linux/perf_event.h> /* perf_sw_event */
22932 #include <linux/hugetlb.h> /* hstate_index_to_shift */
22933 #include <linux/prefetch.h> /* prefetchw */
22934 +#include <linux/unistd.h>
22935 +#include <linux/compiler.h>
22936
22937 #include <asm/traps.h> /* dotraplinkage, ... */
22938 #include <asm/pgalloc.h> /* pgd_*(), ... */
22939 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
22940 #include <asm/fixmap.h> /* VSYSCALL_START */
22941 +#include <asm/tlbflush.h>
22942 +
22943 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22944 +#include <asm/stacktrace.h>
22945 +#endif
22946
22947 /*
22948 * Page fault error code bits:
22949 @@ -55,7 +62,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
22950 int ret = 0;
22951
22952 /* kprobe_running() needs smp_processor_id() */
22953 - if (kprobes_built_in() && !user_mode_vm(regs)) {
22954 + if (kprobes_built_in() && !user_mode(regs)) {
22955 preempt_disable();
22956 if (kprobe_running() && kprobe_fault_handler(regs, 14))
22957 ret = 1;
22958 @@ -116,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
22959 return !instr_lo || (instr_lo>>1) == 1;
22960 case 0x00:
22961 /* Prefetch instruction is 0x0F0D or 0x0F18 */
22962 - if (probe_kernel_address(instr, opcode))
22963 + if (user_mode(regs)) {
22964 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
22965 + return 0;
22966 + } else if (probe_kernel_address(instr, opcode))
22967 return 0;
22968
22969 *prefetch = (instr_lo == 0xF) &&
22970 @@ -150,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
22971 while (instr < max_instr) {
22972 unsigned char opcode;
22973
22974 - if (probe_kernel_address(instr, opcode))
22975 + if (user_mode(regs)) {
22976 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
22977 + break;
22978 + } else if (probe_kernel_address(instr, opcode))
22979 break;
22980
22981 instr++;
22982 @@ -181,6 +194,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
22983 force_sig_info(si_signo, &info, tsk);
22984 }
22985
22986 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
22987 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
22988 +#endif
22989 +
22990 +#ifdef CONFIG_PAX_EMUTRAMP
22991 +static int pax_handle_fetch_fault(struct pt_regs *regs);
22992 +#endif
22993 +
22994 +#ifdef CONFIG_PAX_PAGEEXEC
22995 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
22996 +{
22997 + pgd_t *pgd;
22998 + pud_t *pud;
22999 + pmd_t *pmd;
23000 +
23001 + pgd = pgd_offset(mm, address);
23002 + if (!pgd_present(*pgd))
23003 + return NULL;
23004 + pud = pud_offset(pgd, address);
23005 + if (!pud_present(*pud))
23006 + return NULL;
23007 + pmd = pmd_offset(pud, address);
23008 + if (!pmd_present(*pmd))
23009 + return NULL;
23010 + return pmd;
23011 +}
23012 +#endif
23013 +
23014 DEFINE_SPINLOCK(pgd_lock);
23015 LIST_HEAD(pgd_list);
23016
23017 @@ -231,10 +272,22 @@ void vmalloc_sync_all(void)
23018 for (address = VMALLOC_START & PMD_MASK;
23019 address >= TASK_SIZE && address < FIXADDR_TOP;
23020 address += PMD_SIZE) {
23021 +
23022 +#ifdef CONFIG_PAX_PER_CPU_PGD
23023 + unsigned long cpu;
23024 +#else
23025 struct page *page;
23026 +#endif
23027
23028 spin_lock(&pgd_lock);
23029 +
23030 +#ifdef CONFIG_PAX_PER_CPU_PGD
23031 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
23032 + pgd_t *pgd = get_cpu_pgd(cpu);
23033 + pmd_t *ret;
23034 +#else
23035 list_for_each_entry(page, &pgd_list, lru) {
23036 + pgd_t *pgd = page_address(page);
23037 spinlock_t *pgt_lock;
23038 pmd_t *ret;
23039
23040 @@ -242,8 +295,13 @@ void vmalloc_sync_all(void)
23041 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
23042
23043 spin_lock(pgt_lock);
23044 - ret = vmalloc_sync_one(page_address(page), address);
23045 +#endif
23046 +
23047 + ret = vmalloc_sync_one(pgd, address);
23048 +
23049 +#ifndef CONFIG_PAX_PER_CPU_PGD
23050 spin_unlock(pgt_lock);
23051 +#endif
23052
23053 if (!ret)
23054 break;
23055 @@ -277,6 +335,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
23056 * an interrupt in the middle of a task switch..
23057 */
23058 pgd_paddr = read_cr3();
23059 +
23060 +#ifdef CONFIG_PAX_PER_CPU_PGD
23061 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
23062 +#endif
23063 +
23064 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
23065 if (!pmd_k)
23066 return -1;
23067 @@ -372,7 +435,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
23068 * happen within a race in page table update. In the later
23069 * case just flush:
23070 */
23071 +
23072 +#ifdef CONFIG_PAX_PER_CPU_PGD
23073 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
23074 + pgd = pgd_offset_cpu(smp_processor_id(), address);
23075 +#else
23076 pgd = pgd_offset(current->active_mm, address);
23077 +#endif
23078 +
23079 pgd_ref = pgd_offset_k(address);
23080 if (pgd_none(*pgd_ref))
23081 return -1;
23082 @@ -540,7 +610,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
23083 static int is_errata100(struct pt_regs *regs, unsigned long address)
23084 {
23085 #ifdef CONFIG_X86_64
23086 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
23087 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
23088 return 1;
23089 #endif
23090 return 0;
23091 @@ -567,7 +637,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
23092 }
23093
23094 static const char nx_warning[] = KERN_CRIT
23095 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
23096 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
23097
23098 static void
23099 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
23100 @@ -576,15 +646,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
23101 if (!oops_may_print())
23102 return;
23103
23104 - if (error_code & PF_INSTR) {
23105 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
23106 unsigned int level;
23107
23108 pte_t *pte = lookup_address(address, &level);
23109
23110 if (pte && pte_present(*pte) && !pte_exec(*pte))
23111 - printk(nx_warning, current_uid());
23112 + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
23113 }
23114
23115 +#ifdef CONFIG_PAX_KERNEXEC
23116 + if (init_mm.start_code <= address && address < init_mm.end_code) {
23117 + if (current->signal->curr_ip)
23118 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
23119 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
23120 + else
23121 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
23122 + current->comm, task_pid_nr(current), current_uid(), current_euid());
23123 + }
23124 +#endif
23125 +
23126 printk(KERN_ALERT "BUG: unable to handle kernel ");
23127 if (address < PAGE_SIZE)
23128 printk(KERN_CONT "NULL pointer dereference");
23129 @@ -748,6 +829,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
23130 }
23131 #endif
23132
23133 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23134 + if (pax_is_fetch_fault(regs, error_code, address)) {
23135 +
23136 +#ifdef CONFIG_PAX_EMUTRAMP
23137 + switch (pax_handle_fetch_fault(regs)) {
23138 + case 2:
23139 + return;
23140 + }
23141 +#endif
23142 +
23143 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
23144 + do_group_exit(SIGKILL);
23145 + }
23146 +#endif
23147 +
23148 if (unlikely(show_unhandled_signals))
23149 show_signal_msg(regs, error_code, address, tsk);
23150
23151 @@ -844,7 +940,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
23152 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
23153 printk(KERN_ERR
23154 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
23155 - tsk->comm, tsk->pid, address);
23156 + tsk->comm, task_pid_nr(tsk), address);
23157 code = BUS_MCEERR_AR;
23158 }
23159 #endif
23160 @@ -900,6 +996,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
23161 return 1;
23162 }
23163
23164 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
23165 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
23166 +{
23167 + pte_t *pte;
23168 + pmd_t *pmd;
23169 + spinlock_t *ptl;
23170 + unsigned char pte_mask;
23171 +
23172 + if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
23173 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
23174 + return 0;
23175 +
23176 + /* PaX: it's our fault, let's handle it if we can */
23177 +
23178 + /* PaX: take a look at read faults before acquiring any locks */
23179 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
23180 + /* instruction fetch attempt from a protected page in user mode */
23181 + up_read(&mm->mmap_sem);
23182 +
23183 +#ifdef CONFIG_PAX_EMUTRAMP
23184 + switch (pax_handle_fetch_fault(regs)) {
23185 + case 2:
23186 + return 1;
23187 + }
23188 +#endif
23189 +
23190 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
23191 + do_group_exit(SIGKILL);
23192 + }
23193 +
23194 + pmd = pax_get_pmd(mm, address);
23195 + if (unlikely(!pmd))
23196 + return 0;
23197 +
23198 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
23199 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
23200 + pte_unmap_unlock(pte, ptl);
23201 + return 0;
23202 + }
23203 +
23204 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
23205 + /* write attempt to a protected page in user mode */
23206 + pte_unmap_unlock(pte, ptl);
23207 + return 0;
23208 + }
23209 +
23210 +#ifdef CONFIG_SMP
23211 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
23212 +#else
23213 + if (likely(address > get_limit(regs->cs)))
23214 +#endif
23215 + {
23216 + set_pte(pte, pte_mkread(*pte));
23217 + __flush_tlb_one(address);
23218 + pte_unmap_unlock(pte, ptl);
23219 + up_read(&mm->mmap_sem);
23220 + return 1;
23221 + }
23222 +
23223 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
23224 +
23225 + /*
23226 + * PaX: fill DTLB with user rights and retry
23227 + */
23228 + __asm__ __volatile__ (
23229 + "orb %2,(%1)\n"
23230 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
23231 +/*
23232 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
23233 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
23234 + * page fault when examined during a TLB load attempt. this is true not only
23235 + * for PTEs holding a non-present entry but also present entries that will
23236 + * raise a page fault (such as those set up by PaX, or the copy-on-write
23237 + * mechanism). in effect it means that we do *not* need to flush the TLBs
23238 + * for our target pages since their PTEs are simply not in the TLBs at all.
23239 +
23240 + * the best thing in omitting it is that we gain around 15-20% speed in the
23241 + * fast path of the page fault handler and can get rid of tracing since we
23242 + * can no longer flush unintended entries.
23243 + */
23244 + "invlpg (%0)\n"
23245 +#endif
23246 + __copyuser_seg"testb $0,(%0)\n"
23247 + "xorb %3,(%1)\n"
23248 + :
23249 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
23250 + : "memory", "cc");
23251 + pte_unmap_unlock(pte, ptl);
23252 + up_read(&mm->mmap_sem);
23253 + return 1;
23254 +}
23255 +#endif
23256 +
23257 /*
23258 * Handle a spurious fault caused by a stale TLB entry.
23259 *
23260 @@ -972,6 +1161,9 @@ int show_unhandled_signals = 1;
23261 static inline int
23262 access_error(unsigned long error_code, struct vm_area_struct *vma)
23263 {
23264 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
23265 + return 1;
23266 +
23267 if (error_code & PF_WRITE) {
23268 /* write, present and write, not present: */
23269 if (unlikely(!(vma->vm_flags & VM_WRITE)))
23270 @@ -1005,18 +1197,32 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
23271 {
23272 struct vm_area_struct *vma;
23273 struct task_struct *tsk;
23274 - unsigned long address;
23275 struct mm_struct *mm;
23276 int fault;
23277 int write = error_code & PF_WRITE;
23278 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
23279 (write ? FAULT_FLAG_WRITE : 0);
23280
23281 - tsk = current;
23282 - mm = tsk->mm;
23283 -
23284 /* Get the faulting address: */
23285 - address = read_cr2();
23286 + unsigned long address = read_cr2();
23287 +
23288 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23289 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
23290 + if (!search_exception_tables(regs->ip)) {
23291 + bad_area_nosemaphore(regs, error_code, address);
23292 + return;
23293 + }
23294 + if (address < PAX_USER_SHADOW_BASE) {
23295 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
23296 + printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
23297 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
23298 + } else
23299 + address -= PAX_USER_SHADOW_BASE;
23300 + }
23301 +#endif
23302 +
23303 + tsk = current;
23304 + mm = tsk->mm;
23305
23306 /*
23307 * Detect and handle instructions that would cause a page fault for
23308 @@ -1077,7 +1283,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
23309 * User-mode registers count as a user access even for any
23310 * potential system fault or CPU buglet:
23311 */
23312 - if (user_mode_vm(regs)) {
23313 + if (user_mode(regs)) {
23314 local_irq_enable();
23315 error_code |= PF_USER;
23316 } else {
23317 @@ -1132,6 +1338,11 @@ retry:
23318 might_sleep();
23319 }
23320
23321 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
23322 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
23323 + return;
23324 +#endif
23325 +
23326 vma = find_vma(mm, address);
23327 if (unlikely(!vma)) {
23328 bad_area(regs, error_code, address);
23329 @@ -1143,18 +1354,24 @@ retry:
23330 bad_area(regs, error_code, address);
23331 return;
23332 }
23333 - if (error_code & PF_USER) {
23334 - /*
23335 - * Accessing the stack below %sp is always a bug.
23336 - * The large cushion allows instructions like enter
23337 - * and pusha to work. ("enter $65535, $31" pushes
23338 - * 32 pointers and then decrements %sp by 65535.)
23339 - */
23340 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
23341 - bad_area(regs, error_code, address);
23342 - return;
23343 - }
23344 + /*
23345 + * Accessing the stack below %sp is always a bug.
23346 + * The large cushion allows instructions like enter
23347 + * and pusha to work. ("enter $65535, $31" pushes
23348 + * 32 pointers and then decrements %sp by 65535.)
23349 + */
23350 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
23351 + bad_area(regs, error_code, address);
23352 + return;
23353 }
23354 +
23355 +#ifdef CONFIG_PAX_SEGMEXEC
23356 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
23357 + bad_area(regs, error_code, address);
23358 + return;
23359 + }
23360 +#endif
23361 +
23362 if (unlikely(expand_stack(vma, address))) {
23363 bad_area(regs, error_code, address);
23364 return;
23365 @@ -1209,3 +1426,292 @@ good_area:
23366
23367 up_read(&mm->mmap_sem);
23368 }
23369 +
23370 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23371 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
23372 +{
23373 + struct mm_struct *mm = current->mm;
23374 + unsigned long ip = regs->ip;
23375 +
23376 + if (v8086_mode(regs))
23377 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
23378 +
23379 +#ifdef CONFIG_PAX_PAGEEXEC
23380 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
23381 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
23382 + return true;
23383 + if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
23384 + return true;
23385 + return false;
23386 + }
23387 +#endif
23388 +
23389 +#ifdef CONFIG_PAX_SEGMEXEC
23390 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
23391 + if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
23392 + return true;
23393 + return false;
23394 + }
23395 +#endif
23396 +
23397 + return false;
23398 +}
23399 +#endif
23400 +
23401 +#ifdef CONFIG_PAX_EMUTRAMP
23402 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
23403 +{
23404 + int err;
23405 +
23406 + do { /* PaX: libffi trampoline emulation */
23407 + unsigned char mov, jmp;
23408 + unsigned int addr1, addr2;
23409 +
23410 +#ifdef CONFIG_X86_64
23411 + if ((regs->ip + 9) >> 32)
23412 + break;
23413 +#endif
23414 +
23415 + err = get_user(mov, (unsigned char __user *)regs->ip);
23416 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
23417 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
23418 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
23419 +
23420 + if (err)
23421 + break;
23422 +
23423 + if (mov == 0xB8 && jmp == 0xE9) {
23424 + regs->ax = addr1;
23425 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
23426 + return 2;
23427 + }
23428 + } while (0);
23429 +
23430 + do { /* PaX: gcc trampoline emulation #1 */
23431 + unsigned char mov1, mov2;
23432 + unsigned short jmp;
23433 + unsigned int addr1, addr2;
23434 +
23435 +#ifdef CONFIG_X86_64
23436 + if ((regs->ip + 11) >> 32)
23437 + break;
23438 +#endif
23439 +
23440 + err = get_user(mov1, (unsigned char __user *)regs->ip);
23441 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
23442 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
23443 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
23444 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
23445 +
23446 + if (err)
23447 + break;
23448 +
23449 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
23450 + regs->cx = addr1;
23451 + regs->ax = addr2;
23452 + regs->ip = addr2;
23453 + return 2;
23454 + }
23455 + } while (0);
23456 +
23457 + do { /* PaX: gcc trampoline emulation #2 */
23458 + unsigned char mov, jmp;
23459 + unsigned int addr1, addr2;
23460 +
23461 +#ifdef CONFIG_X86_64
23462 + if ((regs->ip + 9) >> 32)
23463 + break;
23464 +#endif
23465 +
23466 + err = get_user(mov, (unsigned char __user *)regs->ip);
23467 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
23468 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
23469 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
23470 +
23471 + if (err)
23472 + break;
23473 +
23474 + if (mov == 0xB9 && jmp == 0xE9) {
23475 + regs->cx = addr1;
23476 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
23477 + return 2;
23478 + }
23479 + } while (0);
23480 +
23481 + return 1; /* PaX in action */
23482 +}
23483 +
23484 +#ifdef CONFIG_X86_64
23485 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
23486 +{
23487 + int err;
23488 +
23489 + do { /* PaX: libffi trampoline emulation */
23490 + unsigned short mov1, mov2, jmp1;
23491 + unsigned char stcclc, jmp2;
23492 + unsigned long addr1, addr2;
23493 +
23494 + err = get_user(mov1, (unsigned short __user *)regs->ip);
23495 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
23496 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
23497 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
23498 + err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
23499 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
23500 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
23501 +
23502 + if (err)
23503 + break;
23504 +
23505 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
23506 + regs->r11 = addr1;
23507 + regs->r10 = addr2;
23508 + if (stcclc == 0xF8)
23509 + regs->flags &= ~X86_EFLAGS_CF;
23510 + else
23511 + regs->flags |= X86_EFLAGS_CF;
23512 + regs->ip = addr1;
23513 + return 2;
23514 + }
23515 + } while (0);
23516 +
23517 + do { /* PaX: gcc trampoline emulation #1 */
23518 + unsigned short mov1, mov2, jmp1;
23519 + unsigned char jmp2;
23520 + unsigned int addr1;
23521 + unsigned long addr2;
23522 +
23523 + err = get_user(mov1, (unsigned short __user *)regs->ip);
23524 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
23525 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
23526 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
23527 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
23528 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
23529 +
23530 + if (err)
23531 + break;
23532 +
23533 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
23534 + regs->r11 = addr1;
23535 + regs->r10 = addr2;
23536 + regs->ip = addr1;
23537 + return 2;
23538 + }
23539 + } while (0);
23540 +
23541 + do { /* PaX: gcc trampoline emulation #2 */
23542 + unsigned short mov1, mov2, jmp1;
23543 + unsigned char jmp2;
23544 + unsigned long addr1, addr2;
23545 +
23546 + err = get_user(mov1, (unsigned short __user *)regs->ip);
23547 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
23548 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
23549 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
23550 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
23551 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
23552 +
23553 + if (err)
23554 + break;
23555 +
23556 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
23557 + regs->r11 = addr1;
23558 + regs->r10 = addr2;
23559 + regs->ip = addr1;
23560 + return 2;
23561 + }
23562 + } while (0);
23563 +
23564 + return 1; /* PaX in action */
23565 +}
23566 +#endif
23567 +
23568 +/*
23569 + * PaX: decide what to do with offenders (regs->ip = fault address)
23570 + *
23571 + * returns 1 when task should be killed
23572 + * 2 when gcc trampoline was detected
23573 + */
23574 +static int pax_handle_fetch_fault(struct pt_regs *regs)
23575 +{
23576 + if (v8086_mode(regs))
23577 + return 1;
23578 +
23579 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
23580 + return 1;
23581 +
23582 +#ifdef CONFIG_X86_32
23583 + return pax_handle_fetch_fault_32(regs);
23584 +#else
23585 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
23586 + return pax_handle_fetch_fault_32(regs);
23587 + else
23588 + return pax_handle_fetch_fault_64(regs);
23589 +#endif
23590 +}
23591 +#endif
23592 +
23593 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23594 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
23595 +{
23596 + long i;
23597 +
23598 + printk(KERN_ERR "PAX: bytes at PC: ");
23599 + for (i = 0; i < 20; i++) {
23600 + unsigned char c;
23601 + if (get_user(c, (unsigned char __force_user *)pc+i))
23602 + printk(KERN_CONT "?? ");
23603 + else
23604 + printk(KERN_CONT "%02x ", c);
23605 + }
23606 + printk("\n");
23607 +
23608 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
23609 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
23610 + unsigned long c;
23611 + if (get_user(c, (unsigned long __force_user *)sp+i)) {
23612 +#ifdef CONFIG_X86_32
23613 + printk(KERN_CONT "???????? ");
23614 +#else
23615 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
23616 + printk(KERN_CONT "???????? ???????? ");
23617 + else
23618 + printk(KERN_CONT "???????????????? ");
23619 +#endif
23620 + } else {
23621 +#ifdef CONFIG_X86_64
23622 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
23623 + printk(KERN_CONT "%08x ", (unsigned int)c);
23624 + printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
23625 + } else
23626 +#endif
23627 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
23628 + }
23629 + }
23630 + printk("\n");
23631 +}
23632 +#endif
23633 +
23634 +/**
23635 + * probe_kernel_write(): safely attempt to write to a location
23636 + * @dst: address to write to
23637 + * @src: pointer to the data that shall be written
23638 + * @size: size of the data chunk
23639 + *
23640 + * Safely write to address @dst from the buffer at @src. If a kernel fault
23641 + * happens, handle that and return -EFAULT.
23642 + */
23643 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
23644 +{
23645 + long ret;
23646 + mm_segment_t old_fs = get_fs();
23647 +
23648 + set_fs(KERNEL_DS);
23649 + pagefault_disable();
23650 + pax_open_kernel();
23651 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
23652 + pax_close_kernel();
23653 + pagefault_enable();
23654 + set_fs(old_fs);
23655 +
23656 + return ret ? -EFAULT : 0;
23657 +}
23658 diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
23659 index dd74e46..7d26398 100644
23660 --- a/arch/x86/mm/gup.c
23661 +++ b/arch/x86/mm/gup.c
23662 @@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
23663 addr = start;
23664 len = (unsigned long) nr_pages << PAGE_SHIFT;
23665 end = start + len;
23666 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
23667 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
23668 (void __user *)start, len)))
23669 return 0;
23670
23671 diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
23672 index f4f29b1..5cac4fb 100644
23673 --- a/arch/x86/mm/highmem_32.c
23674 +++ b/arch/x86/mm/highmem_32.c
23675 @@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
23676 idx = type + KM_TYPE_NR*smp_processor_id();
23677 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
23678 BUG_ON(!pte_none(*(kmap_pte-idx)));
23679 +
23680 + pax_open_kernel();
23681 set_pte(kmap_pte-idx, mk_pte(page, prot));
23682 + pax_close_kernel();
23683 +
23684 arch_flush_lazy_mmu_mode();
23685
23686 return (void *)vaddr;
23687 diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
23688 index 8ecbb4b..29efd37 100644
23689 --- a/arch/x86/mm/hugetlbpage.c
23690 +++ b/arch/x86/mm/hugetlbpage.c
23691 @@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
23692 struct hstate *h = hstate_file(file);
23693 struct mm_struct *mm = current->mm;
23694 struct vm_area_struct *vma;
23695 - unsigned long start_addr;
23696 + unsigned long start_addr, pax_task_size = TASK_SIZE;
23697 +
23698 +#ifdef CONFIG_PAX_SEGMEXEC
23699 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
23700 + pax_task_size = SEGMEXEC_TASK_SIZE;
23701 +#endif
23702 +
23703 + pax_task_size -= PAGE_SIZE;
23704
23705 if (len > mm->cached_hole_size) {
23706 - start_addr = mm->free_area_cache;
23707 + start_addr = mm->free_area_cache;
23708 } else {
23709 - start_addr = TASK_UNMAPPED_BASE;
23710 - mm->cached_hole_size = 0;
23711 + start_addr = mm->mmap_base;
23712 + mm->cached_hole_size = 0;
23713 }
23714
23715 full_search:
23716 @@ -280,26 +287,27 @@ full_search:
23717
23718 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
23719 /* At this point: (!vma || addr < vma->vm_end). */
23720 - if (TASK_SIZE - len < addr) {
23721 + if (pax_task_size - len < addr) {
23722 /*
23723 * Start a new search - just in case we missed
23724 * some holes.
23725 */
23726 - if (start_addr != TASK_UNMAPPED_BASE) {
23727 - start_addr = TASK_UNMAPPED_BASE;
23728 + if (start_addr != mm->mmap_base) {
23729 + start_addr = mm->mmap_base;
23730 mm->cached_hole_size = 0;
23731 goto full_search;
23732 }
23733 return -ENOMEM;
23734 }
23735 - if (!vma || addr + len <= vma->vm_start) {
23736 - mm->free_area_cache = addr + len;
23737 - return addr;
23738 - }
23739 + if (check_heap_stack_gap(vma, addr, len))
23740 + break;
23741 if (addr + mm->cached_hole_size < vma->vm_start)
23742 mm->cached_hole_size = vma->vm_start - addr;
23743 addr = ALIGN(vma->vm_end, huge_page_size(h));
23744 }
23745 +
23746 + mm->free_area_cache = addr + len;
23747 + return addr;
23748 }
23749
23750 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
23751 @@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
23752 {
23753 struct hstate *h = hstate_file(file);
23754 struct mm_struct *mm = current->mm;
23755 - struct vm_area_struct *vma, *prev_vma;
23756 - unsigned long base = mm->mmap_base, addr = addr0;
23757 + struct vm_area_struct *vma;
23758 + unsigned long base = mm->mmap_base, addr;
23759 unsigned long largest_hole = mm->cached_hole_size;
23760 - int first_time = 1;
23761
23762 /* don't allow allocations above current base */
23763 if (mm->free_area_cache > base)
23764 @@ -321,66 +328,63 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
23765 largest_hole = 0;
23766 mm->free_area_cache = base;
23767 }
23768 -try_again:
23769 +
23770 /* make sure it can fit in the remaining address space */
23771 if (mm->free_area_cache < len)
23772 goto fail;
23773
23774 /* either no address requested or can't fit in requested address hole */
23775 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
23776 + addr = (mm->free_area_cache - len);
23777 do {
23778 + addr &= huge_page_mask(h);
23779 + vma = find_vma(mm, addr);
23780 /*
23781 * Lookup failure means no vma is above this address,
23782 * i.e. return with success:
23783 - */
23784 - vma = find_vma(mm, addr);
23785 - if (!vma)
23786 - return addr;
23787 -
23788 - /*
23789 * new region fits between prev_vma->vm_end and
23790 * vma->vm_start, use it:
23791 */
23792 - prev_vma = vma->vm_prev;
23793 - if (addr + len <= vma->vm_start &&
23794 - (!prev_vma || (addr >= prev_vma->vm_end))) {
23795 + if (check_heap_stack_gap(vma, addr, len)) {
23796 /* remember the address as a hint for next time */
23797 - mm->cached_hole_size = largest_hole;
23798 - return (mm->free_area_cache = addr);
23799 - } else {
23800 - /* pull free_area_cache down to the first hole */
23801 - if (mm->free_area_cache == vma->vm_end) {
23802 - mm->free_area_cache = vma->vm_start;
23803 - mm->cached_hole_size = largest_hole;
23804 - }
23805 + mm->cached_hole_size = largest_hole;
23806 + return (mm->free_area_cache = addr);
23807 + }
23808 + /* pull free_area_cache down to the first hole */
23809 + if (mm->free_area_cache == vma->vm_end) {
23810 + mm->free_area_cache = vma->vm_start;
23811 + mm->cached_hole_size = largest_hole;
23812 }
23813
23814 /* remember the largest hole we saw so far */
23815 if (addr + largest_hole < vma->vm_start)
23816 - largest_hole = vma->vm_start - addr;
23817 + largest_hole = vma->vm_start - addr;
23818
23819 /* try just below the current vma->vm_start */
23820 - addr = (vma->vm_start - len) & huge_page_mask(h);
23821 - } while (len <= vma->vm_start);
23822 + addr = skip_heap_stack_gap(vma, len);
23823 + } while (!IS_ERR_VALUE(addr));
23824
23825 fail:
23826 /*
23827 - * if hint left us with no space for the requested
23828 - * mapping then try again:
23829 - */
23830 - if (first_time) {
23831 - mm->free_area_cache = base;
23832 - largest_hole = 0;
23833 - first_time = 0;
23834 - goto try_again;
23835 - }
23836 - /*
23837 * A failed mmap() very likely causes application failure,
23838 * so fall back to the bottom-up function here. This scenario
23839 * can happen with large stack limits and large mmap()
23840 * allocations.
23841 */
23842 - mm->free_area_cache = TASK_UNMAPPED_BASE;
23843 +
23844 +#ifdef CONFIG_PAX_SEGMEXEC
23845 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
23846 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
23847 + else
23848 +#endif
23849 +
23850 + mm->mmap_base = TASK_UNMAPPED_BASE;
23851 +
23852 +#ifdef CONFIG_PAX_RANDMMAP
23853 + if (mm->pax_flags & MF_PAX_RANDMMAP)
23854 + mm->mmap_base += mm->delta_mmap;
23855 +#endif
23856 +
23857 + mm->free_area_cache = mm->mmap_base;
23858 mm->cached_hole_size = ~0UL;
23859 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
23860 len, pgoff, flags);
23861 @@ -388,6 +392,7 @@ fail:
23862 /*
23863 * Restore the topdown base:
23864 */
23865 + mm->mmap_base = base;
23866 mm->free_area_cache = base;
23867 mm->cached_hole_size = ~0UL;
23868
23869 @@ -401,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
23870 struct hstate *h = hstate_file(file);
23871 struct mm_struct *mm = current->mm;
23872 struct vm_area_struct *vma;
23873 + unsigned long pax_task_size = TASK_SIZE;
23874
23875 if (len & ~huge_page_mask(h))
23876 return -EINVAL;
23877 - if (len > TASK_SIZE)
23878 +
23879 +#ifdef CONFIG_PAX_SEGMEXEC
23880 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
23881 + pax_task_size = SEGMEXEC_TASK_SIZE;
23882 +#endif
23883 +
23884 + pax_task_size -= PAGE_SIZE;
23885 +
23886 + if (len > pax_task_size)
23887 return -ENOMEM;
23888
23889 if (flags & MAP_FIXED) {
23890 @@ -416,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
23891 if (addr) {
23892 addr = ALIGN(addr, huge_page_size(h));
23893 vma = find_vma(mm, addr);
23894 - if (TASK_SIZE - len >= addr &&
23895 - (!vma || addr + len <= vma->vm_start))
23896 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
23897 return addr;
23898 }
23899 if (mm->get_unmapped_area == arch_get_unmapped_area)
23900 diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
23901 index 6cabf65..77e9c1c 100644
23902 --- a/arch/x86/mm/init.c
23903 +++ b/arch/x86/mm/init.c
23904 @@ -17,6 +17,7 @@
23905 #include <asm/tlb.h>
23906 #include <asm/proto.h>
23907 #include <asm/dma.h> /* for MAX_DMA_PFN */
23908 +#include <asm/desc.h>
23909
23910 unsigned long __initdata pgt_buf_start;
23911 unsigned long __meminitdata pgt_buf_end;
23912 @@ -33,7 +34,7 @@ int direct_gbpages
23913 static void __init find_early_table_space(unsigned long end, int use_pse,
23914 int use_gbpages)
23915 {
23916 - unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
23917 + unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
23918 phys_addr_t base;
23919
23920 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
23921 @@ -314,8 +315,29 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
23922 */
23923 int devmem_is_allowed(unsigned long pagenr)
23924 {
23925 +#ifdef CONFIG_GRKERNSEC_KMEM
23926 + /* allow BDA */
23927 + if (!pagenr)
23928 + return 1;
23929 + /* allow EBDA */
23930 + if ((0x9f000 >> PAGE_SHIFT) == pagenr)
23931 + return 1;
23932 +#else
23933 + if (!pagenr)
23934 + return 1;
23935 +#ifdef CONFIG_VM86
23936 + if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
23937 + return 1;
23938 +#endif
23939 +#endif
23940 +
23941 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
23942 + return 1;
23943 +#ifdef CONFIG_GRKERNSEC_KMEM
23944 + /* throw out everything else below 1MB */
23945 if (pagenr <= 256)
23946 - return 1;
23947 + return 0;
23948 +#endif
23949 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
23950 return 0;
23951 if (!page_is_ram(pagenr))
23952 @@ -374,6 +396,86 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
23953
23954 void free_initmem(void)
23955 {
23956 +
23957 +#ifdef CONFIG_PAX_KERNEXEC
23958 +#ifdef CONFIG_X86_32
23959 + /* PaX: limit KERNEL_CS to actual size */
23960 + unsigned long addr, limit;
23961 + struct desc_struct d;
23962 + int cpu;
23963 +
23964 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
23965 + limit = (limit - 1UL) >> PAGE_SHIFT;
23966 +
23967 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
23968 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
23969 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
23970 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
23971 + }
23972 +
23973 + /* PaX: make KERNEL_CS read-only */
23974 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
23975 + if (!paravirt_enabled())
23976 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
23977 +/*
23978 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
23979 + pgd = pgd_offset_k(addr);
23980 + pud = pud_offset(pgd, addr);
23981 + pmd = pmd_offset(pud, addr);
23982 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
23983 + }
23984 +*/
23985 +#ifdef CONFIG_X86_PAE
23986 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
23987 +/*
23988 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
23989 + pgd = pgd_offset_k(addr);
23990 + pud = pud_offset(pgd, addr);
23991 + pmd = pmd_offset(pud, addr);
23992 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
23993 + }
23994 +*/
23995 +#endif
23996 +
23997 +#ifdef CONFIG_MODULES
23998 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
23999 +#endif
24000 +
24001 +#else
24002 + pgd_t *pgd;
24003 + pud_t *pud;
24004 + pmd_t *pmd;
24005 + unsigned long addr, end;
24006 +
24007 + /* PaX: make kernel code/rodata read-only, rest non-executable */
24008 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
24009 + pgd = pgd_offset_k(addr);
24010 + pud = pud_offset(pgd, addr);
24011 + pmd = pmd_offset(pud, addr);
24012 + if (!pmd_present(*pmd))
24013 + continue;
24014 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
24015 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24016 + else
24017 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
24018 + }
24019 +
24020 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
24021 + end = addr + KERNEL_IMAGE_SIZE;
24022 + for (; addr < end; addr += PMD_SIZE) {
24023 + pgd = pgd_offset_k(addr);
24024 + pud = pud_offset(pgd, addr);
24025 + pmd = pmd_offset(pud, addr);
24026 + if (!pmd_present(*pmd))
24027 + continue;
24028 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
24029 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24030 + }
24031 +#endif
24032 +
24033 + flush_tlb_all();
24034 +#endif
24035 +
24036 free_init_pages("unused kernel memory",
24037 (unsigned long)(&__init_begin),
24038 (unsigned long)(&__init_end));
24039 diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
24040 index 8663f6c..829ae76 100644
24041 --- a/arch/x86/mm/init_32.c
24042 +++ b/arch/x86/mm/init_32.c
24043 @@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
24044 }
24045
24046 /*
24047 - * Creates a middle page table and puts a pointer to it in the
24048 - * given global directory entry. This only returns the gd entry
24049 - * in non-PAE compilation mode, since the middle layer is folded.
24050 - */
24051 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
24052 -{
24053 - pud_t *pud;
24054 - pmd_t *pmd_table;
24055 -
24056 -#ifdef CONFIG_X86_PAE
24057 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
24058 - if (after_bootmem)
24059 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
24060 - else
24061 - pmd_table = (pmd_t *)alloc_low_page();
24062 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
24063 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
24064 - pud = pud_offset(pgd, 0);
24065 - BUG_ON(pmd_table != pmd_offset(pud, 0));
24066 -
24067 - return pmd_table;
24068 - }
24069 -#endif
24070 - pud = pud_offset(pgd, 0);
24071 - pmd_table = pmd_offset(pud, 0);
24072 -
24073 - return pmd_table;
24074 -}
24075 -
24076 -/*
24077 * Create a page table and place a pointer to it in a middle page
24078 * directory entry:
24079 */
24080 @@ -123,13 +93,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
24081 page_table = (pte_t *)alloc_low_page();
24082
24083 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
24084 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24085 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
24086 +#else
24087 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
24088 +#endif
24089 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
24090 }
24091
24092 return pte_offset_kernel(pmd, 0);
24093 }
24094
24095 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
24096 +{
24097 + pud_t *pud;
24098 + pmd_t *pmd_table;
24099 +
24100 + pud = pud_offset(pgd, 0);
24101 + pmd_table = pmd_offset(pud, 0);
24102 +
24103 + return pmd_table;
24104 +}
24105 +
24106 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
24107 {
24108 int pgd_idx = pgd_index(vaddr);
24109 @@ -203,6 +188,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24110 int pgd_idx, pmd_idx;
24111 unsigned long vaddr;
24112 pgd_t *pgd;
24113 + pud_t *pud;
24114 pmd_t *pmd;
24115 pte_t *pte = NULL;
24116
24117 @@ -212,8 +198,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24118 pgd = pgd_base + pgd_idx;
24119
24120 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
24121 - pmd = one_md_table_init(pgd);
24122 - pmd = pmd + pmd_index(vaddr);
24123 + pud = pud_offset(pgd, vaddr);
24124 + pmd = pmd_offset(pud, vaddr);
24125 +
24126 +#ifdef CONFIG_X86_PAE
24127 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
24128 +#endif
24129 +
24130 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
24131 pmd++, pmd_idx++) {
24132 pte = page_table_kmap_check(one_page_table_init(pmd),
24133 @@ -225,11 +216,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24134 }
24135 }
24136
24137 -static inline int is_kernel_text(unsigned long addr)
24138 +static inline int is_kernel_text(unsigned long start, unsigned long end)
24139 {
24140 - if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
24141 - return 1;
24142 - return 0;
24143 + if ((start > ktla_ktva((unsigned long)_etext) ||
24144 + end <= ktla_ktva((unsigned long)_stext)) &&
24145 + (start > ktla_ktva((unsigned long)_einittext) ||
24146 + end <= ktla_ktva((unsigned long)_sinittext)) &&
24147 +
24148 +#ifdef CONFIG_ACPI_SLEEP
24149 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
24150 +#endif
24151 +
24152 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
24153 + return 0;
24154 + return 1;
24155 }
24156
24157 /*
24158 @@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned long start,
24159 unsigned long last_map_addr = end;
24160 unsigned long start_pfn, end_pfn;
24161 pgd_t *pgd_base = swapper_pg_dir;
24162 - int pgd_idx, pmd_idx, pte_ofs;
24163 + unsigned int pgd_idx, pmd_idx, pte_ofs;
24164 unsigned long pfn;
24165 pgd_t *pgd;
24166 + pud_t *pud;
24167 pmd_t *pmd;
24168 pte_t *pte;
24169 unsigned pages_2m, pages_4k;
24170 @@ -281,8 +282,13 @@ repeat:
24171 pfn = start_pfn;
24172 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
24173 pgd = pgd_base + pgd_idx;
24174 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
24175 - pmd = one_md_table_init(pgd);
24176 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
24177 + pud = pud_offset(pgd, 0);
24178 + pmd = pmd_offset(pud, 0);
24179 +
24180 +#ifdef CONFIG_X86_PAE
24181 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
24182 +#endif
24183
24184 if (pfn >= end_pfn)
24185 continue;
24186 @@ -294,14 +300,13 @@ repeat:
24187 #endif
24188 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
24189 pmd++, pmd_idx++) {
24190 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
24191 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
24192
24193 /*
24194 * Map with big pages if possible, otherwise
24195 * create normal page tables:
24196 */
24197 if (use_pse) {
24198 - unsigned int addr2;
24199 pgprot_t prot = PAGE_KERNEL_LARGE;
24200 /*
24201 * first pass will use the same initial
24202 @@ -311,11 +316,7 @@ repeat:
24203 __pgprot(PTE_IDENT_ATTR |
24204 _PAGE_PSE);
24205
24206 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
24207 - PAGE_OFFSET + PAGE_SIZE-1;
24208 -
24209 - if (is_kernel_text(addr) ||
24210 - is_kernel_text(addr2))
24211 + if (is_kernel_text(address, address + PMD_SIZE))
24212 prot = PAGE_KERNEL_LARGE_EXEC;
24213
24214 pages_2m++;
24215 @@ -332,7 +333,7 @@ repeat:
24216 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
24217 pte += pte_ofs;
24218 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
24219 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
24220 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
24221 pgprot_t prot = PAGE_KERNEL;
24222 /*
24223 * first pass will use the same initial
24224 @@ -340,7 +341,7 @@ repeat:
24225 */
24226 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
24227
24228 - if (is_kernel_text(addr))
24229 + if (is_kernel_text(address, address + PAGE_SIZE))
24230 prot = PAGE_KERNEL_EXEC;
24231
24232 pages_4k++;
24233 @@ -466,7 +467,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
24234
24235 pud = pud_offset(pgd, va);
24236 pmd = pmd_offset(pud, va);
24237 - if (!pmd_present(*pmd))
24238 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
24239 break;
24240
24241 pte = pte_offset_kernel(pmd, va);
24242 @@ -518,12 +519,10 @@ void __init early_ioremap_page_table_range_init(void)
24243
24244 static void __init pagetable_init(void)
24245 {
24246 - pgd_t *pgd_base = swapper_pg_dir;
24247 -
24248 - permanent_kmaps_init(pgd_base);
24249 + permanent_kmaps_init(swapper_pg_dir);
24250 }
24251
24252 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
24253 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
24254 EXPORT_SYMBOL_GPL(__supported_pte_mask);
24255
24256 /* user-defined highmem size */
24257 @@ -735,6 +734,12 @@ void __init mem_init(void)
24258
24259 pci_iommu_alloc();
24260
24261 +#ifdef CONFIG_PAX_PER_CPU_PGD
24262 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
24263 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
24264 + KERNEL_PGD_PTRS);
24265 +#endif
24266 +
24267 #ifdef CONFIG_FLATMEM
24268 BUG_ON(!mem_map);
24269 #endif
24270 @@ -761,7 +766,7 @@ void __init mem_init(void)
24271 reservedpages++;
24272
24273 codesize = (unsigned long) &_etext - (unsigned long) &_text;
24274 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
24275 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
24276 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
24277
24278 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
24279 @@ -802,10 +807,10 @@ void __init mem_init(void)
24280 ((unsigned long)&__init_end -
24281 (unsigned long)&__init_begin) >> 10,
24282
24283 - (unsigned long)&_etext, (unsigned long)&_edata,
24284 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
24285 + (unsigned long)&_sdata, (unsigned long)&_edata,
24286 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
24287
24288 - (unsigned long)&_text, (unsigned long)&_etext,
24289 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
24290 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
24291
24292 /*
24293 @@ -883,6 +888,7 @@ void set_kernel_text_rw(void)
24294 if (!kernel_set_to_readonly)
24295 return;
24296
24297 + start = ktla_ktva(start);
24298 pr_debug("Set kernel text: %lx - %lx for read write\n",
24299 start, start+size);
24300
24301 @@ -897,6 +903,7 @@ void set_kernel_text_ro(void)
24302 if (!kernel_set_to_readonly)
24303 return;
24304
24305 + start = ktla_ktva(start);
24306 pr_debug("Set kernel text: %lx - %lx for read only\n",
24307 start, start+size);
24308
24309 @@ -925,6 +932,7 @@ void mark_rodata_ro(void)
24310 unsigned long start = PFN_ALIGN(_text);
24311 unsigned long size = PFN_ALIGN(_etext) - start;
24312
24313 + start = ktla_ktva(start);
24314 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
24315 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
24316 size >> 10);
24317 diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
24318 index 436a030..b8596b9 100644
24319 --- a/arch/x86/mm/init_64.c
24320 +++ b/arch/x86/mm/init_64.c
24321 @@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpages_on);
24322 * around without checking the pgd every time.
24323 */
24324
24325 -pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
24326 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
24327 EXPORT_SYMBOL_GPL(__supported_pte_mask);
24328
24329 int force_personality32;
24330 @@ -108,12 +108,22 @@ void sync_global_pgds(unsigned long start, unsigned long end)
24331
24332 for (address = start; address <= end; address += PGDIR_SIZE) {
24333 const pgd_t *pgd_ref = pgd_offset_k(address);
24334 +
24335 +#ifdef CONFIG_PAX_PER_CPU_PGD
24336 + unsigned long cpu;
24337 +#else
24338 struct page *page;
24339 +#endif
24340
24341 if (pgd_none(*pgd_ref))
24342 continue;
24343
24344 spin_lock(&pgd_lock);
24345 +
24346 +#ifdef CONFIG_PAX_PER_CPU_PGD
24347 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
24348 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
24349 +#else
24350 list_for_each_entry(page, &pgd_list, lru) {
24351 pgd_t *pgd;
24352 spinlock_t *pgt_lock;
24353 @@ -122,6 +132,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
24354 /* the pgt_lock only for Xen */
24355 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
24356 spin_lock(pgt_lock);
24357 +#endif
24358
24359 if (pgd_none(*pgd))
24360 set_pgd(pgd, *pgd_ref);
24361 @@ -129,7 +140,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
24362 BUG_ON(pgd_page_vaddr(*pgd)
24363 != pgd_page_vaddr(*pgd_ref));
24364
24365 +#ifndef CONFIG_PAX_PER_CPU_PGD
24366 spin_unlock(pgt_lock);
24367 +#endif
24368 +
24369 }
24370 spin_unlock(&pgd_lock);
24371 }
24372 @@ -203,7 +217,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
24373 pmd = fill_pmd(pud, vaddr);
24374 pte = fill_pte(pmd, vaddr);
24375
24376 + pax_open_kernel();
24377 set_pte(pte, new_pte);
24378 + pax_close_kernel();
24379
24380 /*
24381 * It's enough to flush this one mapping.
24382 @@ -262,14 +278,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
24383 pgd = pgd_offset_k((unsigned long)__va(phys));
24384 if (pgd_none(*pgd)) {
24385 pud = (pud_t *) spp_getpage();
24386 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
24387 - _PAGE_USER));
24388 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
24389 }
24390 pud = pud_offset(pgd, (unsigned long)__va(phys));
24391 if (pud_none(*pud)) {
24392 pmd = (pmd_t *) spp_getpage();
24393 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
24394 - _PAGE_USER));
24395 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
24396 }
24397 pmd = pmd_offset(pud, phys);
24398 BUG_ON(!pmd_none(*pmd));
24399 @@ -330,7 +344,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
24400 if (pfn >= pgt_buf_top)
24401 panic("alloc_low_page: ran out of memory");
24402
24403 - adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
24404 + adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
24405 clear_page(adr);
24406 *phys = pfn * PAGE_SIZE;
24407 return adr;
24408 @@ -346,7 +360,7 @@ static __ref void *map_low_page(void *virt)
24409
24410 phys = __pa(virt);
24411 left = phys & (PAGE_SIZE - 1);
24412 - adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
24413 + adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
24414 adr = (void *)(((unsigned long)adr) | left);
24415
24416 return adr;
24417 @@ -684,6 +698,12 @@ void __init mem_init(void)
24418
24419 pci_iommu_alloc();
24420
24421 +#ifdef CONFIG_PAX_PER_CPU_PGD
24422 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
24423 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
24424 + KERNEL_PGD_PTRS);
24425 +#endif
24426 +
24427 /* clear_bss() already clear the empty_zero_page */
24428
24429 reservedpages = 0;
24430 @@ -844,8 +864,8 @@ int kern_addr_valid(unsigned long addr)
24431 static struct vm_area_struct gate_vma = {
24432 .vm_start = VSYSCALL_START,
24433 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
24434 - .vm_page_prot = PAGE_READONLY_EXEC,
24435 - .vm_flags = VM_READ | VM_EXEC
24436 + .vm_page_prot = PAGE_READONLY,
24437 + .vm_flags = VM_READ
24438 };
24439
24440 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
24441 @@ -879,7 +899,7 @@ int in_gate_area_no_mm(unsigned long addr)
24442
24443 const char *arch_vma_name(struct vm_area_struct *vma)
24444 {
24445 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
24446 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
24447 return "[vdso]";
24448 if (vma == &gate_vma)
24449 return "[vsyscall]";
24450 diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
24451 index 7b179b4..6bd1777 100644
24452 --- a/arch/x86/mm/iomap_32.c
24453 +++ b/arch/x86/mm/iomap_32.c
24454 @@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
24455 type = kmap_atomic_idx_push();
24456 idx = type + KM_TYPE_NR * smp_processor_id();
24457 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
24458 +
24459 + pax_open_kernel();
24460 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
24461 + pax_close_kernel();
24462 +
24463 arch_flush_lazy_mmu_mode();
24464
24465 return (void *)vaddr;
24466 diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
24467 index be1ef57..55f0160 100644
24468 --- a/arch/x86/mm/ioremap.c
24469 +++ b/arch/x86/mm/ioremap.c
24470 @@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
24471 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
24472 int is_ram = page_is_ram(pfn);
24473
24474 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
24475 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
24476 return NULL;
24477 WARN_ON_ONCE(is_ram);
24478 }
24479 @@ -315,6 +315,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
24480
24481 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
24482 if (page_is_ram(start >> PAGE_SHIFT))
24483 +#ifdef CONFIG_HIGHMEM
24484 + if ((start >> PAGE_SHIFT) < max_low_pfn)
24485 +#endif
24486 return __va(phys);
24487
24488 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
24489 @@ -344,7 +347,7 @@ static int __init early_ioremap_debug_setup(char *str)
24490 early_param("early_ioremap_debug", early_ioremap_debug_setup);
24491
24492 static __initdata int after_paging_init;
24493 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
24494 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
24495
24496 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
24497 {
24498 @@ -381,8 +384,7 @@ void __init early_ioremap_init(void)
24499 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
24500
24501 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
24502 - memset(bm_pte, 0, sizeof(bm_pte));
24503 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
24504 + pmd_populate_user(&init_mm, pmd, bm_pte);
24505
24506 /*
24507 * The boot-ioremap range spans multiple pmds, for which
24508 diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
24509 index d87dd6d..bf3fa66 100644
24510 --- a/arch/x86/mm/kmemcheck/kmemcheck.c
24511 +++ b/arch/x86/mm/kmemcheck/kmemcheck.c
24512 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
24513 * memory (e.g. tracked pages)? For now, we need this to avoid
24514 * invoking kmemcheck for PnP BIOS calls.
24515 */
24516 - if (regs->flags & X86_VM_MASK)
24517 + if (v8086_mode(regs))
24518 return false;
24519 - if (regs->cs != __KERNEL_CS)
24520 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
24521 return false;
24522
24523 pte = kmemcheck_pte_lookup(address);
24524 diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
24525 index 845df68..1d8d29f 100644
24526 --- a/arch/x86/mm/mmap.c
24527 +++ b/arch/x86/mm/mmap.c
24528 @@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
24529 * Leave an at least ~128 MB hole with possible stack randomization.
24530 */
24531 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
24532 -#define MAX_GAP (TASK_SIZE/6*5)
24533 +#define MAX_GAP (pax_task_size/6*5)
24534
24535 static int mmap_is_legacy(void)
24536 {
24537 @@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
24538 return rnd << PAGE_SHIFT;
24539 }
24540
24541 -static unsigned long mmap_base(void)
24542 +static unsigned long mmap_base(struct mm_struct *mm)
24543 {
24544 unsigned long gap = rlimit(RLIMIT_STACK);
24545 + unsigned long pax_task_size = TASK_SIZE;
24546 +
24547 +#ifdef CONFIG_PAX_SEGMEXEC
24548 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
24549 + pax_task_size = SEGMEXEC_TASK_SIZE;
24550 +#endif
24551
24552 if (gap < MIN_GAP)
24553 gap = MIN_GAP;
24554 else if (gap > MAX_GAP)
24555 gap = MAX_GAP;
24556
24557 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
24558 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
24559 }
24560
24561 /*
24562 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
24563 * does, but not when emulating X86_32
24564 */
24565 -static unsigned long mmap_legacy_base(void)
24566 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
24567 {
24568 - if (mmap_is_ia32())
24569 + if (mmap_is_ia32()) {
24570 +
24571 +#ifdef CONFIG_PAX_SEGMEXEC
24572 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
24573 + return SEGMEXEC_TASK_UNMAPPED_BASE;
24574 + else
24575 +#endif
24576 +
24577 return TASK_UNMAPPED_BASE;
24578 - else
24579 + } else
24580 return TASK_UNMAPPED_BASE + mmap_rnd();
24581 }
24582
24583 @@ -113,11 +126,23 @@ static unsigned long mmap_legacy_base(void)
24584 void arch_pick_mmap_layout(struct mm_struct *mm)
24585 {
24586 if (mmap_is_legacy()) {
24587 - mm->mmap_base = mmap_legacy_base();
24588 + mm->mmap_base = mmap_legacy_base(mm);
24589 +
24590 +#ifdef CONFIG_PAX_RANDMMAP
24591 + if (mm->pax_flags & MF_PAX_RANDMMAP)
24592 + mm->mmap_base += mm->delta_mmap;
24593 +#endif
24594 +
24595 mm->get_unmapped_area = arch_get_unmapped_area;
24596 mm->unmap_area = arch_unmap_area;
24597 } else {
24598 - mm->mmap_base = mmap_base();
24599 + mm->mmap_base = mmap_base(mm);
24600 +
24601 +#ifdef CONFIG_PAX_RANDMMAP
24602 + if (mm->pax_flags & MF_PAX_RANDMMAP)
24603 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
24604 +#endif
24605 +
24606 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
24607 mm->unmap_area = arch_unmap_area_topdown;
24608 }
24609 diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
24610 index dc0b727..dc9d71a 100644
24611 --- a/arch/x86/mm/mmio-mod.c
24612 +++ b/arch/x86/mm/mmio-mod.c
24613 @@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
24614 break;
24615 default:
24616 {
24617 - unsigned char *ip = (unsigned char *)instptr;
24618 + unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
24619 my_trace->opcode = MMIO_UNKNOWN_OP;
24620 my_trace->width = 0;
24621 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
24622 @@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
24623 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
24624 void __iomem *addr)
24625 {
24626 - static atomic_t next_id;
24627 + static atomic_unchecked_t next_id;
24628 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
24629 /* These are page-unaligned. */
24630 struct mmiotrace_map map = {
24631 @@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
24632 .private = trace
24633 },
24634 .phys = offset,
24635 - .id = atomic_inc_return(&next_id)
24636 + .id = atomic_inc_return_unchecked(&next_id)
24637 };
24638 map.map_id = trace->id;
24639
24640 diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
24641 index b008656..773eac2 100644
24642 --- a/arch/x86/mm/pageattr-test.c
24643 +++ b/arch/x86/mm/pageattr-test.c
24644 @@ -36,7 +36,7 @@ enum {
24645
24646 static int pte_testbit(pte_t pte)
24647 {
24648 - return pte_flags(pte) & _PAGE_UNUSED1;
24649 + return pte_flags(pte) & _PAGE_CPA_TEST;
24650 }
24651
24652 struct split_state {
24653 diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
24654 index e1ebde3..b1e1db38 100644
24655 --- a/arch/x86/mm/pageattr.c
24656 +++ b/arch/x86/mm/pageattr.c
24657 @@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
24658 */
24659 #ifdef CONFIG_PCI_BIOS
24660 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
24661 - pgprot_val(forbidden) |= _PAGE_NX;
24662 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
24663 #endif
24664
24665 /*
24666 @@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
24667 * Does not cover __inittext since that is gone later on. On
24668 * 64bit we do not enforce !NX on the low mapping
24669 */
24670 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
24671 - pgprot_val(forbidden) |= _PAGE_NX;
24672 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
24673 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
24674
24675 +#ifdef CONFIG_DEBUG_RODATA
24676 /*
24677 * The .rodata section needs to be read-only. Using the pfn
24678 * catches all aliases.
24679 @@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
24680 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
24681 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
24682 pgprot_val(forbidden) |= _PAGE_RW;
24683 +#endif
24684
24685 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
24686 /*
24687 @@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
24688 }
24689 #endif
24690
24691 +#ifdef CONFIG_PAX_KERNEXEC
24692 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
24693 + pgprot_val(forbidden) |= _PAGE_RW;
24694 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
24695 + }
24696 +#endif
24697 +
24698 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
24699
24700 return prot;
24701 @@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
24702 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
24703 {
24704 /* change init_mm */
24705 + pax_open_kernel();
24706 set_pte_atomic(kpte, pte);
24707 +
24708 #ifdef CONFIG_X86_32
24709 if (!SHARED_KERNEL_PMD) {
24710 +
24711 +#ifdef CONFIG_PAX_PER_CPU_PGD
24712 + unsigned long cpu;
24713 +#else
24714 struct page *page;
24715 +#endif
24716
24717 +#ifdef CONFIG_PAX_PER_CPU_PGD
24718 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
24719 + pgd_t *pgd = get_cpu_pgd(cpu);
24720 +#else
24721 list_for_each_entry(page, &pgd_list, lru) {
24722 - pgd_t *pgd;
24723 + pgd_t *pgd = (pgd_t *)page_address(page);
24724 +#endif
24725 +
24726 pud_t *pud;
24727 pmd_t *pmd;
24728
24729 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
24730 + pgd += pgd_index(address);
24731 pud = pud_offset(pgd, address);
24732 pmd = pmd_offset(pud, address);
24733 set_pte_atomic((pte_t *)pmd, pte);
24734 }
24735 }
24736 #endif
24737 + pax_close_kernel();
24738 }
24739
24740 static int
24741 diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
24742 index f6ff57b..481690f 100644
24743 --- a/arch/x86/mm/pat.c
24744 +++ b/arch/x86/mm/pat.c
24745 @@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
24746
24747 if (!entry) {
24748 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
24749 - current->comm, current->pid, start, end);
24750 + current->comm, task_pid_nr(current), start, end);
24751 return -EINVAL;
24752 }
24753
24754 @@ -492,8 +492,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
24755 while (cursor < to) {
24756 if (!devmem_is_allowed(pfn)) {
24757 printk(KERN_INFO
24758 - "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
24759 - current->comm, from, to);
24760 + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
24761 + current->comm, from, to, cursor);
24762 return 0;
24763 }
24764 cursor += PAGE_SIZE;
24765 @@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
24766 printk(KERN_INFO
24767 "%s:%d ioremap_change_attr failed %s "
24768 "for %Lx-%Lx\n",
24769 - current->comm, current->pid,
24770 + current->comm, task_pid_nr(current),
24771 cattr_name(flags),
24772 base, (unsigned long long)(base + size));
24773 return -EINVAL;
24774 @@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
24775 if (want_flags != flags) {
24776 printk(KERN_WARNING
24777 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
24778 - current->comm, current->pid,
24779 + current->comm, task_pid_nr(current),
24780 cattr_name(want_flags),
24781 (unsigned long long)paddr,
24782 (unsigned long long)(paddr + size),
24783 @@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
24784 free_memtype(paddr, paddr + size);
24785 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
24786 " for %Lx-%Lx, got %s\n",
24787 - current->comm, current->pid,
24788 + current->comm, task_pid_nr(current),
24789 cattr_name(want_flags),
24790 (unsigned long long)paddr,
24791 (unsigned long long)(paddr + size),
24792 diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
24793 index 9f0614d..92ae64a 100644
24794 --- a/arch/x86/mm/pf_in.c
24795 +++ b/arch/x86/mm/pf_in.c
24796 @@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
24797 int i;
24798 enum reason_type rv = OTHERS;
24799
24800 - p = (unsigned char *)ins_addr;
24801 + p = (unsigned char *)ktla_ktva(ins_addr);
24802 p += skip_prefix(p, &prf);
24803 p += get_opcode(p, &opcode);
24804
24805 @@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
24806 struct prefix_bits prf;
24807 int i;
24808
24809 - p = (unsigned char *)ins_addr;
24810 + p = (unsigned char *)ktla_ktva(ins_addr);
24811 p += skip_prefix(p, &prf);
24812 p += get_opcode(p, &opcode);
24813
24814 @@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
24815 struct prefix_bits prf;
24816 int i;
24817
24818 - p = (unsigned char *)ins_addr;
24819 + p = (unsigned char *)ktla_ktva(ins_addr);
24820 p += skip_prefix(p, &prf);
24821 p += get_opcode(p, &opcode);
24822
24823 @@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
24824 struct prefix_bits prf;
24825 int i;
24826
24827 - p = (unsigned char *)ins_addr;
24828 + p = (unsigned char *)ktla_ktva(ins_addr);
24829 p += skip_prefix(p, &prf);
24830 p += get_opcode(p, &opcode);
24831 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
24832 @@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
24833 struct prefix_bits prf;
24834 int i;
24835
24836 - p = (unsigned char *)ins_addr;
24837 + p = (unsigned char *)ktla_ktva(ins_addr);
24838 p += skip_prefix(p, &prf);
24839 p += get_opcode(p, &opcode);
24840 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
24841 diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
24842 index 8573b83..c3b1a30 100644
24843 --- a/arch/x86/mm/pgtable.c
24844 +++ b/arch/x86/mm/pgtable.c
24845 @@ -84,10 +84,52 @@ static inline void pgd_list_del(pgd_t *pgd)
24846 list_del(&page->lru);
24847 }
24848
24849 -#define UNSHARED_PTRS_PER_PGD \
24850 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
24851 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24852 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
24853
24854 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
24855 +{
24856 + while (count--)
24857 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
24858 +}
24859 +#endif
24860
24861 +#ifdef CONFIG_PAX_PER_CPU_PGD
24862 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
24863 +{
24864 + while (count--)
24865 +
24866 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24867 + *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
24868 +#else
24869 + *dst++ = *src++;
24870 +#endif
24871 +
24872 +}
24873 +#endif
24874 +
24875 +#ifdef CONFIG_X86_64
24876 +#define pxd_t pud_t
24877 +#define pyd_t pgd_t
24878 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
24879 +#define pxd_free(mm, pud) pud_free((mm), (pud))
24880 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
24881 +#define pyd_offset(mm, address) pgd_offset((mm), (address))
24882 +#define PYD_SIZE PGDIR_SIZE
24883 +#else
24884 +#define pxd_t pmd_t
24885 +#define pyd_t pud_t
24886 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
24887 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
24888 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
24889 +#define pyd_offset(mm, address) pud_offset((mm), (address))
24890 +#define PYD_SIZE PUD_SIZE
24891 +#endif
24892 +
24893 +#ifdef CONFIG_PAX_PER_CPU_PGD
24894 +static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
24895 +static inline void pgd_dtor(pgd_t *pgd) {}
24896 +#else
24897 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
24898 {
24899 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
24900 @@ -128,6 +170,7 @@ static void pgd_dtor(pgd_t *pgd)
24901 pgd_list_del(pgd);
24902 spin_unlock(&pgd_lock);
24903 }
24904 +#endif
24905
24906 /*
24907 * List of all pgd's needed for non-PAE so it can invalidate entries
24908 @@ -140,7 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
24909 * -- wli
24910 */
24911
24912 -#ifdef CONFIG_X86_PAE
24913 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
24914 /*
24915 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
24916 * updating the top-level pagetable entries to guarantee the
24917 @@ -152,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
24918 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
24919 * and initialize the kernel pmds here.
24920 */
24921 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
24922 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
24923
24924 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
24925 {
24926 @@ -170,36 +213,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
24927 */
24928 flush_tlb_mm(mm);
24929 }
24930 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
24931 +#define PREALLOCATED_PXDS USER_PGD_PTRS
24932 #else /* !CONFIG_X86_PAE */
24933
24934 /* No need to prepopulate any pagetable entries in non-PAE modes. */
24935 -#define PREALLOCATED_PMDS 0
24936 +#define PREALLOCATED_PXDS 0
24937
24938 #endif /* CONFIG_X86_PAE */
24939
24940 -static void free_pmds(pmd_t *pmds[])
24941 +static void free_pxds(pxd_t *pxds[])
24942 {
24943 int i;
24944
24945 - for(i = 0; i < PREALLOCATED_PMDS; i++)
24946 - if (pmds[i])
24947 - free_page((unsigned long)pmds[i]);
24948 + for(i = 0; i < PREALLOCATED_PXDS; i++)
24949 + if (pxds[i])
24950 + free_page((unsigned long)pxds[i]);
24951 }
24952
24953 -static int preallocate_pmds(pmd_t *pmds[])
24954 +static int preallocate_pxds(pxd_t *pxds[])
24955 {
24956 int i;
24957 bool failed = false;
24958
24959 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
24960 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
24961 - if (pmd == NULL)
24962 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
24963 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
24964 + if (pxd == NULL)
24965 failed = true;
24966 - pmds[i] = pmd;
24967 + pxds[i] = pxd;
24968 }
24969
24970 if (failed) {
24971 - free_pmds(pmds);
24972 + free_pxds(pxds);
24973 return -ENOMEM;
24974 }
24975
24976 @@ -212,51 +257,55 @@ static int preallocate_pmds(pmd_t *pmds[])
24977 * preallocate which never got a corresponding vma will need to be
24978 * freed manually.
24979 */
24980 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
24981 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
24982 {
24983 int i;
24984
24985 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
24986 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
24987 pgd_t pgd = pgdp[i];
24988
24989 if (pgd_val(pgd) != 0) {
24990 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
24991 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
24992
24993 - pgdp[i] = native_make_pgd(0);
24994 + set_pgd(pgdp + i, native_make_pgd(0));
24995
24996 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
24997 - pmd_free(mm, pmd);
24998 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
24999 + pxd_free(mm, pxd);
25000 }
25001 }
25002 }
25003
25004 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
25005 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
25006 {
25007 - pud_t *pud;
25008 + pyd_t *pyd;
25009 unsigned long addr;
25010 int i;
25011
25012 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
25013 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
25014 return;
25015
25016 - pud = pud_offset(pgd, 0);
25017 +#ifdef CONFIG_X86_64
25018 + pyd = pyd_offset(mm, 0L);
25019 +#else
25020 + pyd = pyd_offset(pgd, 0L);
25021 +#endif
25022
25023 - for (addr = i = 0; i < PREALLOCATED_PMDS;
25024 - i++, pud++, addr += PUD_SIZE) {
25025 - pmd_t *pmd = pmds[i];
25026 + for (addr = i = 0; i < PREALLOCATED_PXDS;
25027 + i++, pyd++, addr += PYD_SIZE) {
25028 + pxd_t *pxd = pxds[i];
25029
25030 if (i >= KERNEL_PGD_BOUNDARY)
25031 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
25032 - sizeof(pmd_t) * PTRS_PER_PMD);
25033 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
25034 + sizeof(pxd_t) * PTRS_PER_PMD);
25035
25036 - pud_populate(mm, pud, pmd);
25037 + pyd_populate(mm, pyd, pxd);
25038 }
25039 }
25040
25041 pgd_t *pgd_alloc(struct mm_struct *mm)
25042 {
25043 pgd_t *pgd;
25044 - pmd_t *pmds[PREALLOCATED_PMDS];
25045 + pxd_t *pxds[PREALLOCATED_PXDS];
25046
25047 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
25048
25049 @@ -265,11 +314,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
25050
25051 mm->pgd = pgd;
25052
25053 - if (preallocate_pmds(pmds) != 0)
25054 + if (preallocate_pxds(pxds) != 0)
25055 goto out_free_pgd;
25056
25057 if (paravirt_pgd_alloc(mm) != 0)
25058 - goto out_free_pmds;
25059 + goto out_free_pxds;
25060
25061 /*
25062 * Make sure that pre-populating the pmds is atomic with
25063 @@ -279,14 +328,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
25064 spin_lock(&pgd_lock);
25065
25066 pgd_ctor(mm, pgd);
25067 - pgd_prepopulate_pmd(mm, pgd, pmds);
25068 + pgd_prepopulate_pxd(mm, pgd, pxds);
25069
25070 spin_unlock(&pgd_lock);
25071
25072 return pgd;
25073
25074 -out_free_pmds:
25075 - free_pmds(pmds);
25076 +out_free_pxds:
25077 + free_pxds(pxds);
25078 out_free_pgd:
25079 free_page((unsigned long)pgd);
25080 out:
25081 @@ -295,7 +344,7 @@ out:
25082
25083 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
25084 {
25085 - pgd_mop_up_pmds(mm, pgd);
25086 + pgd_mop_up_pxds(mm, pgd);
25087 pgd_dtor(pgd);
25088 paravirt_pgd_free(mm, pgd);
25089 free_page((unsigned long)pgd);
25090 diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
25091 index cac7184..09a39fa 100644
25092 --- a/arch/x86/mm/pgtable_32.c
25093 +++ b/arch/x86/mm/pgtable_32.c
25094 @@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
25095 return;
25096 }
25097 pte = pte_offset_kernel(pmd, vaddr);
25098 +
25099 + pax_open_kernel();
25100 if (pte_val(pteval))
25101 set_pte_at(&init_mm, vaddr, pte, pteval);
25102 else
25103 pte_clear(&init_mm, vaddr, pte);
25104 + pax_close_kernel();
25105
25106 /*
25107 * It's enough to flush this one mapping.
25108 diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
25109 index 410531d..0f16030 100644
25110 --- a/arch/x86/mm/setup_nx.c
25111 +++ b/arch/x86/mm/setup_nx.c
25112 @@ -5,8 +5,10 @@
25113 #include <asm/pgtable.h>
25114 #include <asm/proto.h>
25115
25116 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
25117 static int disable_nx __cpuinitdata;
25118
25119 +#ifndef CONFIG_PAX_PAGEEXEC
25120 /*
25121 * noexec = on|off
25122 *
25123 @@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
25124 return 0;
25125 }
25126 early_param("noexec", noexec_setup);
25127 +#endif
25128 +
25129 +#endif
25130
25131 void __cpuinit x86_configure_nx(void)
25132 {
25133 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
25134 if (cpu_has_nx && !disable_nx)
25135 __supported_pte_mask |= _PAGE_NX;
25136 else
25137 +#endif
25138 __supported_pte_mask &= ~_PAGE_NX;
25139 }
25140
25141 diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
25142 index d6c0418..06a0ad5 100644
25143 --- a/arch/x86/mm/tlb.c
25144 +++ b/arch/x86/mm/tlb.c
25145 @@ -65,7 +65,11 @@ void leave_mm(int cpu)
25146 BUG();
25147 cpumask_clear_cpu(cpu,
25148 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
25149 +
25150 +#ifndef CONFIG_PAX_PER_CPU_PGD
25151 load_cr3(swapper_pg_dir);
25152 +#endif
25153 +
25154 }
25155 EXPORT_SYMBOL_GPL(leave_mm);
25156
25157 diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
25158 index 6687022..ceabcfa 100644
25159 --- a/arch/x86/net/bpf_jit.S
25160 +++ b/arch/x86/net/bpf_jit.S
25161 @@ -9,6 +9,7 @@
25162 */
25163 #include <linux/linkage.h>
25164 #include <asm/dwarf2.h>
25165 +#include <asm/alternative-asm.h>
25166
25167 /*
25168 * Calling convention :
25169 @@ -35,6 +36,7 @@ sk_load_word:
25170 jle bpf_slow_path_word
25171 mov (SKBDATA,%rsi),%eax
25172 bswap %eax /* ntohl() */
25173 + pax_force_retaddr
25174 ret
25175
25176
25177 @@ -53,6 +55,7 @@ sk_load_half:
25178 jle bpf_slow_path_half
25179 movzwl (SKBDATA,%rsi),%eax
25180 rol $8,%ax # ntohs()
25181 + pax_force_retaddr
25182 ret
25183
25184 sk_load_byte_ind:
25185 @@ -66,6 +69,7 @@ sk_load_byte:
25186 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
25187 jle bpf_slow_path_byte
25188 movzbl (SKBDATA,%rsi),%eax
25189 + pax_force_retaddr
25190 ret
25191
25192 /**
25193 @@ -82,6 +86,7 @@ ENTRY(sk_load_byte_msh)
25194 movzbl (SKBDATA,%rsi),%ebx
25195 and $15,%bl
25196 shl $2,%bl
25197 + pax_force_retaddr
25198 ret
25199 CFI_ENDPROC
25200 ENDPROC(sk_load_byte_msh)
25201 @@ -91,6 +96,7 @@ bpf_error:
25202 xor %eax,%eax
25203 mov -8(%rbp),%rbx
25204 leaveq
25205 + pax_force_retaddr
25206 ret
25207
25208 /* rsi contains offset and can be scratched */
25209 @@ -113,6 +119,7 @@ bpf_slow_path_word:
25210 js bpf_error
25211 mov -12(%rbp),%eax
25212 bswap %eax
25213 + pax_force_retaddr
25214 ret
25215
25216 bpf_slow_path_half:
25217 @@ -121,12 +128,14 @@ bpf_slow_path_half:
25218 mov -12(%rbp),%ax
25219 rol $8,%ax
25220 movzwl %ax,%eax
25221 + pax_force_retaddr
25222 ret
25223
25224 bpf_slow_path_byte:
25225 bpf_slow_path_common(1)
25226 js bpf_error
25227 movzbl -12(%rbp),%eax
25228 + pax_force_retaddr
25229 ret
25230
25231 bpf_slow_path_byte_msh:
25232 @@ -137,4 +146,5 @@ bpf_slow_path_byte_msh:
25233 and $15,%al
25234 shl $2,%al
25235 xchg %eax,%ebx
25236 + pax_force_retaddr
25237 ret
25238 diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
25239 index 7c1b765..8c072c6 100644
25240 --- a/arch/x86/net/bpf_jit_comp.c
25241 +++ b/arch/x86/net/bpf_jit_comp.c
25242 @@ -117,6 +117,10 @@ static inline void bpf_flush_icache(void *start, void *end)
25243 set_fs(old_fs);
25244 }
25245
25246 +struct bpf_jit_work {
25247 + struct work_struct work;
25248 + void *image;
25249 +};
25250
25251 void bpf_jit_compile(struct sk_filter *fp)
25252 {
25253 @@ -141,6 +145,10 @@ void bpf_jit_compile(struct sk_filter *fp)
25254 if (addrs == NULL)
25255 return;
25256
25257 + fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
25258 + if (!fp->work)
25259 + goto out;
25260 +
25261 /* Before first pass, make a rough estimation of addrs[]
25262 * each bpf instruction is translated to less than 64 bytes
25263 */
25264 @@ -476,7 +484,7 @@ void bpf_jit_compile(struct sk_filter *fp)
25265 func = sk_load_word;
25266 common_load: seen |= SEEN_DATAREF;
25267 if ((int)K < 0)
25268 - goto out;
25269 + goto error;
25270 t_offset = func - (image + addrs[i]);
25271 EMIT1_off32(0xbe, K); /* mov imm32,%esi */
25272 EMIT1_off32(0xe8, t_offset); /* call */
25273 @@ -586,17 +594,18 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
25274 break;
25275 default:
25276 /* hmm, too complex filter, give up with jit compiler */
25277 - goto out;
25278 + goto error;
25279 }
25280 ilen = prog - temp;
25281 if (image) {
25282 if (unlikely(proglen + ilen > oldproglen)) {
25283 pr_err("bpb_jit_compile fatal error\n");
25284 - kfree(addrs);
25285 - module_free(NULL, image);
25286 - return;
25287 + module_free_exec(NULL, image);
25288 + goto error;
25289 }
25290 + pax_open_kernel();
25291 memcpy(image + proglen, temp, ilen);
25292 + pax_close_kernel();
25293 }
25294 proglen += ilen;
25295 addrs[i] = proglen;
25296 @@ -617,11 +626,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
25297 break;
25298 }
25299 if (proglen == oldproglen) {
25300 - image = module_alloc(max_t(unsigned int,
25301 - proglen,
25302 - sizeof(struct work_struct)));
25303 + image = module_alloc_exec(proglen);
25304 if (!image)
25305 - goto out;
25306 + goto error;
25307 }
25308 oldproglen = proglen;
25309 }
25310 @@ -637,7 +644,10 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
25311 bpf_flush_icache(image, image + proglen);
25312
25313 fp->bpf_func = (void *)image;
25314 - }
25315 + } else
25316 +error:
25317 + kfree(fp->work);
25318 +
25319 out:
25320 kfree(addrs);
25321 return;
25322 @@ -645,18 +655,20 @@ out:
25323
25324 static void jit_free_defer(struct work_struct *arg)
25325 {
25326 - module_free(NULL, arg);
25327 + module_free_exec(NULL, ((struct bpf_jit_work *)arg)->image);
25328 + kfree(arg);
25329 }
25330
25331 /* run from softirq, we must use a work_struct to call
25332 - * module_free() from process context
25333 + * module_free_exec() from process context
25334 */
25335 void bpf_jit_free(struct sk_filter *fp)
25336 {
25337 if (fp->bpf_func != sk_run_filter) {
25338 - struct work_struct *work = (struct work_struct *)fp->bpf_func;
25339 + struct work_struct *work = &fp->work->work;
25340
25341 INIT_WORK(work, jit_free_defer);
25342 + fp->work->image = fp->bpf_func;
25343 schedule_work(work);
25344 }
25345 }
25346 diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
25347 index bff89df..377758a 100644
25348 --- a/arch/x86/oprofile/backtrace.c
25349 +++ b/arch/x86/oprofile/backtrace.c
25350 @@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
25351 struct stack_frame_ia32 *fp;
25352 unsigned long bytes;
25353
25354 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
25355 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
25356 if (bytes != sizeof(bufhead))
25357 return NULL;
25358
25359 - fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
25360 + fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
25361
25362 oprofile_add_trace(bufhead[0].return_address);
25363
25364 @@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
25365 struct stack_frame bufhead[2];
25366 unsigned long bytes;
25367
25368 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
25369 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
25370 if (bytes != sizeof(bufhead))
25371 return NULL;
25372
25373 @@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
25374 {
25375 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
25376
25377 - if (!user_mode_vm(regs)) {
25378 + if (!user_mode(regs)) {
25379 unsigned long stack = kernel_stack_pointer(regs);
25380 if (depth)
25381 dump_trace(NULL, regs, (unsigned long *)stack, 0,
25382 diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
25383 index cb29191..036766d 100644
25384 --- a/arch/x86/pci/mrst.c
25385 +++ b/arch/x86/pci/mrst.c
25386 @@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
25387 printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
25388 pci_mmcfg_late_init();
25389 pcibios_enable_irq = mrst_pci_irq_enable;
25390 - pci_root_ops = pci_mrst_ops;
25391 + pax_open_kernel();
25392 + memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
25393 + pax_close_kernel();
25394 /* Continue with standard init */
25395 return 1;
25396 }
25397 diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
25398 index da8fe05..7ee6704 100644
25399 --- a/arch/x86/pci/pcbios.c
25400 +++ b/arch/x86/pci/pcbios.c
25401 @@ -79,50 +79,93 @@ union bios32 {
25402 static struct {
25403 unsigned long address;
25404 unsigned short segment;
25405 -} bios32_indirect = { 0, __KERNEL_CS };
25406 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
25407
25408 /*
25409 * Returns the entry point for the given service, NULL on error
25410 */
25411
25412 -static unsigned long bios32_service(unsigned long service)
25413 +static unsigned long __devinit bios32_service(unsigned long service)
25414 {
25415 unsigned char return_code; /* %al */
25416 unsigned long address; /* %ebx */
25417 unsigned long length; /* %ecx */
25418 unsigned long entry; /* %edx */
25419 unsigned long flags;
25420 + struct desc_struct d, *gdt;
25421
25422 local_irq_save(flags);
25423 - __asm__("lcall *(%%edi); cld"
25424 +
25425 + gdt = get_cpu_gdt_table(smp_processor_id());
25426 +
25427 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
25428 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
25429 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
25430 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
25431 +
25432 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
25433 : "=a" (return_code),
25434 "=b" (address),
25435 "=c" (length),
25436 "=d" (entry)
25437 : "0" (service),
25438 "1" (0),
25439 - "D" (&bios32_indirect));
25440 + "D" (&bios32_indirect),
25441 + "r"(__PCIBIOS_DS)
25442 + : "memory");
25443 +
25444 + pax_open_kernel();
25445 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
25446 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
25447 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
25448 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
25449 + pax_close_kernel();
25450 +
25451 local_irq_restore(flags);
25452
25453 switch (return_code) {
25454 - case 0:
25455 - return address + entry;
25456 - case 0x80: /* Not present */
25457 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
25458 - return 0;
25459 - default: /* Shouldn't happen */
25460 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
25461 - service, return_code);
25462 + case 0: {
25463 + int cpu;
25464 + unsigned char flags;
25465 +
25466 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
25467 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
25468 + printk(KERN_WARNING "bios32_service: not valid\n");
25469 return 0;
25470 + }
25471 + address = address + PAGE_OFFSET;
25472 + length += 16UL; /* some BIOSs underreport this... */
25473 + flags = 4;
25474 + if (length >= 64*1024*1024) {
25475 + length >>= PAGE_SHIFT;
25476 + flags |= 8;
25477 + }
25478 +
25479 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
25480 + gdt = get_cpu_gdt_table(cpu);
25481 + pack_descriptor(&d, address, length, 0x9b, flags);
25482 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
25483 + pack_descriptor(&d, address, length, 0x93, flags);
25484 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
25485 + }
25486 + return entry;
25487 + }
25488 + case 0x80: /* Not present */
25489 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
25490 + return 0;
25491 + default: /* Shouldn't happen */
25492 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
25493 + service, return_code);
25494 + return 0;
25495 }
25496 }
25497
25498 static struct {
25499 unsigned long address;
25500 unsigned short segment;
25501 -} pci_indirect = { 0, __KERNEL_CS };
25502 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
25503
25504 -static int pci_bios_present;
25505 +static int pci_bios_present __read_only;
25506
25507 static int __devinit check_pcibios(void)
25508 {
25509 @@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
25510 unsigned long flags, pcibios_entry;
25511
25512 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
25513 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
25514 + pci_indirect.address = pcibios_entry;
25515
25516 local_irq_save(flags);
25517 - __asm__(
25518 - "lcall *(%%edi); cld\n\t"
25519 + __asm__("movw %w6, %%ds\n\t"
25520 + "lcall *%%ss:(%%edi); cld\n\t"
25521 + "push %%ss\n\t"
25522 + "pop %%ds\n\t"
25523 "jc 1f\n\t"
25524 "xor %%ah, %%ah\n"
25525 "1:"
25526 @@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
25527 "=b" (ebx),
25528 "=c" (ecx)
25529 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
25530 - "D" (&pci_indirect)
25531 + "D" (&pci_indirect),
25532 + "r" (__PCIBIOS_DS)
25533 : "memory");
25534 local_irq_restore(flags);
25535
25536 @@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
25537
25538 switch (len) {
25539 case 1:
25540 - __asm__("lcall *(%%esi); cld\n\t"
25541 + __asm__("movw %w6, %%ds\n\t"
25542 + "lcall *%%ss:(%%esi); cld\n\t"
25543 + "push %%ss\n\t"
25544 + "pop %%ds\n\t"
25545 "jc 1f\n\t"
25546 "xor %%ah, %%ah\n"
25547 "1:"
25548 @@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
25549 : "1" (PCIBIOS_READ_CONFIG_BYTE),
25550 "b" (bx),
25551 "D" ((long)reg),
25552 - "S" (&pci_indirect));
25553 + "S" (&pci_indirect),
25554 + "r" (__PCIBIOS_DS));
25555 /*
25556 * Zero-extend the result beyond 8 bits, do not trust the
25557 * BIOS having done it:
25558 @@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
25559 *value &= 0xff;
25560 break;
25561 case 2:
25562 - __asm__("lcall *(%%esi); cld\n\t"
25563 + __asm__("movw %w6, %%ds\n\t"
25564 + "lcall *%%ss:(%%esi); cld\n\t"
25565 + "push %%ss\n\t"
25566 + "pop %%ds\n\t"
25567 "jc 1f\n\t"
25568 "xor %%ah, %%ah\n"
25569 "1:"
25570 @@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
25571 : "1" (PCIBIOS_READ_CONFIG_WORD),
25572 "b" (bx),
25573 "D" ((long)reg),
25574 - "S" (&pci_indirect));
25575 + "S" (&pci_indirect),
25576 + "r" (__PCIBIOS_DS));
25577 /*
25578 * Zero-extend the result beyond 16 bits, do not trust the
25579 * BIOS having done it:
25580 @@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
25581 *value &= 0xffff;
25582 break;
25583 case 4:
25584 - __asm__("lcall *(%%esi); cld\n\t"
25585 + __asm__("movw %w6, %%ds\n\t"
25586 + "lcall *%%ss:(%%esi); cld\n\t"
25587 + "push %%ss\n\t"
25588 + "pop %%ds\n\t"
25589 "jc 1f\n\t"
25590 "xor %%ah, %%ah\n"
25591 "1:"
25592 @@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
25593 : "1" (PCIBIOS_READ_CONFIG_DWORD),
25594 "b" (bx),
25595 "D" ((long)reg),
25596 - "S" (&pci_indirect));
25597 + "S" (&pci_indirect),
25598 + "r" (__PCIBIOS_DS));
25599 break;
25600 }
25601
25602 @@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
25603
25604 switch (len) {
25605 case 1:
25606 - __asm__("lcall *(%%esi); cld\n\t"
25607 + __asm__("movw %w6, %%ds\n\t"
25608 + "lcall *%%ss:(%%esi); cld\n\t"
25609 + "push %%ss\n\t"
25610 + "pop %%ds\n\t"
25611 "jc 1f\n\t"
25612 "xor %%ah, %%ah\n"
25613 "1:"
25614 @@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
25615 "c" (value),
25616 "b" (bx),
25617 "D" ((long)reg),
25618 - "S" (&pci_indirect));
25619 + "S" (&pci_indirect),
25620 + "r" (__PCIBIOS_DS));
25621 break;
25622 case 2:
25623 - __asm__("lcall *(%%esi); cld\n\t"
25624 + __asm__("movw %w6, %%ds\n\t"
25625 + "lcall *%%ss:(%%esi); cld\n\t"
25626 + "push %%ss\n\t"
25627 + "pop %%ds\n\t"
25628 "jc 1f\n\t"
25629 "xor %%ah, %%ah\n"
25630 "1:"
25631 @@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
25632 "c" (value),
25633 "b" (bx),
25634 "D" ((long)reg),
25635 - "S" (&pci_indirect));
25636 + "S" (&pci_indirect),
25637 + "r" (__PCIBIOS_DS));
25638 break;
25639 case 4:
25640 - __asm__("lcall *(%%esi); cld\n\t"
25641 + __asm__("movw %w6, %%ds\n\t"
25642 + "lcall *%%ss:(%%esi); cld\n\t"
25643 + "push %%ss\n\t"
25644 + "pop %%ds\n\t"
25645 "jc 1f\n\t"
25646 "xor %%ah, %%ah\n"
25647 "1:"
25648 @@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
25649 "c" (value),
25650 "b" (bx),
25651 "D" ((long)reg),
25652 - "S" (&pci_indirect));
25653 + "S" (&pci_indirect),
25654 + "r" (__PCIBIOS_DS));
25655 break;
25656 }
25657
25658 @@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
25659
25660 DBG("PCI: Fetching IRQ routing table... ");
25661 __asm__("push %%es\n\t"
25662 + "movw %w8, %%ds\n\t"
25663 "push %%ds\n\t"
25664 "pop %%es\n\t"
25665 - "lcall *(%%esi); cld\n\t"
25666 + "lcall *%%ss:(%%esi); cld\n\t"
25667 "pop %%es\n\t"
25668 + "push %%ss\n\t"
25669 + "pop %%ds\n"
25670 "jc 1f\n\t"
25671 "xor %%ah, %%ah\n"
25672 "1:"
25673 @@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
25674 "1" (0),
25675 "D" ((long) &opt),
25676 "S" (&pci_indirect),
25677 - "m" (opt)
25678 + "m" (opt),
25679 + "r" (__PCIBIOS_DS)
25680 : "memory");
25681 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
25682 if (ret & 0xff00)
25683 @@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
25684 {
25685 int ret;
25686
25687 - __asm__("lcall *(%%esi); cld\n\t"
25688 + __asm__("movw %w5, %%ds\n\t"
25689 + "lcall *%%ss:(%%esi); cld\n\t"
25690 + "push %%ss\n\t"
25691 + "pop %%ds\n"
25692 "jc 1f\n\t"
25693 "xor %%ah, %%ah\n"
25694 "1:"
25695 @@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
25696 : "0" (PCIBIOS_SET_PCI_HW_INT),
25697 "b" ((dev->bus->number << 8) | dev->devfn),
25698 "c" ((irq << 8) | (pin + 10)),
25699 - "S" (&pci_indirect));
25700 + "S" (&pci_indirect),
25701 + "r" (__PCIBIOS_DS));
25702 return !(ret & 0xff00);
25703 }
25704 EXPORT_SYMBOL(pcibios_set_irq_routing);
25705 diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
25706 index 40e4469..1ab536e 100644
25707 --- a/arch/x86/platform/efi/efi_32.c
25708 +++ b/arch/x86/platform/efi/efi_32.c
25709 @@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
25710 {
25711 struct desc_ptr gdt_descr;
25712
25713 +#ifdef CONFIG_PAX_KERNEXEC
25714 + struct desc_struct d;
25715 +#endif
25716 +
25717 local_irq_save(efi_rt_eflags);
25718
25719 load_cr3(initial_page_table);
25720 __flush_tlb_all();
25721
25722 +#ifdef CONFIG_PAX_KERNEXEC
25723 + pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
25724 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
25725 + pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
25726 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
25727 +#endif
25728 +
25729 gdt_descr.address = __pa(get_cpu_gdt_table(0));
25730 gdt_descr.size = GDT_SIZE - 1;
25731 load_gdt(&gdt_descr);
25732 @@ -58,6 +69,14 @@ void efi_call_phys_epilog(void)
25733 {
25734 struct desc_ptr gdt_descr;
25735
25736 +#ifdef CONFIG_PAX_KERNEXEC
25737 + struct desc_struct d;
25738 +
25739 + memset(&d, 0, sizeof d);
25740 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
25741 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
25742 +#endif
25743 +
25744 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
25745 gdt_descr.size = GDT_SIZE - 1;
25746 load_gdt(&gdt_descr);
25747 diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
25748 index fbe66e6..c5c0dd2 100644
25749 --- a/arch/x86/platform/efi/efi_stub_32.S
25750 +++ b/arch/x86/platform/efi/efi_stub_32.S
25751 @@ -6,7 +6,9 @@
25752 */
25753
25754 #include <linux/linkage.h>
25755 +#include <linux/init.h>
25756 #include <asm/page_types.h>
25757 +#include <asm/segment.h>
25758
25759 /*
25760 * efi_call_phys(void *, ...) is a function with variable parameters.
25761 @@ -20,7 +22,7 @@
25762 * service functions will comply with gcc calling convention, too.
25763 */
25764
25765 -.text
25766 +__INIT
25767 ENTRY(efi_call_phys)
25768 /*
25769 * 0. The function can only be called in Linux kernel. So CS has been
25770 @@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
25771 * The mapping of lower virtual memory has been created in prelog and
25772 * epilog.
25773 */
25774 - movl $1f, %edx
25775 - subl $__PAGE_OFFSET, %edx
25776 - jmp *%edx
25777 + movl $(__KERNEXEC_EFI_DS), %edx
25778 + mov %edx, %ds
25779 + mov %edx, %es
25780 + mov %edx, %ss
25781 + ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
25782 1:
25783
25784 /*
25785 @@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
25786 * parameter 2, ..., param n. To make things easy, we save the return
25787 * address of efi_call_phys in a global variable.
25788 */
25789 - popl %edx
25790 - movl %edx, saved_return_addr
25791 - /* get the function pointer into ECX*/
25792 - popl %ecx
25793 - movl %ecx, efi_rt_function_ptr
25794 - movl $2f, %edx
25795 - subl $__PAGE_OFFSET, %edx
25796 - pushl %edx
25797 + popl (saved_return_addr)
25798 + popl (efi_rt_function_ptr)
25799
25800 /*
25801 * 3. Clear PG bit in %CR0.
25802 @@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
25803 /*
25804 * 5. Call the physical function.
25805 */
25806 - jmp *%ecx
25807 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
25808
25809 -2:
25810 /*
25811 * 6. After EFI runtime service returns, control will return to
25812 * following instruction. We'd better readjust stack pointer first.
25813 @@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
25814 movl %cr0, %edx
25815 orl $0x80000000, %edx
25816 movl %edx, %cr0
25817 - jmp 1f
25818 -1:
25819 +
25820 /*
25821 * 8. Now restore the virtual mode from flat mode by
25822 * adding EIP with PAGE_OFFSET.
25823 */
25824 - movl $1f, %edx
25825 - jmp *%edx
25826 + ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
25827 1:
25828 + movl $(__KERNEL_DS), %edx
25829 + mov %edx, %ds
25830 + mov %edx, %es
25831 + mov %edx, %ss
25832
25833 /*
25834 * 9. Balance the stack. And because EAX contain the return value,
25835 * we'd better not clobber it.
25836 */
25837 - leal efi_rt_function_ptr, %edx
25838 - movl (%edx), %ecx
25839 - pushl %ecx
25840 + pushl (efi_rt_function_ptr)
25841
25842 /*
25843 - * 10. Push the saved return address onto the stack and return.
25844 + * 10. Return to the saved return address.
25845 */
25846 - leal saved_return_addr, %edx
25847 - movl (%edx), %ecx
25848 - pushl %ecx
25849 - ret
25850 + jmpl *(saved_return_addr)
25851 ENDPROC(efi_call_phys)
25852 .previous
25853
25854 -.data
25855 +__INITDATA
25856 saved_return_addr:
25857 .long 0
25858 efi_rt_function_ptr:
25859 diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
25860 index 4c07cca..2c8427d 100644
25861 --- a/arch/x86/platform/efi/efi_stub_64.S
25862 +++ b/arch/x86/platform/efi/efi_stub_64.S
25863 @@ -7,6 +7,7 @@
25864 */
25865
25866 #include <linux/linkage.h>
25867 +#include <asm/alternative-asm.h>
25868
25869 #define SAVE_XMM \
25870 mov %rsp, %rax; \
25871 @@ -40,6 +41,7 @@ ENTRY(efi_call0)
25872 call *%rdi
25873 addq $32, %rsp
25874 RESTORE_XMM
25875 + pax_force_retaddr 0, 1
25876 ret
25877 ENDPROC(efi_call0)
25878
25879 @@ -50,6 +52,7 @@ ENTRY(efi_call1)
25880 call *%rdi
25881 addq $32, %rsp
25882 RESTORE_XMM
25883 + pax_force_retaddr 0, 1
25884 ret
25885 ENDPROC(efi_call1)
25886
25887 @@ -60,6 +63,7 @@ ENTRY(efi_call2)
25888 call *%rdi
25889 addq $32, %rsp
25890 RESTORE_XMM
25891 + pax_force_retaddr 0, 1
25892 ret
25893 ENDPROC(efi_call2)
25894
25895 @@ -71,6 +75,7 @@ ENTRY(efi_call3)
25896 call *%rdi
25897 addq $32, %rsp
25898 RESTORE_XMM
25899 + pax_force_retaddr 0, 1
25900 ret
25901 ENDPROC(efi_call3)
25902
25903 @@ -83,6 +88,7 @@ ENTRY(efi_call4)
25904 call *%rdi
25905 addq $32, %rsp
25906 RESTORE_XMM
25907 + pax_force_retaddr 0, 1
25908 ret
25909 ENDPROC(efi_call4)
25910
25911 @@ -96,6 +102,7 @@ ENTRY(efi_call5)
25912 call *%rdi
25913 addq $48, %rsp
25914 RESTORE_XMM
25915 + pax_force_retaddr 0, 1
25916 ret
25917 ENDPROC(efi_call5)
25918
25919 @@ -112,5 +119,6 @@ ENTRY(efi_call6)
25920 call *%rdi
25921 addq $48, %rsp
25922 RESTORE_XMM
25923 + pax_force_retaddr 0, 1
25924 ret
25925 ENDPROC(efi_call6)
25926 diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
25927 index 475e2cd..1b8e708 100644
25928 --- a/arch/x86/platform/mrst/mrst.c
25929 +++ b/arch/x86/platform/mrst/mrst.c
25930 @@ -76,18 +76,20 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
25931 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
25932 int sfi_mrtc_num;
25933
25934 -static void mrst_power_off(void)
25935 +static __noreturn void mrst_power_off(void)
25936 {
25937 if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
25938 intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 1);
25939 + BUG();
25940 }
25941
25942 -static void mrst_reboot(void)
25943 +static __noreturn void mrst_reboot(void)
25944 {
25945 if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
25946 intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 0);
25947 else
25948 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
25949 + BUG();
25950 }
25951
25952 /* parse all the mtimer info to a static mtimer array */
25953 diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
25954 index f10c0af..3ec1f95 100644
25955 --- a/arch/x86/power/cpu.c
25956 +++ b/arch/x86/power/cpu.c
25957 @@ -131,7 +131,7 @@ static void do_fpu_end(void)
25958 static void fix_processor_context(void)
25959 {
25960 int cpu = smp_processor_id();
25961 - struct tss_struct *t = &per_cpu(init_tss, cpu);
25962 + struct tss_struct *t = init_tss + cpu;
25963
25964 set_tss_desc(cpu, t); /*
25965 * This just modifies memory; should not be
25966 @@ -141,7 +141,9 @@ static void fix_processor_context(void)
25967 */
25968
25969 #ifdef CONFIG_X86_64
25970 + pax_open_kernel();
25971 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
25972 + pax_close_kernel();
25973
25974 syscall_init(); /* This sets MSR_*STAR and related */
25975 #endif
25976 diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
25977 index 5d17950..2253fc9 100644
25978 --- a/arch/x86/vdso/Makefile
25979 +++ b/arch/x86/vdso/Makefile
25980 @@ -137,7 +137,7 @@ quiet_cmd_vdso = VDSO $@
25981 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
25982 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
25983
25984 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
25985 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
25986 GCOV_PROFILE := n
25987
25988 #
25989 diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
25990 index 468d591..8e80a0a 100644
25991 --- a/arch/x86/vdso/vdso32-setup.c
25992 +++ b/arch/x86/vdso/vdso32-setup.c
25993 @@ -25,6 +25,7 @@
25994 #include <asm/tlbflush.h>
25995 #include <asm/vdso.h>
25996 #include <asm/proto.h>
25997 +#include <asm/mman.h>
25998
25999 enum {
26000 VDSO_DISABLED = 0,
26001 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
26002 void enable_sep_cpu(void)
26003 {
26004 int cpu = get_cpu();
26005 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
26006 + struct tss_struct *tss = init_tss + cpu;
26007
26008 if (!boot_cpu_has(X86_FEATURE_SEP)) {
26009 put_cpu();
26010 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
26011 gate_vma.vm_start = FIXADDR_USER_START;
26012 gate_vma.vm_end = FIXADDR_USER_END;
26013 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
26014 - gate_vma.vm_page_prot = __P101;
26015 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
26016 /*
26017 * Make sure the vDSO gets into every core dump.
26018 * Dumping its contents makes post-mortem fully interpretable later
26019 @@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26020 if (compat)
26021 addr = VDSO_HIGH_BASE;
26022 else {
26023 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
26024 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
26025 if (IS_ERR_VALUE(addr)) {
26026 ret = addr;
26027 goto up_fail;
26028 }
26029 }
26030
26031 - current->mm->context.vdso = (void *)addr;
26032 + current->mm->context.vdso = addr;
26033
26034 if (compat_uses_vma || !compat) {
26035 /*
26036 @@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26037 }
26038
26039 current_thread_info()->sysenter_return =
26040 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
26041 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
26042
26043 up_fail:
26044 if (ret)
26045 - current->mm->context.vdso = NULL;
26046 + current->mm->context.vdso = 0;
26047
26048 up_write(&mm->mmap_sem);
26049
26050 @@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
26051
26052 const char *arch_vma_name(struct vm_area_struct *vma)
26053 {
26054 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
26055 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
26056 return "[vdso]";
26057 +
26058 +#ifdef CONFIG_PAX_SEGMEXEC
26059 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
26060 + return "[vdso]";
26061 +#endif
26062 +
26063 return NULL;
26064 }
26065
26066 @@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
26067 * Check to see if the corresponding task was created in compat vdso
26068 * mode.
26069 */
26070 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
26071 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
26072 return &gate_vma;
26073 return NULL;
26074 }
26075 diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
26076 index 153407c..611cba9 100644
26077 --- a/arch/x86/vdso/vma.c
26078 +++ b/arch/x86/vdso/vma.c
26079 @@ -16,8 +16,6 @@
26080 #include <asm/vdso.h>
26081 #include <asm/page.h>
26082
26083 -unsigned int __read_mostly vdso_enabled = 1;
26084 -
26085 extern char vdso_start[], vdso_end[];
26086 extern unsigned short vdso_sync_cpuid;
26087
26088 @@ -96,7 +94,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
26089 * unaligned here as a result of stack start randomization.
26090 */
26091 addr = PAGE_ALIGN(addr);
26092 - addr = align_addr(addr, NULL, ALIGN_VDSO);
26093
26094 return addr;
26095 }
26096 @@ -106,40 +103,35 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
26097 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26098 {
26099 struct mm_struct *mm = current->mm;
26100 - unsigned long addr;
26101 + unsigned long addr = 0;
26102 int ret;
26103
26104 - if (!vdso_enabled)
26105 - return 0;
26106 -
26107 down_write(&mm->mmap_sem);
26108 +
26109 +#ifdef CONFIG_PAX_RANDMMAP
26110 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
26111 +#endif
26112 +
26113 addr = vdso_addr(mm->start_stack, vdso_size);
26114 + addr = align_addr(addr, NULL, ALIGN_VDSO);
26115 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
26116 if (IS_ERR_VALUE(addr)) {
26117 ret = addr;
26118 goto up_fail;
26119 }
26120
26121 - current->mm->context.vdso = (void *)addr;
26122 + mm->context.vdso = addr;
26123
26124 ret = install_special_mapping(mm, addr, vdso_size,
26125 VM_READ|VM_EXEC|
26126 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
26127 VM_ALWAYSDUMP,
26128 vdso_pages);
26129 - if (ret) {
26130 - current->mm->context.vdso = NULL;
26131 - goto up_fail;
26132 - }
26133 +
26134 + if (ret)
26135 + mm->context.vdso = 0;
26136
26137 up_fail:
26138 up_write(&mm->mmap_sem);
26139 return ret;
26140 }
26141 -
26142 -static __init int vdso_setup(char *s)
26143 -{
26144 - vdso_enabled = simple_strtoul(s, NULL, 0);
26145 - return 0;
26146 -}
26147 -__setup("vdso=", vdso_setup);
26148 diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
26149 index 4172af8..2c8ed7f 100644
26150 --- a/arch/x86/xen/enlighten.c
26151 +++ b/arch/x86/xen/enlighten.c
26152 @@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
26153
26154 struct shared_info xen_dummy_shared_info;
26155
26156 -void *xen_initial_gdt;
26157 -
26158 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
26159 __read_mostly int xen_have_vector_callback;
26160 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
26161 @@ -1029,30 +1027,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
26162 #endif
26163 };
26164
26165 -static void xen_reboot(int reason)
26166 +static __noreturn void xen_reboot(int reason)
26167 {
26168 struct sched_shutdown r = { .reason = reason };
26169
26170 - if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
26171 - BUG();
26172 + HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
26173 + BUG();
26174 }
26175
26176 -static void xen_restart(char *msg)
26177 +static __noreturn void xen_restart(char *msg)
26178 {
26179 xen_reboot(SHUTDOWN_reboot);
26180 }
26181
26182 -static void xen_emergency_restart(void)
26183 +static __noreturn void xen_emergency_restart(void)
26184 {
26185 xen_reboot(SHUTDOWN_reboot);
26186 }
26187
26188 -static void xen_machine_halt(void)
26189 +static __noreturn void xen_machine_halt(void)
26190 {
26191 xen_reboot(SHUTDOWN_poweroff);
26192 }
26193
26194 -static void xen_machine_power_off(void)
26195 +static __noreturn void xen_machine_power_off(void)
26196 {
26197 if (pm_power_off)
26198 pm_power_off();
26199 @@ -1155,7 +1153,17 @@ asmlinkage void __init xen_start_kernel(void)
26200 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
26201
26202 /* Work out if we support NX */
26203 - x86_configure_nx();
26204 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
26205 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
26206 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
26207 + unsigned l, h;
26208 +
26209 + __supported_pte_mask |= _PAGE_NX;
26210 + rdmsr(MSR_EFER, l, h);
26211 + l |= EFER_NX;
26212 + wrmsr(MSR_EFER, l, h);
26213 + }
26214 +#endif
26215
26216 xen_setup_features();
26217
26218 @@ -1186,13 +1194,6 @@ asmlinkage void __init xen_start_kernel(void)
26219
26220 machine_ops = xen_machine_ops;
26221
26222 - /*
26223 - * The only reliable way to retain the initial address of the
26224 - * percpu gdt_page is to remember it here, so we can go and
26225 - * mark it RW later, when the initial percpu area is freed.
26226 - */
26227 - xen_initial_gdt = &per_cpu(gdt_page, 0);
26228 -
26229 xen_smp_init();
26230
26231 #ifdef CONFIG_ACPI_NUMA
26232 diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
26233 index 95c1cf6..4bfa5be 100644
26234 --- a/arch/x86/xen/mmu.c
26235 +++ b/arch/x86/xen/mmu.c
26236 @@ -1733,6 +1733,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
26237 convert_pfn_mfn(init_level4_pgt);
26238 convert_pfn_mfn(level3_ident_pgt);
26239 convert_pfn_mfn(level3_kernel_pgt);
26240 + convert_pfn_mfn(level3_vmalloc_start_pgt);
26241 + convert_pfn_mfn(level3_vmalloc_end_pgt);
26242 + convert_pfn_mfn(level3_vmemmap_pgt);
26243
26244 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
26245 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
26246 @@ -1751,7 +1754,11 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
26247 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
26248 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
26249 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
26250 + set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
26251 + set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
26252 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
26253 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
26254 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
26255 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
26256 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
26257
26258 @@ -1958,6 +1965,7 @@ static void __init xen_post_allocator_init(void)
26259 pv_mmu_ops.set_pud = xen_set_pud;
26260 #if PAGETABLE_LEVELS == 4
26261 pv_mmu_ops.set_pgd = xen_set_pgd;
26262 + pv_mmu_ops.set_pgd_batched = xen_set_pgd;
26263 #endif
26264
26265 /* This will work as long as patching hasn't happened yet
26266 @@ -2039,6 +2047,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
26267 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
26268 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
26269 .set_pgd = xen_set_pgd_hyper,
26270 + .set_pgd_batched = xen_set_pgd_hyper,
26271
26272 .alloc_pud = xen_alloc_pmd_init,
26273 .release_pud = xen_release_pmd_init,
26274 diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
26275 index 501d4e0..e877605 100644
26276 --- a/arch/x86/xen/smp.c
26277 +++ b/arch/x86/xen/smp.c
26278 @@ -194,11 +194,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
26279 {
26280 BUG_ON(smp_processor_id() != 0);
26281 native_smp_prepare_boot_cpu();
26282 -
26283 - /* We've switched to the "real" per-cpu gdt, so make sure the
26284 - old memory can be recycled */
26285 - make_lowmem_page_readwrite(xen_initial_gdt);
26286 -
26287 xen_filter_cpu_maps();
26288 xen_setup_vcpu_info_placement();
26289 }
26290 @@ -275,12 +270,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
26291 gdt = get_cpu_gdt_table(cpu);
26292
26293 ctxt->flags = VGCF_IN_KERNEL;
26294 - ctxt->user_regs.ds = __USER_DS;
26295 - ctxt->user_regs.es = __USER_DS;
26296 + ctxt->user_regs.ds = __KERNEL_DS;
26297 + ctxt->user_regs.es = __KERNEL_DS;
26298 ctxt->user_regs.ss = __KERNEL_DS;
26299 #ifdef CONFIG_X86_32
26300 ctxt->user_regs.fs = __KERNEL_PERCPU;
26301 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
26302 + savesegment(gs, ctxt->user_regs.gs);
26303 #else
26304 ctxt->gs_base_kernel = per_cpu_offset(cpu);
26305 #endif
26306 @@ -331,13 +326,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
26307 int rc;
26308
26309 per_cpu(current_task, cpu) = idle;
26310 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
26311 #ifdef CONFIG_X86_32
26312 irq_ctx_init(cpu);
26313 #else
26314 clear_tsk_thread_flag(idle, TIF_FORK);
26315 - per_cpu(kernel_stack, cpu) =
26316 - (unsigned long)task_stack_page(idle) -
26317 - KERNEL_STACK_OFFSET + THREAD_SIZE;
26318 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
26319 #endif
26320 xen_setup_runstate_info(cpu);
26321 xen_setup_timer(cpu);
26322 diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
26323 index b040b0e..8cc4fe0 100644
26324 --- a/arch/x86/xen/xen-asm_32.S
26325 +++ b/arch/x86/xen/xen-asm_32.S
26326 @@ -83,14 +83,14 @@ ENTRY(xen_iret)
26327 ESP_OFFSET=4 # bytes pushed onto stack
26328
26329 /*
26330 - * Store vcpu_info pointer for easy access. Do it this way to
26331 - * avoid having to reload %fs
26332 + * Store vcpu_info pointer for easy access.
26333 */
26334 #ifdef CONFIG_SMP
26335 - GET_THREAD_INFO(%eax)
26336 - movl TI_cpu(%eax), %eax
26337 - movl __per_cpu_offset(,%eax,4), %eax
26338 - mov xen_vcpu(%eax), %eax
26339 + push %fs
26340 + mov $(__KERNEL_PERCPU), %eax
26341 + mov %eax, %fs
26342 + mov PER_CPU_VAR(xen_vcpu), %eax
26343 + pop %fs
26344 #else
26345 movl xen_vcpu, %eax
26346 #endif
26347 diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
26348 index aaa7291..3f77960 100644
26349 --- a/arch/x86/xen/xen-head.S
26350 +++ b/arch/x86/xen/xen-head.S
26351 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
26352 #ifdef CONFIG_X86_32
26353 mov %esi,xen_start_info
26354 mov $init_thread_union+THREAD_SIZE,%esp
26355 +#ifdef CONFIG_SMP
26356 + movl $cpu_gdt_table,%edi
26357 + movl $__per_cpu_load,%eax
26358 + movw %ax,__KERNEL_PERCPU + 2(%edi)
26359 + rorl $16,%eax
26360 + movb %al,__KERNEL_PERCPU + 4(%edi)
26361 + movb %ah,__KERNEL_PERCPU + 7(%edi)
26362 + movl $__per_cpu_end - 1,%eax
26363 + subl $__per_cpu_start,%eax
26364 + movw %ax,__KERNEL_PERCPU + 0(%edi)
26365 +#endif
26366 #else
26367 mov %rsi,xen_start_info
26368 mov $init_thread_union+THREAD_SIZE,%rsp
26369 diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
26370 index b095739..8c17bcd 100644
26371 --- a/arch/x86/xen/xen-ops.h
26372 +++ b/arch/x86/xen/xen-ops.h
26373 @@ -10,8 +10,6 @@
26374 extern const char xen_hypervisor_callback[];
26375 extern const char xen_failsafe_callback[];
26376
26377 -extern void *xen_initial_gdt;
26378 -
26379 struct trap_info;
26380 void xen_copy_trap_info(struct trap_info *traps);
26381
26382 diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
26383 index 525bd3d..ef888b1 100644
26384 --- a/arch/xtensa/variants/dc232b/include/variant/core.h
26385 +++ b/arch/xtensa/variants/dc232b/include/variant/core.h
26386 @@ -119,9 +119,9 @@
26387 ----------------------------------------------------------------------*/
26388
26389 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
26390 -#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
26391 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
26392 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
26393 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
26394
26395 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
26396 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
26397 diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
26398 index 2f33760..835e50a 100644
26399 --- a/arch/xtensa/variants/fsf/include/variant/core.h
26400 +++ b/arch/xtensa/variants/fsf/include/variant/core.h
26401 @@ -11,6 +11,7 @@
26402 #ifndef _XTENSA_CORE_H
26403 #define _XTENSA_CORE_H
26404
26405 +#include <linux/const.h>
26406
26407 /****************************************************************************
26408 Parameters Useful for Any Code, USER or PRIVILEGED
26409 @@ -112,9 +113,9 @@
26410 ----------------------------------------------------------------------*/
26411
26412 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
26413 -#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
26414 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
26415 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
26416 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
26417
26418 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
26419 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
26420 diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
26421 index af00795..2bb8105 100644
26422 --- a/arch/xtensa/variants/s6000/include/variant/core.h
26423 +++ b/arch/xtensa/variants/s6000/include/variant/core.h
26424 @@ -11,6 +11,7 @@
26425 #ifndef _XTENSA_CORE_CONFIGURATION_H
26426 #define _XTENSA_CORE_CONFIGURATION_H
26427
26428 +#include <linux/const.h>
26429
26430 /****************************************************************************
26431 Parameters Useful for Any Code, USER or PRIVILEGED
26432 @@ -118,9 +119,9 @@
26433 ----------------------------------------------------------------------*/
26434
26435 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
26436 -#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
26437 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
26438 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
26439 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
26440
26441 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
26442 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
26443 diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
26444 index 58916af..9cb880b 100644
26445 --- a/block/blk-iopoll.c
26446 +++ b/block/blk-iopoll.c
26447 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
26448 }
26449 EXPORT_SYMBOL(blk_iopoll_complete);
26450
26451 -static void blk_iopoll_softirq(struct softirq_action *h)
26452 +static void blk_iopoll_softirq(void)
26453 {
26454 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
26455 int rearm = 0, budget = blk_iopoll_budget;
26456 diff --git a/block/blk-map.c b/block/blk-map.c
26457 index 623e1cd..ca1e109 100644
26458 --- a/block/blk-map.c
26459 +++ b/block/blk-map.c
26460 @@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
26461 if (!len || !kbuf)
26462 return -EINVAL;
26463
26464 - do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
26465 + do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
26466 if (do_copy)
26467 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
26468 else
26469 diff --git a/block/blk-softirq.c b/block/blk-softirq.c
26470 index 1366a89..e17f54b 100644
26471 --- a/block/blk-softirq.c
26472 +++ b/block/blk-softirq.c
26473 @@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
26474 * Softirq action handler - move entries to local list and loop over them
26475 * while passing them to the queue registered handler.
26476 */
26477 -static void blk_done_softirq(struct softirq_action *h)
26478 +static void blk_done_softirq(void)
26479 {
26480 struct list_head *cpu_list, local_list;
26481
26482 diff --git a/block/bsg.c b/block/bsg.c
26483 index ff64ae3..593560c 100644
26484 --- a/block/bsg.c
26485 +++ b/block/bsg.c
26486 @@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
26487 struct sg_io_v4 *hdr, struct bsg_device *bd,
26488 fmode_t has_write_perm)
26489 {
26490 + unsigned char tmpcmd[sizeof(rq->__cmd)];
26491 + unsigned char *cmdptr;
26492 +
26493 if (hdr->request_len > BLK_MAX_CDB) {
26494 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
26495 if (!rq->cmd)
26496 return -ENOMEM;
26497 - }
26498 + cmdptr = rq->cmd;
26499 + } else
26500 + cmdptr = tmpcmd;
26501
26502 - if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
26503 + if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
26504 hdr->request_len))
26505 return -EFAULT;
26506
26507 + if (cmdptr != rq->cmd)
26508 + memcpy(rq->cmd, cmdptr, hdr->request_len);
26509 +
26510 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
26511 if (blk_verify_command(rq->cmd, has_write_perm))
26512 return -EPERM;
26513 diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
26514 index 7c668c8..db3521c 100644
26515 --- a/block/compat_ioctl.c
26516 +++ b/block/compat_ioctl.c
26517 @@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
26518 err |= __get_user(f->spec1, &uf->spec1);
26519 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
26520 err |= __get_user(name, &uf->name);
26521 - f->name = compat_ptr(name);
26522 + f->name = (void __force_kernel *)compat_ptr(name);
26523 if (err) {
26524 err = -EFAULT;
26525 goto out;
26526 diff --git a/block/partitions/efi.c b/block/partitions/efi.c
26527 index 6296b40..417c00f 100644
26528 --- a/block/partitions/efi.c
26529 +++ b/block/partitions/efi.c
26530 @@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
26531 if (!gpt)
26532 return NULL;
26533
26534 + if (!le32_to_cpu(gpt->num_partition_entries))
26535 + return NULL;
26536 + pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
26537 + if (!pte)
26538 + return NULL;
26539 +
26540 count = le32_to_cpu(gpt->num_partition_entries) *
26541 le32_to_cpu(gpt->sizeof_partition_entry);
26542 - if (!count)
26543 - return NULL;
26544 - pte = kzalloc(count, GFP_KERNEL);
26545 - if (!pte)
26546 - return NULL;
26547 -
26548 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
26549 (u8 *) pte,
26550 count) < count) {
26551 diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
26552 index 260fa80..e8f3caf 100644
26553 --- a/block/scsi_ioctl.c
26554 +++ b/block/scsi_ioctl.c
26555 @@ -223,8 +223,20 @@ EXPORT_SYMBOL(blk_verify_command);
26556 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
26557 struct sg_io_hdr *hdr, fmode_t mode)
26558 {
26559 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
26560 + unsigned char tmpcmd[sizeof(rq->__cmd)];
26561 + unsigned char *cmdptr;
26562 +
26563 + if (rq->cmd != rq->__cmd)
26564 + cmdptr = rq->cmd;
26565 + else
26566 + cmdptr = tmpcmd;
26567 +
26568 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
26569 return -EFAULT;
26570 +
26571 + if (cmdptr != rq->cmd)
26572 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
26573 +
26574 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
26575 return -EPERM;
26576
26577 @@ -433,6 +445,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
26578 int err;
26579 unsigned int in_len, out_len, bytes, opcode, cmdlen;
26580 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
26581 + unsigned char tmpcmd[sizeof(rq->__cmd)];
26582 + unsigned char *cmdptr;
26583
26584 if (!sic)
26585 return -EINVAL;
26586 @@ -466,9 +480,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
26587 */
26588 err = -EFAULT;
26589 rq->cmd_len = cmdlen;
26590 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
26591 +
26592 + if (rq->cmd != rq->__cmd)
26593 + cmdptr = rq->cmd;
26594 + else
26595 + cmdptr = tmpcmd;
26596 +
26597 + if (copy_from_user(cmdptr, sic->data, cmdlen))
26598 goto error;
26599
26600 + if (rq->cmd != cmdptr)
26601 + memcpy(rq->cmd, cmdptr, cmdlen);
26602 +
26603 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
26604 goto error;
26605
26606 diff --git a/crypto/cryptd.c b/crypto/cryptd.c
26607 index 671d4d6..5f24030 100644
26608 --- a/crypto/cryptd.c
26609 +++ b/crypto/cryptd.c
26610 @@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
26611
26612 struct cryptd_blkcipher_request_ctx {
26613 crypto_completion_t complete;
26614 -};
26615 +} __no_const;
26616
26617 struct cryptd_hash_ctx {
26618 struct crypto_shash *child;
26619 @@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
26620
26621 struct cryptd_aead_request_ctx {
26622 crypto_completion_t complete;
26623 -};
26624 +} __no_const;
26625
26626 static void cryptd_queue_worker(struct work_struct *work);
26627
26628 diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
26629 index 5d41894..22021e4 100644
26630 --- a/drivers/acpi/apei/cper.c
26631 +++ b/drivers/acpi/apei/cper.c
26632 @@ -38,12 +38,12 @@
26633 */
26634 u64 cper_next_record_id(void)
26635 {
26636 - static atomic64_t seq;
26637 + static atomic64_unchecked_t seq;
26638
26639 - if (!atomic64_read(&seq))
26640 - atomic64_set(&seq, ((u64)get_seconds()) << 32);
26641 + if (!atomic64_read_unchecked(&seq))
26642 + atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
26643
26644 - return atomic64_inc_return(&seq);
26645 + return atomic64_inc_return_unchecked(&seq);
26646 }
26647 EXPORT_SYMBOL_GPL(cper_next_record_id);
26648
26649 diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
26650 index b258cab..3fb7da7 100644
26651 --- a/drivers/acpi/ec_sys.c
26652 +++ b/drivers/acpi/ec_sys.c
26653 @@ -12,6 +12,7 @@
26654 #include <linux/acpi.h>
26655 #include <linux/debugfs.h>
26656 #include <linux/module.h>
26657 +#include <linux/uaccess.h>
26658 #include "internal.h"
26659
26660 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
26661 @@ -40,7 +41,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
26662 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
26663 */
26664 unsigned int size = EC_SPACE_SIZE;
26665 - u8 *data = (u8 *) buf;
26666 + u8 data;
26667 loff_t init_off = *off;
26668 int err = 0;
26669
26670 @@ -53,9 +54,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
26671 size = count;
26672
26673 while (size) {
26674 - err = ec_read(*off, &data[*off - init_off]);
26675 + err = ec_read(*off, &data);
26676 if (err)
26677 return err;
26678 + if (put_user(data, &buf[*off - init_off]))
26679 + return -EFAULT;
26680 *off += 1;
26681 size--;
26682 }
26683 @@ -71,7 +74,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
26684
26685 unsigned int size = count;
26686 loff_t init_off = *off;
26687 - u8 *data = (u8 *) buf;
26688 int err = 0;
26689
26690 if (*off >= EC_SPACE_SIZE)
26691 @@ -82,7 +84,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
26692 }
26693
26694 while (size) {
26695 - u8 byte_write = data[*off - init_off];
26696 + u8 byte_write;
26697 + if (get_user(byte_write, &buf[*off - init_off]))
26698 + return -EFAULT;
26699 err = ec_write(*off, byte_write);
26700 if (err)
26701 return err;
26702 diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
26703 index 251c7b62..000462d 100644
26704 --- a/drivers/acpi/proc.c
26705 +++ b/drivers/acpi/proc.c
26706 @@ -343,19 +343,13 @@ acpi_system_write_wakeup_device(struct file *file,
26707 size_t count, loff_t * ppos)
26708 {
26709 struct list_head *node, *next;
26710 - char strbuf[5];
26711 - char str[5] = "";
26712 - unsigned int len = count;
26713 + char strbuf[5] = {0};
26714
26715 - if (len > 4)
26716 - len = 4;
26717 - if (len < 0)
26718 + if (count > 4)
26719 + count = 4;
26720 + if (copy_from_user(strbuf, buffer, count))
26721 return -EFAULT;
26722 -
26723 - if (copy_from_user(strbuf, buffer, len))
26724 - return -EFAULT;
26725 - strbuf[len] = '\0';
26726 - sscanf(strbuf, "%s", str);
26727 + strbuf[count] = '\0';
26728
26729 mutex_lock(&acpi_device_lock);
26730 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
26731 @@ -364,7 +358,7 @@ acpi_system_write_wakeup_device(struct file *file,
26732 if (!dev->wakeup.flags.valid)
26733 continue;
26734
26735 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
26736 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
26737 if (device_can_wakeup(&dev->dev)) {
26738 bool enable = !device_may_wakeup(&dev->dev);
26739 device_set_wakeup_enable(&dev->dev, enable);
26740 diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
26741 index 8ae05ce..7dbbed9 100644
26742 --- a/drivers/acpi/processor_driver.c
26743 +++ b/drivers/acpi/processor_driver.c
26744 @@ -555,7 +555,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
26745 return 0;
26746 #endif
26747
26748 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
26749 + BUG_ON(pr->id >= nr_cpu_ids);
26750
26751 /*
26752 * Buggy BIOS check
26753 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
26754 index c06e0ec..a2c06ba 100644
26755 --- a/drivers/ata/libata-core.c
26756 +++ b/drivers/ata/libata-core.c
26757 @@ -4736,7 +4736,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
26758 struct ata_port *ap;
26759 unsigned int tag;
26760
26761 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
26762 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
26763 ap = qc->ap;
26764
26765 qc->flags = 0;
26766 @@ -4752,7 +4752,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
26767 struct ata_port *ap;
26768 struct ata_link *link;
26769
26770 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
26771 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
26772 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
26773 ap = qc->ap;
26774 link = qc->dev->link;
26775 @@ -5816,6 +5816,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
26776 return;
26777
26778 spin_lock(&lock);
26779 + pax_open_kernel();
26780
26781 for (cur = ops->inherits; cur; cur = cur->inherits) {
26782 void **inherit = (void **)cur;
26783 @@ -5829,8 +5830,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
26784 if (IS_ERR(*pp))
26785 *pp = NULL;
26786
26787 - ops->inherits = NULL;
26788 + *(struct ata_port_operations **)&ops->inherits = NULL;
26789
26790 + pax_close_kernel();
26791 spin_unlock(&lock);
26792 }
26793
26794 diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
26795 index 048589f..4002b98 100644
26796 --- a/drivers/ata/pata_arasan_cf.c
26797 +++ b/drivers/ata/pata_arasan_cf.c
26798 @@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(struct platform_device *pdev)
26799 /* Handle platform specific quirks */
26800 if (pdata->quirk) {
26801 if (pdata->quirk & CF_BROKEN_PIO) {
26802 - ap->ops->set_piomode = NULL;
26803 + pax_open_kernel();
26804 + *(void **)&ap->ops->set_piomode = NULL;
26805 + pax_close_kernel();
26806 ap->pio_mask = 0;
26807 }
26808 if (pdata->quirk & CF_BROKEN_MWDMA)
26809 diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
26810 index f9b983a..887b9d8 100644
26811 --- a/drivers/atm/adummy.c
26812 +++ b/drivers/atm/adummy.c
26813 @@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
26814 vcc->pop(vcc, skb);
26815 else
26816 dev_kfree_skb_any(skb);
26817 - atomic_inc(&vcc->stats->tx);
26818 + atomic_inc_unchecked(&vcc->stats->tx);
26819
26820 return 0;
26821 }
26822 diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
26823 index f8f41e0..1f987dd 100644
26824 --- a/drivers/atm/ambassador.c
26825 +++ b/drivers/atm/ambassador.c
26826 @@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
26827 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
26828
26829 // VC layer stats
26830 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
26831 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
26832
26833 // free the descriptor
26834 kfree (tx_descr);
26835 @@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
26836 dump_skb ("<<<", vc, skb);
26837
26838 // VC layer stats
26839 - atomic_inc(&atm_vcc->stats->rx);
26840 + atomic_inc_unchecked(&atm_vcc->stats->rx);
26841 __net_timestamp(skb);
26842 // end of our responsibility
26843 atm_vcc->push (atm_vcc, skb);
26844 @@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
26845 } else {
26846 PRINTK (KERN_INFO, "dropped over-size frame");
26847 // should we count this?
26848 - atomic_inc(&atm_vcc->stats->rx_drop);
26849 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
26850 }
26851
26852 } else {
26853 @@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
26854 }
26855
26856 if (check_area (skb->data, skb->len)) {
26857 - atomic_inc(&atm_vcc->stats->tx_err);
26858 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
26859 return -ENOMEM; // ?
26860 }
26861
26862 diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
26863 index b22d71c..d6e1049 100644
26864 --- a/drivers/atm/atmtcp.c
26865 +++ b/drivers/atm/atmtcp.c
26866 @@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
26867 if (vcc->pop) vcc->pop(vcc,skb);
26868 else dev_kfree_skb(skb);
26869 if (dev_data) return 0;
26870 - atomic_inc(&vcc->stats->tx_err);
26871 + atomic_inc_unchecked(&vcc->stats->tx_err);
26872 return -ENOLINK;
26873 }
26874 size = skb->len+sizeof(struct atmtcp_hdr);
26875 @@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
26876 if (!new_skb) {
26877 if (vcc->pop) vcc->pop(vcc,skb);
26878 else dev_kfree_skb(skb);
26879 - atomic_inc(&vcc->stats->tx_err);
26880 + atomic_inc_unchecked(&vcc->stats->tx_err);
26881 return -ENOBUFS;
26882 }
26883 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
26884 @@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
26885 if (vcc->pop) vcc->pop(vcc,skb);
26886 else dev_kfree_skb(skb);
26887 out_vcc->push(out_vcc,new_skb);
26888 - atomic_inc(&vcc->stats->tx);
26889 - atomic_inc(&out_vcc->stats->rx);
26890 + atomic_inc_unchecked(&vcc->stats->tx);
26891 + atomic_inc_unchecked(&out_vcc->stats->rx);
26892 return 0;
26893 }
26894
26895 @@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
26896 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
26897 read_unlock(&vcc_sklist_lock);
26898 if (!out_vcc) {
26899 - atomic_inc(&vcc->stats->tx_err);
26900 + atomic_inc_unchecked(&vcc->stats->tx_err);
26901 goto done;
26902 }
26903 skb_pull(skb,sizeof(struct atmtcp_hdr));
26904 @@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
26905 __net_timestamp(new_skb);
26906 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
26907 out_vcc->push(out_vcc,new_skb);
26908 - atomic_inc(&vcc->stats->tx);
26909 - atomic_inc(&out_vcc->stats->rx);
26910 + atomic_inc_unchecked(&vcc->stats->tx);
26911 + atomic_inc_unchecked(&out_vcc->stats->rx);
26912 done:
26913 if (vcc->pop) vcc->pop(vcc,skb);
26914 else dev_kfree_skb(skb);
26915 diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
26916 index 956e9ac..133516d 100644
26917 --- a/drivers/atm/eni.c
26918 +++ b/drivers/atm/eni.c
26919 @@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
26920 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
26921 vcc->dev->number);
26922 length = 0;
26923 - atomic_inc(&vcc->stats->rx_err);
26924 + atomic_inc_unchecked(&vcc->stats->rx_err);
26925 }
26926 else {
26927 length = ATM_CELL_SIZE-1; /* no HEC */
26928 @@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
26929 size);
26930 }
26931 eff = length = 0;
26932 - atomic_inc(&vcc->stats->rx_err);
26933 + atomic_inc_unchecked(&vcc->stats->rx_err);
26934 }
26935 else {
26936 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
26937 @@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
26938 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
26939 vcc->dev->number,vcc->vci,length,size << 2,descr);
26940 length = eff = 0;
26941 - atomic_inc(&vcc->stats->rx_err);
26942 + atomic_inc_unchecked(&vcc->stats->rx_err);
26943 }
26944 }
26945 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
26946 @@ -771,7 +771,7 @@ rx_dequeued++;
26947 vcc->push(vcc,skb);
26948 pushed++;
26949 }
26950 - atomic_inc(&vcc->stats->rx);
26951 + atomic_inc_unchecked(&vcc->stats->rx);
26952 }
26953 wake_up(&eni_dev->rx_wait);
26954 }
26955 @@ -1229,7 +1229,7 @@ static void dequeue_tx(struct atm_dev *dev)
26956 PCI_DMA_TODEVICE);
26957 if (vcc->pop) vcc->pop(vcc,skb);
26958 else dev_kfree_skb_irq(skb);
26959 - atomic_inc(&vcc->stats->tx);
26960 + atomic_inc_unchecked(&vcc->stats->tx);
26961 wake_up(&eni_dev->tx_wait);
26962 dma_complete++;
26963 }
26964 @@ -1569,7 +1569,7 @@ tx_complete++;
26965 /*--------------------------------- entries ---------------------------------*/
26966
26967
26968 -static const char *media_name[] __devinitdata = {
26969 +static const char *media_name[] __devinitconst = {
26970 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
26971 "UTP", "05?", "06?", "07?", /* 4- 7 */
26972 "TAXI","09?", "10?", "11?", /* 8-11 */
26973 diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
26974 index 5072f8a..fa52520d 100644
26975 --- a/drivers/atm/firestream.c
26976 +++ b/drivers/atm/firestream.c
26977 @@ -750,7 +750,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
26978 }
26979 }
26980
26981 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
26982 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
26983
26984 fs_dprintk (FS_DEBUG_TXMEM, "i");
26985 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
26986 @@ -817,7 +817,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
26987 #endif
26988 skb_put (skb, qe->p1 & 0xffff);
26989 ATM_SKB(skb)->vcc = atm_vcc;
26990 - atomic_inc(&atm_vcc->stats->rx);
26991 + atomic_inc_unchecked(&atm_vcc->stats->rx);
26992 __net_timestamp(skb);
26993 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
26994 atm_vcc->push (atm_vcc, skb);
26995 @@ -838,12 +838,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
26996 kfree (pe);
26997 }
26998 if (atm_vcc)
26999 - atomic_inc(&atm_vcc->stats->rx_drop);
27000 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
27001 break;
27002 case 0x1f: /* Reassembly abort: no buffers. */
27003 /* Silently increment error counter. */
27004 if (atm_vcc)
27005 - atomic_inc(&atm_vcc->stats->rx_drop);
27006 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
27007 break;
27008 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
27009 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
27010 diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
27011 index 361f5ae..7fc552d 100644
27012 --- a/drivers/atm/fore200e.c
27013 +++ b/drivers/atm/fore200e.c
27014 @@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
27015 #endif
27016 /* check error condition */
27017 if (*entry->status & STATUS_ERROR)
27018 - atomic_inc(&vcc->stats->tx_err);
27019 + atomic_inc_unchecked(&vcc->stats->tx_err);
27020 else
27021 - atomic_inc(&vcc->stats->tx);
27022 + atomic_inc_unchecked(&vcc->stats->tx);
27023 }
27024 }
27025
27026 @@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
27027 if (skb == NULL) {
27028 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
27029
27030 - atomic_inc(&vcc->stats->rx_drop);
27031 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27032 return -ENOMEM;
27033 }
27034
27035 @@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
27036
27037 dev_kfree_skb_any(skb);
27038
27039 - atomic_inc(&vcc->stats->rx_drop);
27040 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27041 return -ENOMEM;
27042 }
27043
27044 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
27045
27046 vcc->push(vcc, skb);
27047 - atomic_inc(&vcc->stats->rx);
27048 + atomic_inc_unchecked(&vcc->stats->rx);
27049
27050 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
27051
27052 @@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
27053 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
27054 fore200e->atm_dev->number,
27055 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
27056 - atomic_inc(&vcc->stats->rx_err);
27057 + atomic_inc_unchecked(&vcc->stats->rx_err);
27058 }
27059 }
27060
27061 @@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
27062 goto retry_here;
27063 }
27064
27065 - atomic_inc(&vcc->stats->tx_err);
27066 + atomic_inc_unchecked(&vcc->stats->tx_err);
27067
27068 fore200e->tx_sat++;
27069 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
27070 diff --git a/drivers/atm/he.c b/drivers/atm/he.c
27071 index b182c2f..1c6fa8a 100644
27072 --- a/drivers/atm/he.c
27073 +++ b/drivers/atm/he.c
27074 @@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
27075
27076 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
27077 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
27078 - atomic_inc(&vcc->stats->rx_drop);
27079 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27080 goto return_host_buffers;
27081 }
27082
27083 @@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
27084 RBRQ_LEN_ERR(he_dev->rbrq_head)
27085 ? "LEN_ERR" : "",
27086 vcc->vpi, vcc->vci);
27087 - atomic_inc(&vcc->stats->rx_err);
27088 + atomic_inc_unchecked(&vcc->stats->rx_err);
27089 goto return_host_buffers;
27090 }
27091
27092 @@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
27093 vcc->push(vcc, skb);
27094 spin_lock(&he_dev->global_lock);
27095
27096 - atomic_inc(&vcc->stats->rx);
27097 + atomic_inc_unchecked(&vcc->stats->rx);
27098
27099 return_host_buffers:
27100 ++pdus_assembled;
27101 @@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
27102 tpd->vcc->pop(tpd->vcc, tpd->skb);
27103 else
27104 dev_kfree_skb_any(tpd->skb);
27105 - atomic_inc(&tpd->vcc->stats->tx_err);
27106 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
27107 }
27108 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
27109 return;
27110 @@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
27111 vcc->pop(vcc, skb);
27112 else
27113 dev_kfree_skb_any(skb);
27114 - atomic_inc(&vcc->stats->tx_err);
27115 + atomic_inc_unchecked(&vcc->stats->tx_err);
27116 return -EINVAL;
27117 }
27118
27119 @@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
27120 vcc->pop(vcc, skb);
27121 else
27122 dev_kfree_skb_any(skb);
27123 - atomic_inc(&vcc->stats->tx_err);
27124 + atomic_inc_unchecked(&vcc->stats->tx_err);
27125 return -EINVAL;
27126 }
27127 #endif
27128 @@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
27129 vcc->pop(vcc, skb);
27130 else
27131 dev_kfree_skb_any(skb);
27132 - atomic_inc(&vcc->stats->tx_err);
27133 + atomic_inc_unchecked(&vcc->stats->tx_err);
27134 spin_unlock_irqrestore(&he_dev->global_lock, flags);
27135 return -ENOMEM;
27136 }
27137 @@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
27138 vcc->pop(vcc, skb);
27139 else
27140 dev_kfree_skb_any(skb);
27141 - atomic_inc(&vcc->stats->tx_err);
27142 + atomic_inc_unchecked(&vcc->stats->tx_err);
27143 spin_unlock_irqrestore(&he_dev->global_lock, flags);
27144 return -ENOMEM;
27145 }
27146 @@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
27147 __enqueue_tpd(he_dev, tpd, cid);
27148 spin_unlock_irqrestore(&he_dev->global_lock, flags);
27149
27150 - atomic_inc(&vcc->stats->tx);
27151 + atomic_inc_unchecked(&vcc->stats->tx);
27152
27153 return 0;
27154 }
27155 diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
27156 index b812103..e391a49 100644
27157 --- a/drivers/atm/horizon.c
27158 +++ b/drivers/atm/horizon.c
27159 @@ -1035,7 +1035,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
27160 {
27161 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
27162 // VC layer stats
27163 - atomic_inc(&vcc->stats->rx);
27164 + atomic_inc_unchecked(&vcc->stats->rx);
27165 __net_timestamp(skb);
27166 // end of our responsibility
27167 vcc->push (vcc, skb);
27168 @@ -1187,7 +1187,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
27169 dev->tx_iovec = NULL;
27170
27171 // VC layer stats
27172 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
27173 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
27174
27175 // free the skb
27176 hrz_kfree_skb (skb);
27177 diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
27178 index 1c05212..c28e200 100644
27179 --- a/drivers/atm/idt77252.c
27180 +++ b/drivers/atm/idt77252.c
27181 @@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
27182 else
27183 dev_kfree_skb(skb);
27184
27185 - atomic_inc(&vcc->stats->tx);
27186 + atomic_inc_unchecked(&vcc->stats->tx);
27187 }
27188
27189 atomic_dec(&scq->used);
27190 @@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
27191 if ((sb = dev_alloc_skb(64)) == NULL) {
27192 printk("%s: Can't allocate buffers for aal0.\n",
27193 card->name);
27194 - atomic_add(i, &vcc->stats->rx_drop);
27195 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
27196 break;
27197 }
27198 if (!atm_charge(vcc, sb->truesize)) {
27199 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
27200 card->name);
27201 - atomic_add(i - 1, &vcc->stats->rx_drop);
27202 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
27203 dev_kfree_skb(sb);
27204 break;
27205 }
27206 @@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
27207 ATM_SKB(sb)->vcc = vcc;
27208 __net_timestamp(sb);
27209 vcc->push(vcc, sb);
27210 - atomic_inc(&vcc->stats->rx);
27211 + atomic_inc_unchecked(&vcc->stats->rx);
27212
27213 cell += ATM_CELL_PAYLOAD;
27214 }
27215 @@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
27216 "(CDC: %08x)\n",
27217 card->name, len, rpp->len, readl(SAR_REG_CDC));
27218 recycle_rx_pool_skb(card, rpp);
27219 - atomic_inc(&vcc->stats->rx_err);
27220 + atomic_inc_unchecked(&vcc->stats->rx_err);
27221 return;
27222 }
27223 if (stat & SAR_RSQE_CRC) {
27224 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
27225 recycle_rx_pool_skb(card, rpp);
27226 - atomic_inc(&vcc->stats->rx_err);
27227 + atomic_inc_unchecked(&vcc->stats->rx_err);
27228 return;
27229 }
27230 if (skb_queue_len(&rpp->queue) > 1) {
27231 @@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
27232 RXPRINTK("%s: Can't alloc RX skb.\n",
27233 card->name);
27234 recycle_rx_pool_skb(card, rpp);
27235 - atomic_inc(&vcc->stats->rx_err);
27236 + atomic_inc_unchecked(&vcc->stats->rx_err);
27237 return;
27238 }
27239 if (!atm_charge(vcc, skb->truesize)) {
27240 @@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
27241 __net_timestamp(skb);
27242
27243 vcc->push(vcc, skb);
27244 - atomic_inc(&vcc->stats->rx);
27245 + atomic_inc_unchecked(&vcc->stats->rx);
27246
27247 return;
27248 }
27249 @@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
27250 __net_timestamp(skb);
27251
27252 vcc->push(vcc, skb);
27253 - atomic_inc(&vcc->stats->rx);
27254 + atomic_inc_unchecked(&vcc->stats->rx);
27255
27256 if (skb->truesize > SAR_FB_SIZE_3)
27257 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
27258 @@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
27259 if (vcc->qos.aal != ATM_AAL0) {
27260 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
27261 card->name, vpi, vci);
27262 - atomic_inc(&vcc->stats->rx_drop);
27263 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27264 goto drop;
27265 }
27266
27267 if ((sb = dev_alloc_skb(64)) == NULL) {
27268 printk("%s: Can't allocate buffers for AAL0.\n",
27269 card->name);
27270 - atomic_inc(&vcc->stats->rx_err);
27271 + atomic_inc_unchecked(&vcc->stats->rx_err);
27272 goto drop;
27273 }
27274
27275 @@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
27276 ATM_SKB(sb)->vcc = vcc;
27277 __net_timestamp(sb);
27278 vcc->push(vcc, sb);
27279 - atomic_inc(&vcc->stats->rx);
27280 + atomic_inc_unchecked(&vcc->stats->rx);
27281
27282 drop:
27283 skb_pull(queue, 64);
27284 @@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
27285
27286 if (vc == NULL) {
27287 printk("%s: NULL connection in send().\n", card->name);
27288 - atomic_inc(&vcc->stats->tx_err);
27289 + atomic_inc_unchecked(&vcc->stats->tx_err);
27290 dev_kfree_skb(skb);
27291 return -EINVAL;
27292 }
27293 if (!test_bit(VCF_TX, &vc->flags)) {
27294 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
27295 - atomic_inc(&vcc->stats->tx_err);
27296 + atomic_inc_unchecked(&vcc->stats->tx_err);
27297 dev_kfree_skb(skb);
27298 return -EINVAL;
27299 }
27300 @@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
27301 break;
27302 default:
27303 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
27304 - atomic_inc(&vcc->stats->tx_err);
27305 + atomic_inc_unchecked(&vcc->stats->tx_err);
27306 dev_kfree_skb(skb);
27307 return -EINVAL;
27308 }
27309
27310 if (skb_shinfo(skb)->nr_frags != 0) {
27311 printk("%s: No scatter-gather yet.\n", card->name);
27312 - atomic_inc(&vcc->stats->tx_err);
27313 + atomic_inc_unchecked(&vcc->stats->tx_err);
27314 dev_kfree_skb(skb);
27315 return -EINVAL;
27316 }
27317 @@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
27318
27319 err = queue_skb(card, vc, skb, oam);
27320 if (err) {
27321 - atomic_inc(&vcc->stats->tx_err);
27322 + atomic_inc_unchecked(&vcc->stats->tx_err);
27323 dev_kfree_skb(skb);
27324 return err;
27325 }
27326 @@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
27327 skb = dev_alloc_skb(64);
27328 if (!skb) {
27329 printk("%s: Out of memory in send_oam().\n", card->name);
27330 - atomic_inc(&vcc->stats->tx_err);
27331 + atomic_inc_unchecked(&vcc->stats->tx_err);
27332 return -ENOMEM;
27333 }
27334 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
27335 diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
27336 index 9e373ba..cf93727 100644
27337 --- a/drivers/atm/iphase.c
27338 +++ b/drivers/atm/iphase.c
27339 @@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
27340 status = (u_short) (buf_desc_ptr->desc_mode);
27341 if (status & (RX_CER | RX_PTE | RX_OFL))
27342 {
27343 - atomic_inc(&vcc->stats->rx_err);
27344 + atomic_inc_unchecked(&vcc->stats->rx_err);
27345 IF_ERR(printk("IA: bad packet, dropping it");)
27346 if (status & RX_CER) {
27347 IF_ERR(printk(" cause: packet CRC error\n");)
27348 @@ -1169,7 +1169,7 @@ static int rx_pkt(struct atm_dev *dev)
27349 len = dma_addr - buf_addr;
27350 if (len > iadev->rx_buf_sz) {
27351 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
27352 - atomic_inc(&vcc->stats->rx_err);
27353 + atomic_inc_unchecked(&vcc->stats->rx_err);
27354 goto out_free_desc;
27355 }
27356
27357 @@ -1319,7 +1319,7 @@ static void rx_dle_intr(struct atm_dev *dev)
27358 ia_vcc = INPH_IA_VCC(vcc);
27359 if (ia_vcc == NULL)
27360 {
27361 - atomic_inc(&vcc->stats->rx_err);
27362 + atomic_inc_unchecked(&vcc->stats->rx_err);
27363 atm_return(vcc, skb->truesize);
27364 dev_kfree_skb_any(skb);
27365 goto INCR_DLE;
27366 @@ -1331,7 +1331,7 @@ static void rx_dle_intr(struct atm_dev *dev)
27367 if ((length > iadev->rx_buf_sz) || (length >
27368 (skb->len - sizeof(struct cpcs_trailer))))
27369 {
27370 - atomic_inc(&vcc->stats->rx_err);
27371 + atomic_inc_unchecked(&vcc->stats->rx_err);
27372 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
27373 length, skb->len);)
27374 atm_return(vcc, skb->truesize);
27375 @@ -1347,7 +1347,7 @@ static void rx_dle_intr(struct atm_dev *dev)
27376
27377 IF_RX(printk("rx_dle_intr: skb push");)
27378 vcc->push(vcc,skb);
27379 - atomic_inc(&vcc->stats->rx);
27380 + atomic_inc_unchecked(&vcc->stats->rx);
27381 iadev->rx_pkt_cnt++;
27382 }
27383 INCR_DLE:
27384 @@ -2827,15 +2827,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
27385 {
27386 struct k_sonet_stats *stats;
27387 stats = &PRIV(_ia_dev[board])->sonet_stats;
27388 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
27389 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
27390 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
27391 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
27392 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
27393 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
27394 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
27395 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
27396 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
27397 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
27398 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
27399 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
27400 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
27401 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
27402 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
27403 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
27404 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
27405 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
27406 }
27407 ia_cmds.status = 0;
27408 break;
27409 @@ -2940,7 +2940,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
27410 if ((desc == 0) || (desc > iadev->num_tx_desc))
27411 {
27412 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
27413 - atomic_inc(&vcc->stats->tx);
27414 + atomic_inc_unchecked(&vcc->stats->tx);
27415 if (vcc->pop)
27416 vcc->pop(vcc, skb);
27417 else
27418 @@ -3045,14 +3045,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
27419 ATM_DESC(skb) = vcc->vci;
27420 skb_queue_tail(&iadev->tx_dma_q, skb);
27421
27422 - atomic_inc(&vcc->stats->tx);
27423 + atomic_inc_unchecked(&vcc->stats->tx);
27424 iadev->tx_pkt_cnt++;
27425 /* Increment transaction counter */
27426 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
27427
27428 #if 0
27429 /* add flow control logic */
27430 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
27431 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
27432 if (iavcc->vc_desc_cnt > 10) {
27433 vcc->tx_quota = vcc->tx_quota * 3 / 4;
27434 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
27435 diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
27436 index f556969..0da15eb 100644
27437 --- a/drivers/atm/lanai.c
27438 +++ b/drivers/atm/lanai.c
27439 @@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
27440 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
27441 lanai_endtx(lanai, lvcc);
27442 lanai_free_skb(lvcc->tx.atmvcc, skb);
27443 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
27444 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
27445 }
27446
27447 /* Try to fill the buffer - don't call unless there is backlog */
27448 @@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
27449 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
27450 __net_timestamp(skb);
27451 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
27452 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
27453 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
27454 out:
27455 lvcc->rx.buf.ptr = end;
27456 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
27457 @@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
27458 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
27459 "vcc %d\n", lanai->number, (unsigned int) s, vci);
27460 lanai->stats.service_rxnotaal5++;
27461 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
27462 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
27463 return 0;
27464 }
27465 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
27466 @@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
27467 int bytes;
27468 read_unlock(&vcc_sklist_lock);
27469 DPRINTK("got trashed rx pdu on vci %d\n", vci);
27470 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
27471 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
27472 lvcc->stats.x.aal5.service_trash++;
27473 bytes = (SERVICE_GET_END(s) * 16) -
27474 (((unsigned long) lvcc->rx.buf.ptr) -
27475 @@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
27476 }
27477 if (s & SERVICE_STREAM) {
27478 read_unlock(&vcc_sklist_lock);
27479 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
27480 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
27481 lvcc->stats.x.aal5.service_stream++;
27482 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
27483 "PDU on VCI %d!\n", lanai->number, vci);
27484 @@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
27485 return 0;
27486 }
27487 DPRINTK("got rx crc error on vci %d\n", vci);
27488 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
27489 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
27490 lvcc->stats.x.aal5.service_rxcrc++;
27491 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
27492 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
27493 diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
27494 index 1c70c45..300718d 100644
27495 --- a/drivers/atm/nicstar.c
27496 +++ b/drivers/atm/nicstar.c
27497 @@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
27498 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
27499 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
27500 card->index);
27501 - atomic_inc(&vcc->stats->tx_err);
27502 + atomic_inc_unchecked(&vcc->stats->tx_err);
27503 dev_kfree_skb_any(skb);
27504 return -EINVAL;
27505 }
27506 @@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
27507 if (!vc->tx) {
27508 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
27509 card->index);
27510 - atomic_inc(&vcc->stats->tx_err);
27511 + atomic_inc_unchecked(&vcc->stats->tx_err);
27512 dev_kfree_skb_any(skb);
27513 return -EINVAL;
27514 }
27515 @@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
27516 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
27517 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
27518 card->index);
27519 - atomic_inc(&vcc->stats->tx_err);
27520 + atomic_inc_unchecked(&vcc->stats->tx_err);
27521 dev_kfree_skb_any(skb);
27522 return -EINVAL;
27523 }
27524
27525 if (skb_shinfo(skb)->nr_frags != 0) {
27526 printk("nicstar%d: No scatter-gather yet.\n", card->index);
27527 - atomic_inc(&vcc->stats->tx_err);
27528 + atomic_inc_unchecked(&vcc->stats->tx_err);
27529 dev_kfree_skb_any(skb);
27530 return -EINVAL;
27531 }
27532 @@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
27533 }
27534
27535 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
27536 - atomic_inc(&vcc->stats->tx_err);
27537 + atomic_inc_unchecked(&vcc->stats->tx_err);
27538 dev_kfree_skb_any(skb);
27539 return -EIO;
27540 }
27541 - atomic_inc(&vcc->stats->tx);
27542 + atomic_inc_unchecked(&vcc->stats->tx);
27543
27544 return 0;
27545 }
27546 @@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27547 printk
27548 ("nicstar%d: Can't allocate buffers for aal0.\n",
27549 card->index);
27550 - atomic_add(i, &vcc->stats->rx_drop);
27551 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
27552 break;
27553 }
27554 if (!atm_charge(vcc, sb->truesize)) {
27555 RXPRINTK
27556 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
27557 card->index);
27558 - atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
27559 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
27560 dev_kfree_skb_any(sb);
27561 break;
27562 }
27563 @@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27564 ATM_SKB(sb)->vcc = vcc;
27565 __net_timestamp(sb);
27566 vcc->push(vcc, sb);
27567 - atomic_inc(&vcc->stats->rx);
27568 + atomic_inc_unchecked(&vcc->stats->rx);
27569 cell += ATM_CELL_PAYLOAD;
27570 }
27571
27572 @@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27573 if (iovb == NULL) {
27574 printk("nicstar%d: Out of iovec buffers.\n",
27575 card->index);
27576 - atomic_inc(&vcc->stats->rx_drop);
27577 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27578 recycle_rx_buf(card, skb);
27579 return;
27580 }
27581 @@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27582 small or large buffer itself. */
27583 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
27584 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
27585 - atomic_inc(&vcc->stats->rx_err);
27586 + atomic_inc_unchecked(&vcc->stats->rx_err);
27587 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
27588 NS_MAX_IOVECS);
27589 NS_PRV_IOVCNT(iovb) = 0;
27590 @@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27591 ("nicstar%d: Expected a small buffer, and this is not one.\n",
27592 card->index);
27593 which_list(card, skb);
27594 - atomic_inc(&vcc->stats->rx_err);
27595 + atomic_inc_unchecked(&vcc->stats->rx_err);
27596 recycle_rx_buf(card, skb);
27597 vc->rx_iov = NULL;
27598 recycle_iov_buf(card, iovb);
27599 @@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27600 ("nicstar%d: Expected a large buffer, and this is not one.\n",
27601 card->index);
27602 which_list(card, skb);
27603 - atomic_inc(&vcc->stats->rx_err);
27604 + atomic_inc_unchecked(&vcc->stats->rx_err);
27605 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
27606 NS_PRV_IOVCNT(iovb));
27607 vc->rx_iov = NULL;
27608 @@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27609 printk(" - PDU size mismatch.\n");
27610 else
27611 printk(".\n");
27612 - atomic_inc(&vcc->stats->rx_err);
27613 + atomic_inc_unchecked(&vcc->stats->rx_err);
27614 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
27615 NS_PRV_IOVCNT(iovb));
27616 vc->rx_iov = NULL;
27617 @@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27618 /* skb points to a small buffer */
27619 if (!atm_charge(vcc, skb->truesize)) {
27620 push_rxbufs(card, skb);
27621 - atomic_inc(&vcc->stats->rx_drop);
27622 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27623 } else {
27624 skb_put(skb, len);
27625 dequeue_sm_buf(card, skb);
27626 @@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27627 ATM_SKB(skb)->vcc = vcc;
27628 __net_timestamp(skb);
27629 vcc->push(vcc, skb);
27630 - atomic_inc(&vcc->stats->rx);
27631 + atomic_inc_unchecked(&vcc->stats->rx);
27632 }
27633 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
27634 struct sk_buff *sb;
27635 @@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27636 if (len <= NS_SMBUFSIZE) {
27637 if (!atm_charge(vcc, sb->truesize)) {
27638 push_rxbufs(card, sb);
27639 - atomic_inc(&vcc->stats->rx_drop);
27640 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27641 } else {
27642 skb_put(sb, len);
27643 dequeue_sm_buf(card, sb);
27644 @@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27645 ATM_SKB(sb)->vcc = vcc;
27646 __net_timestamp(sb);
27647 vcc->push(vcc, sb);
27648 - atomic_inc(&vcc->stats->rx);
27649 + atomic_inc_unchecked(&vcc->stats->rx);
27650 }
27651
27652 push_rxbufs(card, skb);
27653 @@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27654
27655 if (!atm_charge(vcc, skb->truesize)) {
27656 push_rxbufs(card, skb);
27657 - atomic_inc(&vcc->stats->rx_drop);
27658 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27659 } else {
27660 dequeue_lg_buf(card, skb);
27661 #ifdef NS_USE_DESTRUCTORS
27662 @@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27663 ATM_SKB(skb)->vcc = vcc;
27664 __net_timestamp(skb);
27665 vcc->push(vcc, skb);
27666 - atomic_inc(&vcc->stats->rx);
27667 + atomic_inc_unchecked(&vcc->stats->rx);
27668 }
27669
27670 push_rxbufs(card, sb);
27671 @@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27672 printk
27673 ("nicstar%d: Out of huge buffers.\n",
27674 card->index);
27675 - atomic_inc(&vcc->stats->rx_drop);
27676 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27677 recycle_iovec_rx_bufs(card,
27678 (struct iovec *)
27679 iovb->data,
27680 @@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27681 card->hbpool.count++;
27682 } else
27683 dev_kfree_skb_any(hb);
27684 - atomic_inc(&vcc->stats->rx_drop);
27685 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27686 } else {
27687 /* Copy the small buffer to the huge buffer */
27688 sb = (struct sk_buff *)iov->iov_base;
27689 @@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27690 #endif /* NS_USE_DESTRUCTORS */
27691 __net_timestamp(hb);
27692 vcc->push(vcc, hb);
27693 - atomic_inc(&vcc->stats->rx);
27694 + atomic_inc_unchecked(&vcc->stats->rx);
27695 }
27696 }
27697
27698 diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
27699 index e8cd652..bbbd1fc 100644
27700 --- a/drivers/atm/solos-pci.c
27701 +++ b/drivers/atm/solos-pci.c
27702 @@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
27703 }
27704 atm_charge(vcc, skb->truesize);
27705 vcc->push(vcc, skb);
27706 - atomic_inc(&vcc->stats->rx);
27707 + atomic_inc_unchecked(&vcc->stats->rx);
27708 break;
27709
27710 case PKT_STATUS:
27711 @@ -1008,7 +1008,7 @@ static uint32_t fpga_tx(struct solos_card *card)
27712 vcc = SKB_CB(oldskb)->vcc;
27713
27714 if (vcc) {
27715 - atomic_inc(&vcc->stats->tx);
27716 + atomic_inc_unchecked(&vcc->stats->tx);
27717 solos_pop(vcc, oldskb);
27718 } else
27719 dev_kfree_skb_irq(oldskb);
27720 diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
27721 index 90f1ccc..04c4a1e 100644
27722 --- a/drivers/atm/suni.c
27723 +++ b/drivers/atm/suni.c
27724 @@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
27725
27726
27727 #define ADD_LIMITED(s,v) \
27728 - atomic_add((v),&stats->s); \
27729 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
27730 + atomic_add_unchecked((v),&stats->s); \
27731 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
27732
27733
27734 static void suni_hz(unsigned long from_timer)
27735 diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
27736 index 5120a96..e2572bd 100644
27737 --- a/drivers/atm/uPD98402.c
27738 +++ b/drivers/atm/uPD98402.c
27739 @@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
27740 struct sonet_stats tmp;
27741 int error = 0;
27742
27743 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
27744 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
27745 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
27746 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
27747 if (zero && !error) {
27748 @@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
27749
27750
27751 #define ADD_LIMITED(s,v) \
27752 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
27753 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
27754 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
27755 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
27756 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
27757 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
27758
27759
27760 static void stat_event(struct atm_dev *dev)
27761 @@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
27762 if (reason & uPD98402_INT_PFM) stat_event(dev);
27763 if (reason & uPD98402_INT_PCO) {
27764 (void) GET(PCOCR); /* clear interrupt cause */
27765 - atomic_add(GET(HECCT),
27766 + atomic_add_unchecked(GET(HECCT),
27767 &PRIV(dev)->sonet_stats.uncorr_hcs);
27768 }
27769 if ((reason & uPD98402_INT_RFO) &&
27770 @@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
27771 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
27772 uPD98402_INT_LOS),PIMR); /* enable them */
27773 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
27774 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
27775 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
27776 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
27777 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
27778 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
27779 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
27780 return 0;
27781 }
27782
27783 diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
27784 index d889f56..17eb71e 100644
27785 --- a/drivers/atm/zatm.c
27786 +++ b/drivers/atm/zatm.c
27787 @@ -460,7 +460,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
27788 }
27789 if (!size) {
27790 dev_kfree_skb_irq(skb);
27791 - if (vcc) atomic_inc(&vcc->stats->rx_err);
27792 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
27793 continue;
27794 }
27795 if (!atm_charge(vcc,skb->truesize)) {
27796 @@ -470,7 +470,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
27797 skb->len = size;
27798 ATM_SKB(skb)->vcc = vcc;
27799 vcc->push(vcc,skb);
27800 - atomic_inc(&vcc->stats->rx);
27801 + atomic_inc_unchecked(&vcc->stats->rx);
27802 }
27803 zout(pos & 0xffff,MTA(mbx));
27804 #if 0 /* probably a stupid idea */
27805 @@ -734,7 +734,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
27806 skb_queue_head(&zatm_vcc->backlog,skb);
27807 break;
27808 }
27809 - atomic_inc(&vcc->stats->tx);
27810 + atomic_inc_unchecked(&vcc->stats->tx);
27811 wake_up(&zatm_vcc->tx_wait);
27812 }
27813
27814 diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
27815 index 8493536..31adee0 100644
27816 --- a/drivers/base/devtmpfs.c
27817 +++ b/drivers/base/devtmpfs.c
27818 @@ -368,7 +368,7 @@ int devtmpfs_mount(const char *mntdir)
27819 if (!thread)
27820 return 0;
27821
27822 - err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
27823 + err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
27824 if (err)
27825 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
27826 else
27827 diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
27828 index caf995f..6f76697 100644
27829 --- a/drivers/base/power/wakeup.c
27830 +++ b/drivers/base/power/wakeup.c
27831 @@ -30,14 +30,14 @@ bool events_check_enabled;
27832 * They need to be modified together atomically, so it's better to use one
27833 * atomic variable to hold them both.
27834 */
27835 -static atomic_t combined_event_count = ATOMIC_INIT(0);
27836 +static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
27837
27838 #define IN_PROGRESS_BITS (sizeof(int) * 4)
27839 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
27840
27841 static void split_counters(unsigned int *cnt, unsigned int *inpr)
27842 {
27843 - unsigned int comb = atomic_read(&combined_event_count);
27844 + unsigned int comb = atomic_read_unchecked(&combined_event_count);
27845
27846 *cnt = (comb >> IN_PROGRESS_BITS);
27847 *inpr = comb & MAX_IN_PROGRESS;
27848 @@ -353,7 +353,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
27849 ws->last_time = ktime_get();
27850
27851 /* Increment the counter of events in progress. */
27852 - atomic_inc(&combined_event_count);
27853 + atomic_inc_unchecked(&combined_event_count);
27854 }
27855
27856 /**
27857 @@ -443,7 +443,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
27858 * Increment the counter of registered wakeup events and decrement the
27859 * couter of wakeup events in progress simultaneously.
27860 */
27861 - atomic_add(MAX_IN_PROGRESS, &combined_event_count);
27862 + atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
27863 }
27864
27865 /**
27866 diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
27867 index b0f553b..77b928b 100644
27868 --- a/drivers/block/cciss.c
27869 +++ b/drivers/block/cciss.c
27870 @@ -1198,6 +1198,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
27871 int err;
27872 u32 cp;
27873
27874 + memset(&arg64, 0, sizeof(arg64));
27875 +
27876 err = 0;
27877 err |=
27878 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
27879 @@ -3007,7 +3009,7 @@ static void start_io(ctlr_info_t *h)
27880 while (!list_empty(&h->reqQ)) {
27881 c = list_entry(h->reqQ.next, CommandList_struct, list);
27882 /* can't do anything if fifo is full */
27883 - if ((h->access.fifo_full(h))) {
27884 + if ((h->access->fifo_full(h))) {
27885 dev_warn(&h->pdev->dev, "fifo full\n");
27886 break;
27887 }
27888 @@ -3017,7 +3019,7 @@ static void start_io(ctlr_info_t *h)
27889 h->Qdepth--;
27890
27891 /* Tell the controller execute command */
27892 - h->access.submit_command(h, c);
27893 + h->access->submit_command(h, c);
27894
27895 /* Put job onto the completed Q */
27896 addQ(&h->cmpQ, c);
27897 @@ -3443,17 +3445,17 @@ startio:
27898
27899 static inline unsigned long get_next_completion(ctlr_info_t *h)
27900 {
27901 - return h->access.command_completed(h);
27902 + return h->access->command_completed(h);
27903 }
27904
27905 static inline int interrupt_pending(ctlr_info_t *h)
27906 {
27907 - return h->access.intr_pending(h);
27908 + return h->access->intr_pending(h);
27909 }
27910
27911 static inline long interrupt_not_for_us(ctlr_info_t *h)
27912 {
27913 - return ((h->access.intr_pending(h) == 0) ||
27914 + return ((h->access->intr_pending(h) == 0) ||
27915 (h->interrupts_enabled == 0));
27916 }
27917
27918 @@ -3486,7 +3488,7 @@ static inline u32 next_command(ctlr_info_t *h)
27919 u32 a;
27920
27921 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
27922 - return h->access.command_completed(h);
27923 + return h->access->command_completed(h);
27924
27925 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
27926 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
27927 @@ -4044,7 +4046,7 @@ static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
27928 trans_support & CFGTBL_Trans_use_short_tags);
27929
27930 /* Change the access methods to the performant access methods */
27931 - h->access = SA5_performant_access;
27932 + h->access = &SA5_performant_access;
27933 h->transMethod = CFGTBL_Trans_Performant;
27934
27935 return;
27936 @@ -4316,7 +4318,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
27937 if (prod_index < 0)
27938 return -ENODEV;
27939 h->product_name = products[prod_index].product_name;
27940 - h->access = *(products[prod_index].access);
27941 + h->access = products[prod_index].access;
27942
27943 if (cciss_board_disabled(h)) {
27944 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
27945 @@ -5041,7 +5043,7 @@ reinit_after_soft_reset:
27946 }
27947
27948 /* make sure the board interrupts are off */
27949 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
27950 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
27951 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
27952 if (rc)
27953 goto clean2;
27954 @@ -5093,7 +5095,7 @@ reinit_after_soft_reset:
27955 * fake ones to scoop up any residual completions.
27956 */
27957 spin_lock_irqsave(&h->lock, flags);
27958 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
27959 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
27960 spin_unlock_irqrestore(&h->lock, flags);
27961 free_irq(h->intr[h->intr_mode], h);
27962 rc = cciss_request_irq(h, cciss_msix_discard_completions,
27963 @@ -5113,9 +5115,9 @@ reinit_after_soft_reset:
27964 dev_info(&h->pdev->dev, "Board READY.\n");
27965 dev_info(&h->pdev->dev,
27966 "Waiting for stale completions to drain.\n");
27967 - h->access.set_intr_mask(h, CCISS_INTR_ON);
27968 + h->access->set_intr_mask(h, CCISS_INTR_ON);
27969 msleep(10000);
27970 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
27971 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
27972
27973 rc = controller_reset_failed(h->cfgtable);
27974 if (rc)
27975 @@ -5138,7 +5140,7 @@ reinit_after_soft_reset:
27976 cciss_scsi_setup(h);
27977
27978 /* Turn the interrupts on so we can service requests */
27979 - h->access.set_intr_mask(h, CCISS_INTR_ON);
27980 + h->access->set_intr_mask(h, CCISS_INTR_ON);
27981
27982 /* Get the firmware version */
27983 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
27984 @@ -5211,7 +5213,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
27985 kfree(flush_buf);
27986 if (return_code != IO_OK)
27987 dev_warn(&h->pdev->dev, "Error flushing cache\n");
27988 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
27989 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
27990 free_irq(h->intr[h->intr_mode], h);
27991 }
27992
27993 diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
27994 index 7fda30e..eb5dfe0 100644
27995 --- a/drivers/block/cciss.h
27996 +++ b/drivers/block/cciss.h
27997 @@ -101,7 +101,7 @@ struct ctlr_info
27998 /* information about each logical volume */
27999 drive_info_struct *drv[CISS_MAX_LUN];
28000
28001 - struct access_method access;
28002 + struct access_method *access;
28003
28004 /* queue and queue Info */
28005 struct list_head reqQ;
28006 diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
28007 index 9125bbe..eede5c8 100644
28008 --- a/drivers/block/cpqarray.c
28009 +++ b/drivers/block/cpqarray.c
28010 @@ -404,7 +404,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
28011 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
28012 goto Enomem4;
28013 }
28014 - hba[i]->access.set_intr_mask(hba[i], 0);
28015 + hba[i]->access->set_intr_mask(hba[i], 0);
28016 if (request_irq(hba[i]->intr, do_ida_intr,
28017 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
28018 {
28019 @@ -459,7 +459,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
28020 add_timer(&hba[i]->timer);
28021
28022 /* Enable IRQ now that spinlock and rate limit timer are set up */
28023 - hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
28024 + hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
28025
28026 for(j=0; j<NWD; j++) {
28027 struct gendisk *disk = ida_gendisk[i][j];
28028 @@ -694,7 +694,7 @@ DBGINFO(
28029 for(i=0; i<NR_PRODUCTS; i++) {
28030 if (board_id == products[i].board_id) {
28031 c->product_name = products[i].product_name;
28032 - c->access = *(products[i].access);
28033 + c->access = products[i].access;
28034 break;
28035 }
28036 }
28037 @@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detect(void)
28038 hba[ctlr]->intr = intr;
28039 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
28040 hba[ctlr]->product_name = products[j].product_name;
28041 - hba[ctlr]->access = *(products[j].access);
28042 + hba[ctlr]->access = products[j].access;
28043 hba[ctlr]->ctlr = ctlr;
28044 hba[ctlr]->board_id = board_id;
28045 hba[ctlr]->pci_dev = NULL; /* not PCI */
28046 @@ -980,7 +980,7 @@ static void start_io(ctlr_info_t *h)
28047
28048 while((c = h->reqQ) != NULL) {
28049 /* Can't do anything if we're busy */
28050 - if (h->access.fifo_full(h) == 0)
28051 + if (h->access->fifo_full(h) == 0)
28052 return;
28053
28054 /* Get the first entry from the request Q */
28055 @@ -988,7 +988,7 @@ static void start_io(ctlr_info_t *h)
28056 h->Qdepth--;
28057
28058 /* Tell the controller to do our bidding */
28059 - h->access.submit_command(h, c);
28060 + h->access->submit_command(h, c);
28061
28062 /* Get onto the completion Q */
28063 addQ(&h->cmpQ, c);
28064 @@ -1050,7 +1050,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
28065 unsigned long flags;
28066 __u32 a,a1;
28067
28068 - istat = h->access.intr_pending(h);
28069 + istat = h->access->intr_pending(h);
28070 /* Is this interrupt for us? */
28071 if (istat == 0)
28072 return IRQ_NONE;
28073 @@ -1061,7 +1061,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
28074 */
28075 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
28076 if (istat & FIFO_NOT_EMPTY) {
28077 - while((a = h->access.command_completed(h))) {
28078 + while((a = h->access->command_completed(h))) {
28079 a1 = a; a &= ~3;
28080 if ((c = h->cmpQ) == NULL)
28081 {
28082 @@ -1449,11 +1449,11 @@ static int sendcmd(
28083 /*
28084 * Disable interrupt
28085 */
28086 - info_p->access.set_intr_mask(info_p, 0);
28087 + info_p->access->set_intr_mask(info_p, 0);
28088 /* Make sure there is room in the command FIFO */
28089 /* Actually it should be completely empty at this time. */
28090 for (i = 200000; i > 0; i--) {
28091 - temp = info_p->access.fifo_full(info_p);
28092 + temp = info_p->access->fifo_full(info_p);
28093 if (temp != 0) {
28094 break;
28095 }
28096 @@ -1466,7 +1466,7 @@ DBG(
28097 /*
28098 * Send the cmd
28099 */
28100 - info_p->access.submit_command(info_p, c);
28101 + info_p->access->submit_command(info_p, c);
28102 complete = pollcomplete(ctlr);
28103
28104 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
28105 @@ -1549,9 +1549,9 @@ static int revalidate_allvol(ctlr_info_t *host)
28106 * we check the new geometry. Then turn interrupts back on when
28107 * we're done.
28108 */
28109 - host->access.set_intr_mask(host, 0);
28110 + host->access->set_intr_mask(host, 0);
28111 getgeometry(ctlr);
28112 - host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
28113 + host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
28114
28115 for(i=0; i<NWD; i++) {
28116 struct gendisk *disk = ida_gendisk[ctlr][i];
28117 @@ -1591,7 +1591,7 @@ static int pollcomplete(int ctlr)
28118 /* Wait (up to 2 seconds) for a command to complete */
28119
28120 for (i = 200000; i > 0; i--) {
28121 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
28122 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
28123 if (done == 0) {
28124 udelay(10); /* a short fixed delay */
28125 } else
28126 diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
28127 index be73e9d..7fbf140 100644
28128 --- a/drivers/block/cpqarray.h
28129 +++ b/drivers/block/cpqarray.h
28130 @@ -99,7 +99,7 @@ struct ctlr_info {
28131 drv_info_t drv[NWD];
28132 struct proc_dir_entry *proc;
28133
28134 - struct access_method access;
28135 + struct access_method *access;
28136
28137 cmdlist_t *reqQ;
28138 cmdlist_t *cmpQ;
28139 diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
28140 index 8d68056..e67050f 100644
28141 --- a/drivers/block/drbd/drbd_int.h
28142 +++ b/drivers/block/drbd/drbd_int.h
28143 @@ -736,7 +736,7 @@ struct drbd_request;
28144 struct drbd_epoch {
28145 struct list_head list;
28146 unsigned int barrier_nr;
28147 - atomic_t epoch_size; /* increased on every request added. */
28148 + atomic_unchecked_t epoch_size; /* increased on every request added. */
28149 atomic_t active; /* increased on every req. added, and dec on every finished. */
28150 unsigned long flags;
28151 };
28152 @@ -1108,7 +1108,7 @@ struct drbd_conf {
28153 void *int_dig_in;
28154 void *int_dig_vv;
28155 wait_queue_head_t seq_wait;
28156 - atomic_t packet_seq;
28157 + atomic_unchecked_t packet_seq;
28158 unsigned int peer_seq;
28159 spinlock_t peer_seq_lock;
28160 unsigned int minor;
28161 @@ -1617,30 +1617,30 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
28162
28163 static inline void drbd_tcp_cork(struct socket *sock)
28164 {
28165 - int __user val = 1;
28166 + int val = 1;
28167 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
28168 - (char __user *)&val, sizeof(val));
28169 + (char __force_user *)&val, sizeof(val));
28170 }
28171
28172 static inline void drbd_tcp_uncork(struct socket *sock)
28173 {
28174 - int __user val = 0;
28175 + int val = 0;
28176 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
28177 - (char __user *)&val, sizeof(val));
28178 + (char __force_user *)&val, sizeof(val));
28179 }
28180
28181 static inline void drbd_tcp_nodelay(struct socket *sock)
28182 {
28183 - int __user val = 1;
28184 + int val = 1;
28185 (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
28186 - (char __user *)&val, sizeof(val));
28187 + (char __force_user *)&val, sizeof(val));
28188 }
28189
28190 static inline void drbd_tcp_quickack(struct socket *sock)
28191 {
28192 - int __user val = 2;
28193 + int val = 2;
28194 (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
28195 - (char __user *)&val, sizeof(val));
28196 + (char __force_user *)&val, sizeof(val));
28197 }
28198
28199 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
28200 diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
28201 index 211fc44..c5116f1 100644
28202 --- a/drivers/block/drbd/drbd_main.c
28203 +++ b/drivers/block/drbd/drbd_main.c
28204 @@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
28205 p.sector = sector;
28206 p.block_id = block_id;
28207 p.blksize = blksize;
28208 - p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
28209 + p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
28210
28211 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
28212 return false;
28213 @@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
28214 p.sector = cpu_to_be64(req->sector);
28215 p.block_id = (unsigned long)req;
28216 p.seq_num = cpu_to_be32(req->seq_num =
28217 - atomic_add_return(1, &mdev->packet_seq));
28218 + atomic_add_return_unchecked(1, &mdev->packet_seq));
28219
28220 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
28221
28222 @@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
28223 atomic_set(&mdev->unacked_cnt, 0);
28224 atomic_set(&mdev->local_cnt, 0);
28225 atomic_set(&mdev->net_cnt, 0);
28226 - atomic_set(&mdev->packet_seq, 0);
28227 + atomic_set_unchecked(&mdev->packet_seq, 0);
28228 atomic_set(&mdev->pp_in_use, 0);
28229 atomic_set(&mdev->pp_in_use_by_net, 0);
28230 atomic_set(&mdev->rs_sect_in, 0);
28231 @@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
28232 mdev->receiver.t_state);
28233
28234 /* no need to lock it, I'm the only thread alive */
28235 - if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
28236 - dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
28237 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
28238 + dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
28239 mdev->al_writ_cnt =
28240 mdev->bm_writ_cnt =
28241 mdev->read_cnt =
28242 diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
28243 index af2a250..219c74b 100644
28244 --- a/drivers/block/drbd/drbd_nl.c
28245 +++ b/drivers/block/drbd/drbd_nl.c
28246 @@ -2359,7 +2359,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
28247 module_put(THIS_MODULE);
28248 }
28249
28250 -static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
28251 +static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
28252
28253 static unsigned short *
28254 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
28255 @@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
28256 cn_reply->id.idx = CN_IDX_DRBD;
28257 cn_reply->id.val = CN_VAL_DRBD;
28258
28259 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
28260 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
28261 cn_reply->ack = 0; /* not used here. */
28262 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
28263 (int)((char *)tl - (char *)reply->tag_list);
28264 @@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
28265 cn_reply->id.idx = CN_IDX_DRBD;
28266 cn_reply->id.val = CN_VAL_DRBD;
28267
28268 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
28269 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
28270 cn_reply->ack = 0; /* not used here. */
28271 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
28272 (int)((char *)tl - (char *)reply->tag_list);
28273 @@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
28274 cn_reply->id.idx = CN_IDX_DRBD;
28275 cn_reply->id.val = CN_VAL_DRBD;
28276
28277 - cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
28278 + cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
28279 cn_reply->ack = 0; // not used here.
28280 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
28281 (int)((char*)tl - (char*)reply->tag_list);
28282 @@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drbd_conf *mdev)
28283 cn_reply->id.idx = CN_IDX_DRBD;
28284 cn_reply->id.val = CN_VAL_DRBD;
28285
28286 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
28287 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
28288 cn_reply->ack = 0; /* not used here. */
28289 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
28290 (int)((char *)tl - (char *)reply->tag_list);
28291 diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
28292 index 43beaca..4a5b1dd 100644
28293 --- a/drivers/block/drbd/drbd_receiver.c
28294 +++ b/drivers/block/drbd/drbd_receiver.c
28295 @@ -894,7 +894,7 @@ retry:
28296 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
28297 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
28298
28299 - atomic_set(&mdev->packet_seq, 0);
28300 + atomic_set_unchecked(&mdev->packet_seq, 0);
28301 mdev->peer_seq = 0;
28302
28303 drbd_thread_start(&mdev->asender);
28304 @@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
28305 do {
28306 next_epoch = NULL;
28307
28308 - epoch_size = atomic_read(&epoch->epoch_size);
28309 + epoch_size = atomic_read_unchecked(&epoch->epoch_size);
28310
28311 switch (ev & ~EV_CLEANUP) {
28312 case EV_PUT:
28313 @@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
28314 rv = FE_DESTROYED;
28315 } else {
28316 epoch->flags = 0;
28317 - atomic_set(&epoch->epoch_size, 0);
28318 + atomic_set_unchecked(&epoch->epoch_size, 0);
28319 /* atomic_set(&epoch->active, 0); is already zero */
28320 if (rv == FE_STILL_LIVE)
28321 rv = FE_RECYCLED;
28322 @@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
28323 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
28324 drbd_flush(mdev);
28325
28326 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
28327 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
28328 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
28329 if (epoch)
28330 break;
28331 }
28332
28333 epoch = mdev->current_epoch;
28334 - wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
28335 + wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
28336
28337 D_ASSERT(atomic_read(&epoch->active) == 0);
28338 D_ASSERT(epoch->flags == 0);
28339 @@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
28340 }
28341
28342 epoch->flags = 0;
28343 - atomic_set(&epoch->epoch_size, 0);
28344 + atomic_set_unchecked(&epoch->epoch_size, 0);
28345 atomic_set(&epoch->active, 0);
28346
28347 spin_lock(&mdev->epoch_lock);
28348 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
28349 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
28350 list_add(&epoch->list, &mdev->current_epoch->list);
28351 mdev->current_epoch = epoch;
28352 mdev->epochs++;
28353 @@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
28354 spin_unlock(&mdev->peer_seq_lock);
28355
28356 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
28357 - atomic_inc(&mdev->current_epoch->epoch_size);
28358 + atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
28359 return drbd_drain_block(mdev, data_size);
28360 }
28361
28362 @@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
28363
28364 spin_lock(&mdev->epoch_lock);
28365 e->epoch = mdev->current_epoch;
28366 - atomic_inc(&e->epoch->epoch_size);
28367 + atomic_inc_unchecked(&e->epoch->epoch_size);
28368 atomic_inc(&e->epoch->active);
28369 spin_unlock(&mdev->epoch_lock);
28370
28371 @@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
28372 D_ASSERT(list_empty(&mdev->done_ee));
28373
28374 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
28375 - atomic_set(&mdev->current_epoch->epoch_size, 0);
28376 + atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
28377 D_ASSERT(list_empty(&mdev->current_epoch->list));
28378 }
28379
28380 diff --git a/drivers/block/loop.c b/drivers/block/loop.c
28381 index cd50435..ba1ffb5 100644
28382 --- a/drivers/block/loop.c
28383 +++ b/drivers/block/loop.c
28384 @@ -226,7 +226,7 @@ static int __do_lo_send_write(struct file *file,
28385 mm_segment_t old_fs = get_fs();
28386
28387 set_fs(get_ds());
28388 - bw = file->f_op->write(file, buf, len, &pos);
28389 + bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
28390 set_fs(old_fs);
28391 if (likely(bw == len))
28392 return 0;
28393 diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
28394 index 4364303..9adf4ee 100644
28395 --- a/drivers/char/Kconfig
28396 +++ b/drivers/char/Kconfig
28397 @@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
28398
28399 config DEVKMEM
28400 bool "/dev/kmem virtual device support"
28401 - default y
28402 + default n
28403 + depends on !GRKERNSEC_KMEM
28404 help
28405 Say Y here if you want to support the /dev/kmem device. The
28406 /dev/kmem device is rarely used, but can be used for certain
28407 @@ -596,6 +597,7 @@ config DEVPORT
28408 bool
28409 depends on !M68K
28410 depends on ISA || PCI
28411 + depends on !GRKERNSEC_KMEM
28412 default y
28413
28414 source "drivers/s390/char/Kconfig"
28415 diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
28416 index 2e04433..22afc64 100644
28417 --- a/drivers/char/agp/frontend.c
28418 +++ b/drivers/char/agp/frontend.c
28419 @@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
28420 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
28421 return -EFAULT;
28422
28423 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
28424 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
28425 return -EFAULT;
28426
28427 client = agp_find_client_by_pid(reserve.pid);
28428 diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
28429 index 095ab90..afad0a4 100644
28430 --- a/drivers/char/briq_panel.c
28431 +++ b/drivers/char/briq_panel.c
28432 @@ -9,6 +9,7 @@
28433 #include <linux/types.h>
28434 #include <linux/errno.h>
28435 #include <linux/tty.h>
28436 +#include <linux/mutex.h>
28437 #include <linux/timer.h>
28438 #include <linux/kernel.h>
28439 #include <linux/wait.h>
28440 @@ -34,6 +35,7 @@ static int vfd_is_open;
28441 static unsigned char vfd[40];
28442 static int vfd_cursor;
28443 static unsigned char ledpb, led;
28444 +static DEFINE_MUTEX(vfd_mutex);
28445
28446 static void update_vfd(void)
28447 {
28448 @@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
28449 if (!vfd_is_open)
28450 return -EBUSY;
28451
28452 + mutex_lock(&vfd_mutex);
28453 for (;;) {
28454 char c;
28455 if (!indx)
28456 break;
28457 - if (get_user(c, buf))
28458 + if (get_user(c, buf)) {
28459 + mutex_unlock(&vfd_mutex);
28460 return -EFAULT;
28461 + }
28462 if (esc) {
28463 set_led(c);
28464 esc = 0;
28465 @@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
28466 buf++;
28467 }
28468 update_vfd();
28469 + mutex_unlock(&vfd_mutex);
28470
28471 return len;
28472 }
28473 diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
28474 index f773a9d..65cd683 100644
28475 --- a/drivers/char/genrtc.c
28476 +++ b/drivers/char/genrtc.c
28477 @@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
28478 switch (cmd) {
28479
28480 case RTC_PLL_GET:
28481 + memset(&pll, 0, sizeof(pll));
28482 if (get_rtc_pll(&pll))
28483 return -EINVAL;
28484 else
28485 diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
28486 index 0833896..cccce52 100644
28487 --- a/drivers/char/hpet.c
28488 +++ b/drivers/char/hpet.c
28489 @@ -572,7 +572,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
28490 }
28491
28492 static int
28493 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
28494 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
28495 struct hpet_info *info)
28496 {
28497 struct hpet_timer __iomem *timer;
28498 diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
28499 index 58c0e63..46c16bf 100644
28500 --- a/drivers/char/ipmi/ipmi_msghandler.c
28501 +++ b/drivers/char/ipmi/ipmi_msghandler.c
28502 @@ -415,7 +415,7 @@ struct ipmi_smi {
28503 struct proc_dir_entry *proc_dir;
28504 char proc_dir_name[10];
28505
28506 - atomic_t stats[IPMI_NUM_STATS];
28507 + atomic_unchecked_t stats[IPMI_NUM_STATS];
28508
28509 /*
28510 * run_to_completion duplicate of smb_info, smi_info
28511 @@ -448,9 +448,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
28512
28513
28514 #define ipmi_inc_stat(intf, stat) \
28515 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
28516 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
28517 #define ipmi_get_stat(intf, stat) \
28518 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
28519 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
28520
28521 static int is_lan_addr(struct ipmi_addr *addr)
28522 {
28523 @@ -2868,7 +2868,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
28524 INIT_LIST_HEAD(&intf->cmd_rcvrs);
28525 init_waitqueue_head(&intf->waitq);
28526 for (i = 0; i < IPMI_NUM_STATS; i++)
28527 - atomic_set(&intf->stats[i], 0);
28528 + atomic_set_unchecked(&intf->stats[i], 0);
28529
28530 intf->proc_dir = NULL;
28531
28532 diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
28533 index 50fcf9c..91b5528 100644
28534 --- a/drivers/char/ipmi/ipmi_si_intf.c
28535 +++ b/drivers/char/ipmi/ipmi_si_intf.c
28536 @@ -277,7 +277,7 @@ struct smi_info {
28537 unsigned char slave_addr;
28538
28539 /* Counters and things for the proc filesystem. */
28540 - atomic_t stats[SI_NUM_STATS];
28541 + atomic_unchecked_t stats[SI_NUM_STATS];
28542
28543 struct task_struct *thread;
28544
28545 @@ -286,9 +286,9 @@ struct smi_info {
28546 };
28547
28548 #define smi_inc_stat(smi, stat) \
28549 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
28550 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
28551 #define smi_get_stat(smi, stat) \
28552 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
28553 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
28554
28555 #define SI_MAX_PARMS 4
28556
28557 @@ -3230,7 +3230,7 @@ static int try_smi_init(struct smi_info *new_smi)
28558 atomic_set(&new_smi->req_events, 0);
28559 new_smi->run_to_completion = 0;
28560 for (i = 0; i < SI_NUM_STATS; i++)
28561 - atomic_set(&new_smi->stats[i], 0);
28562 + atomic_set_unchecked(&new_smi->stats[i], 0);
28563
28564 new_smi->interrupt_disabled = 1;
28565 atomic_set(&new_smi->stop_operation, 0);
28566 diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
28567 index 1aeaaba..e018570 100644
28568 --- a/drivers/char/mbcs.c
28569 +++ b/drivers/char/mbcs.c
28570 @@ -800,7 +800,7 @@ static int mbcs_remove(struct cx_dev *dev)
28571 return 0;
28572 }
28573
28574 -static const struct cx_device_id __devinitdata mbcs_id_table[] = {
28575 +static const struct cx_device_id __devinitconst mbcs_id_table[] = {
28576 {
28577 .part_num = MBCS_PART_NUM,
28578 .mfg_num = MBCS_MFG_NUM,
28579 diff --git a/drivers/char/mem.c b/drivers/char/mem.c
28580 index d6e9d08..4493e89 100644
28581 --- a/drivers/char/mem.c
28582 +++ b/drivers/char/mem.c
28583 @@ -18,6 +18,7 @@
28584 #include <linux/raw.h>
28585 #include <linux/tty.h>
28586 #include <linux/capability.h>
28587 +#include <linux/security.h>
28588 #include <linux/ptrace.h>
28589 #include <linux/device.h>
28590 #include <linux/highmem.h>
28591 @@ -35,6 +36,10 @@
28592 # include <linux/efi.h>
28593 #endif
28594
28595 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
28596 +extern const struct file_operations grsec_fops;
28597 +#endif
28598 +
28599 static inline unsigned long size_inside_page(unsigned long start,
28600 unsigned long size)
28601 {
28602 @@ -66,9 +71,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
28603
28604 while (cursor < to) {
28605 if (!devmem_is_allowed(pfn)) {
28606 +#ifdef CONFIG_GRKERNSEC_KMEM
28607 + gr_handle_mem_readwrite(from, to);
28608 +#else
28609 printk(KERN_INFO
28610 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
28611 current->comm, from, to);
28612 +#endif
28613 return 0;
28614 }
28615 cursor += PAGE_SIZE;
28616 @@ -76,6 +85,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
28617 }
28618 return 1;
28619 }
28620 +#elif defined(CONFIG_GRKERNSEC_KMEM)
28621 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
28622 +{
28623 + return 0;
28624 +}
28625 #else
28626 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
28627 {
28628 @@ -118,6 +132,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
28629
28630 while (count > 0) {
28631 unsigned long remaining;
28632 + char *temp;
28633
28634 sz = size_inside_page(p, count);
28635
28636 @@ -133,7 +148,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
28637 if (!ptr)
28638 return -EFAULT;
28639
28640 - remaining = copy_to_user(buf, ptr, sz);
28641 +#ifdef CONFIG_PAX_USERCOPY
28642 + temp = kmalloc(sz, GFP_KERNEL);
28643 + if (!temp) {
28644 + unxlate_dev_mem_ptr(p, ptr);
28645 + return -ENOMEM;
28646 + }
28647 + memcpy(temp, ptr, sz);
28648 +#else
28649 + temp = ptr;
28650 +#endif
28651 +
28652 + remaining = copy_to_user(buf, temp, sz);
28653 +
28654 +#ifdef CONFIG_PAX_USERCOPY
28655 + kfree(temp);
28656 +#endif
28657 +
28658 unxlate_dev_mem_ptr(p, ptr);
28659 if (remaining)
28660 return -EFAULT;
28661 @@ -396,9 +427,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
28662 size_t count, loff_t *ppos)
28663 {
28664 unsigned long p = *ppos;
28665 - ssize_t low_count, read, sz;
28666 + ssize_t low_count, read, sz, err = 0;
28667 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
28668 - int err = 0;
28669
28670 read = 0;
28671 if (p < (unsigned long) high_memory) {
28672 @@ -420,6 +450,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
28673 }
28674 #endif
28675 while (low_count > 0) {
28676 + char *temp;
28677 +
28678 sz = size_inside_page(p, low_count);
28679
28680 /*
28681 @@ -429,7 +461,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
28682 */
28683 kbuf = xlate_dev_kmem_ptr((char *)p);
28684
28685 - if (copy_to_user(buf, kbuf, sz))
28686 +#ifdef CONFIG_PAX_USERCOPY
28687 + temp = kmalloc(sz, GFP_KERNEL);
28688 + if (!temp)
28689 + return -ENOMEM;
28690 + memcpy(temp, kbuf, sz);
28691 +#else
28692 + temp = kbuf;
28693 +#endif
28694 +
28695 + err = copy_to_user(buf, temp, sz);
28696 +
28697 +#ifdef CONFIG_PAX_USERCOPY
28698 + kfree(temp);
28699 +#endif
28700 +
28701 + if (err)
28702 return -EFAULT;
28703 buf += sz;
28704 p += sz;
28705 @@ -867,6 +914,9 @@ static const struct memdev {
28706 #ifdef CONFIG_CRASH_DUMP
28707 [12] = { "oldmem", 0, &oldmem_fops, NULL },
28708 #endif
28709 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
28710 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
28711 +#endif
28712 };
28713
28714 static int memory_open(struct inode *inode, struct file *filp)
28715 diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
28716 index da3cfee..a5a6606 100644
28717 --- a/drivers/char/nvram.c
28718 +++ b/drivers/char/nvram.c
28719 @@ -248,7 +248,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
28720
28721 spin_unlock_irq(&rtc_lock);
28722
28723 - if (copy_to_user(buf, contents, tmp - contents))
28724 + if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
28725 return -EFAULT;
28726
28727 *ppos = i;
28728 diff --git a/drivers/char/random.c b/drivers/char/random.c
28729 index 54ca8b2..d58cb51 100644
28730 --- a/drivers/char/random.c
28731 +++ b/drivers/char/random.c
28732 @@ -261,8 +261,13 @@
28733 /*
28734 * Configuration information
28735 */
28736 +#ifdef CONFIG_GRKERNSEC_RANDNET
28737 +#define INPUT_POOL_WORDS 512
28738 +#define OUTPUT_POOL_WORDS 128
28739 +#else
28740 #define INPUT_POOL_WORDS 128
28741 #define OUTPUT_POOL_WORDS 32
28742 +#endif
28743 #define SEC_XFER_SIZE 512
28744 #define EXTRACT_SIZE 10
28745
28746 @@ -300,10 +305,17 @@ static struct poolinfo {
28747 int poolwords;
28748 int tap1, tap2, tap3, tap4, tap5;
28749 } poolinfo_table[] = {
28750 +#ifdef CONFIG_GRKERNSEC_RANDNET
28751 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
28752 + { 512, 411, 308, 208, 104, 1 },
28753 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
28754 + { 128, 103, 76, 51, 25, 1 },
28755 +#else
28756 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
28757 { 128, 103, 76, 51, 25, 1 },
28758 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
28759 { 32, 26, 20, 14, 7, 1 },
28760 +#endif
28761 #if 0
28762 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
28763 { 2048, 1638, 1231, 819, 411, 1 },
28764 @@ -913,7 +925,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
28765
28766 extract_buf(r, tmp);
28767 i = min_t(int, nbytes, EXTRACT_SIZE);
28768 - if (copy_to_user(buf, tmp, i)) {
28769 + if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
28770 ret = -EFAULT;
28771 break;
28772 }
28773 @@ -1238,7 +1250,7 @@ EXPORT_SYMBOL(generate_random_uuid);
28774 #include <linux/sysctl.h>
28775
28776 static int min_read_thresh = 8, min_write_thresh;
28777 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
28778 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
28779 static int max_write_thresh = INPUT_POOL_WORDS * 32;
28780 static char sysctl_bootid[16];
28781
28782 diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
28783 index 1ee8ce7..b778bef 100644
28784 --- a/drivers/char/sonypi.c
28785 +++ b/drivers/char/sonypi.c
28786 @@ -55,6 +55,7 @@
28787 #include <asm/uaccess.h>
28788 #include <asm/io.h>
28789 #include <asm/system.h>
28790 +#include <asm/local.h>
28791
28792 #include <linux/sonypi.h>
28793
28794 @@ -491,7 +492,7 @@ static struct sonypi_device {
28795 spinlock_t fifo_lock;
28796 wait_queue_head_t fifo_proc_list;
28797 struct fasync_struct *fifo_async;
28798 - int open_count;
28799 + local_t open_count;
28800 int model;
28801 struct input_dev *input_jog_dev;
28802 struct input_dev *input_key_dev;
28803 @@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
28804 static int sonypi_misc_release(struct inode *inode, struct file *file)
28805 {
28806 mutex_lock(&sonypi_device.lock);
28807 - sonypi_device.open_count--;
28808 + local_dec(&sonypi_device.open_count);
28809 mutex_unlock(&sonypi_device.lock);
28810 return 0;
28811 }
28812 @@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
28813 {
28814 mutex_lock(&sonypi_device.lock);
28815 /* Flush input queue on first open */
28816 - if (!sonypi_device.open_count)
28817 + if (!local_read(&sonypi_device.open_count))
28818 kfifo_reset(&sonypi_device.fifo);
28819 - sonypi_device.open_count++;
28820 + local_inc(&sonypi_device.open_count);
28821 mutex_unlock(&sonypi_device.lock);
28822
28823 return 0;
28824 diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
28825 index 32362cf..32a96e9 100644
28826 --- a/drivers/char/tpm/tpm.c
28827 +++ b/drivers/char/tpm/tpm.c
28828 @@ -415,7 +415,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
28829 chip->vendor.req_complete_val)
28830 goto out_recv;
28831
28832 - if ((status == chip->vendor.req_canceled)) {
28833 + if (status == chip->vendor.req_canceled) {
28834 dev_err(chip->dev, "Operation Canceled\n");
28835 rc = -ECANCELED;
28836 goto out;
28837 diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
28838 index 0636520..169c1d0 100644
28839 --- a/drivers/char/tpm/tpm_bios.c
28840 +++ b/drivers/char/tpm/tpm_bios.c
28841 @@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
28842 event = addr;
28843
28844 if ((event->event_type == 0 && event->event_size == 0) ||
28845 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
28846 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
28847 return NULL;
28848
28849 return addr;
28850 @@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
28851 return NULL;
28852
28853 if ((event->event_type == 0 && event->event_size == 0) ||
28854 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
28855 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
28856 return NULL;
28857
28858 (*pos)++;
28859 @@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
28860 int i;
28861
28862 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
28863 - seq_putc(m, data[i]);
28864 + if (!seq_putc(m, data[i]))
28865 + return -EFAULT;
28866
28867 return 0;
28868 }
28869 @@ -410,8 +411,13 @@ static int read_log(struct tpm_bios_log *log)
28870 log->bios_event_log_end = log->bios_event_log + len;
28871
28872 virt = acpi_os_map_memory(start, len);
28873 + if (!virt) {
28874 + kfree(log->bios_event_log);
28875 + log->bios_event_log = NULL;
28876 + return -EFAULT;
28877 + }
28878
28879 - memcpy(log->bios_event_log, virt, len);
28880 + memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
28881
28882 acpi_os_unmap_memory(virt, len);
28883 return 0;
28884 diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
28885 index b58b561..c9088c8 100644
28886 --- a/drivers/char/virtio_console.c
28887 +++ b/drivers/char/virtio_console.c
28888 @@ -563,7 +563,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
28889 if (to_user) {
28890 ssize_t ret;
28891
28892 - ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
28893 + ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
28894 if (ret)
28895 return -EFAULT;
28896 } else {
28897 @@ -662,7 +662,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
28898 if (!port_has_data(port) && !port->host_connected)
28899 return 0;
28900
28901 - return fill_readbuf(port, ubuf, count, true);
28902 + return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
28903 }
28904
28905 static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
28906 diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
28907 index c9eee6d..f9d5280 100644
28908 --- a/drivers/edac/amd64_edac.c
28909 +++ b/drivers/edac/amd64_edac.c
28910 @@ -2685,7 +2685,7 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
28911 * PCI core identifies what devices are on a system during boot, and then
28912 * inquiry this table to see if this driver is for a given device found.
28913 */
28914 -static const struct pci_device_id amd64_pci_table[] __devinitdata = {
28915 +static const struct pci_device_id amd64_pci_table[] __devinitconst = {
28916 {
28917 .vendor = PCI_VENDOR_ID_AMD,
28918 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
28919 diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
28920 index e47e73b..348e0bd 100644
28921 --- a/drivers/edac/amd76x_edac.c
28922 +++ b/drivers/edac/amd76x_edac.c
28923 @@ -321,7 +321,7 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev)
28924 edac_mc_free(mci);
28925 }
28926
28927 -static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
28928 +static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
28929 {
28930 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
28931 AMD762},
28932 diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
28933 index 1af531a..3a8ff27 100644
28934 --- a/drivers/edac/e752x_edac.c
28935 +++ b/drivers/edac/e752x_edac.c
28936 @@ -1380,7 +1380,7 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
28937 edac_mc_free(mci);
28938 }
28939
28940 -static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
28941 +static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
28942 {
28943 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
28944 E7520},
28945 diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
28946 index 6ffb6d2..383d8d7 100644
28947 --- a/drivers/edac/e7xxx_edac.c
28948 +++ b/drivers/edac/e7xxx_edac.c
28949 @@ -525,7 +525,7 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
28950 edac_mc_free(mci);
28951 }
28952
28953 -static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
28954 +static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
28955 {
28956 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
28957 E7205},
28958 diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
28959 index 97f5064..202b6e6 100644
28960 --- a/drivers/edac/edac_pci_sysfs.c
28961 +++ b/drivers/edac/edac_pci_sysfs.c
28962 @@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
28963 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
28964 static int edac_pci_poll_msec = 1000; /* one second workq period */
28965
28966 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
28967 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
28968 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
28969 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
28970
28971 static struct kobject *edac_pci_top_main_kobj;
28972 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
28973 @@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
28974 edac_printk(KERN_CRIT, EDAC_PCI,
28975 "Signaled System Error on %s\n",
28976 pci_name(dev));
28977 - atomic_inc(&pci_nonparity_count);
28978 + atomic_inc_unchecked(&pci_nonparity_count);
28979 }
28980
28981 if (status & (PCI_STATUS_PARITY)) {
28982 @@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
28983 "Master Data Parity Error on %s\n",
28984 pci_name(dev));
28985
28986 - atomic_inc(&pci_parity_count);
28987 + atomic_inc_unchecked(&pci_parity_count);
28988 }
28989
28990 if (status & (PCI_STATUS_DETECTED_PARITY)) {
28991 @@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
28992 "Detected Parity Error on %s\n",
28993 pci_name(dev));
28994
28995 - atomic_inc(&pci_parity_count);
28996 + atomic_inc_unchecked(&pci_parity_count);
28997 }
28998 }
28999
29000 @@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29001 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
29002 "Signaled System Error on %s\n",
29003 pci_name(dev));
29004 - atomic_inc(&pci_nonparity_count);
29005 + atomic_inc_unchecked(&pci_nonparity_count);
29006 }
29007
29008 if (status & (PCI_STATUS_PARITY)) {
29009 @@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29010 "Master Data Parity Error on "
29011 "%s\n", pci_name(dev));
29012
29013 - atomic_inc(&pci_parity_count);
29014 + atomic_inc_unchecked(&pci_parity_count);
29015 }
29016
29017 if (status & (PCI_STATUS_DETECTED_PARITY)) {
29018 @@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29019 "Detected Parity Error on %s\n",
29020 pci_name(dev));
29021
29022 - atomic_inc(&pci_parity_count);
29023 + atomic_inc_unchecked(&pci_parity_count);
29024 }
29025 }
29026 }
29027 @@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
29028 if (!check_pci_errors)
29029 return;
29030
29031 - before_count = atomic_read(&pci_parity_count);
29032 + before_count = atomic_read_unchecked(&pci_parity_count);
29033
29034 /* scan all PCI devices looking for a Parity Error on devices and
29035 * bridges.
29036 @@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
29037 /* Only if operator has selected panic on PCI Error */
29038 if (edac_pci_get_panic_on_pe()) {
29039 /* If the count is different 'after' from 'before' */
29040 - if (before_count != atomic_read(&pci_parity_count))
29041 + if (before_count != atomic_read_unchecked(&pci_parity_count))
29042 panic("EDAC: PCI Parity Error");
29043 }
29044 }
29045 diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
29046 index c0510b3..6e2a954 100644
29047 --- a/drivers/edac/i3000_edac.c
29048 +++ b/drivers/edac/i3000_edac.c
29049 @@ -470,7 +470,7 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev)
29050 edac_mc_free(mci);
29051 }
29052
29053 -static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
29054 +static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
29055 {
29056 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
29057 I3000},
29058 diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
29059 index 73f55e200..5faaf59 100644
29060 --- a/drivers/edac/i3200_edac.c
29061 +++ b/drivers/edac/i3200_edac.c
29062 @@ -445,7 +445,7 @@ static void __devexit i3200_remove_one(struct pci_dev *pdev)
29063 edac_mc_free(mci);
29064 }
29065
29066 -static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
29067 +static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
29068 {
29069 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
29070 I3200},
29071 diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
29072 index 4dc3ac2..67d05a6 100644
29073 --- a/drivers/edac/i5000_edac.c
29074 +++ b/drivers/edac/i5000_edac.c
29075 @@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
29076 *
29077 * The "E500P" device is the first device supported.
29078 */
29079 -static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
29080 +static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
29081 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
29082 .driver_data = I5000P},
29083
29084 diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
29085 index bcbdeec..9886d16 100644
29086 --- a/drivers/edac/i5100_edac.c
29087 +++ b/drivers/edac/i5100_edac.c
29088 @@ -1051,7 +1051,7 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev)
29089 edac_mc_free(mci);
29090 }
29091
29092 -static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
29093 +static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
29094 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
29095 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
29096 { 0, }
29097 diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
29098 index 74d6ec34..baff517 100644
29099 --- a/drivers/edac/i5400_edac.c
29100 +++ b/drivers/edac/i5400_edac.c
29101 @@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
29102 *
29103 * The "E500P" device is the first device supported.
29104 */
29105 -static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
29106 +static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
29107 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
29108 {0,} /* 0 terminated list. */
29109 };
29110 diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
29111 index 6104dba..e7ea8e1 100644
29112 --- a/drivers/edac/i7300_edac.c
29113 +++ b/drivers/edac/i7300_edac.c
29114 @@ -1192,7 +1192,7 @@ static void __devexit i7300_remove_one(struct pci_dev *pdev)
29115 *
29116 * Has only 8086:360c PCI ID
29117 */
29118 -static const struct pci_device_id i7300_pci_tbl[] __devinitdata = {
29119 +static const struct pci_device_id i7300_pci_tbl[] __devinitconst = {
29120 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)},
29121 {0,} /* 0 terminated list. */
29122 };
29123 diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
29124 index 8568d9b..42b2fa8 100644
29125 --- a/drivers/edac/i7core_edac.c
29126 +++ b/drivers/edac/i7core_edac.c
29127 @@ -391,7 +391,7 @@ static const struct pci_id_table pci_dev_table[] = {
29128 /*
29129 * pci_device_id table for which devices we are looking for
29130 */
29131 -static const struct pci_device_id i7core_pci_tbl[] __devinitdata = {
29132 +static const struct pci_device_id i7core_pci_tbl[] __devinitconst = {
29133 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
29134 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
29135 {0,} /* 0 terminated list. */
29136 diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
29137 index 4329d39..f3022ef 100644
29138 --- a/drivers/edac/i82443bxgx_edac.c
29139 +++ b/drivers/edac/i82443bxgx_edac.c
29140 @@ -380,7 +380,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
29141
29142 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
29143
29144 -static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
29145 +static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
29146 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
29147 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
29148 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
29149 diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
29150 index 931a057..fd28340 100644
29151 --- a/drivers/edac/i82860_edac.c
29152 +++ b/drivers/edac/i82860_edac.c
29153 @@ -270,7 +270,7 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
29154 edac_mc_free(mci);
29155 }
29156
29157 -static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
29158 +static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
29159 {
29160 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
29161 I82860},
29162 diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
29163 index 33864c6..01edc61 100644
29164 --- a/drivers/edac/i82875p_edac.c
29165 +++ b/drivers/edac/i82875p_edac.c
29166 @@ -511,7 +511,7 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
29167 edac_mc_free(mci);
29168 }
29169
29170 -static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
29171 +static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
29172 {
29173 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
29174 I82875P},
29175 diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
29176 index 4184e01..dcb2cd3 100644
29177 --- a/drivers/edac/i82975x_edac.c
29178 +++ b/drivers/edac/i82975x_edac.c
29179 @@ -612,7 +612,7 @@ static void __devexit i82975x_remove_one(struct pci_dev *pdev)
29180 edac_mc_free(mci);
29181 }
29182
29183 -static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
29184 +static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
29185 {
29186 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
29187 I82975X
29188 diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
29189 index 0106747..0b40417 100644
29190 --- a/drivers/edac/mce_amd.h
29191 +++ b/drivers/edac/mce_amd.h
29192 @@ -83,7 +83,7 @@ struct amd_decoder_ops {
29193 bool (*dc_mce)(u16, u8);
29194 bool (*ic_mce)(u16, u8);
29195 bool (*nb_mce)(u16, u8);
29196 -};
29197 +} __no_const;
29198
29199 void amd_report_gart_errors(bool);
29200 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
29201 diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
29202 index e294e1b..a41b05b 100644
29203 --- a/drivers/edac/r82600_edac.c
29204 +++ b/drivers/edac/r82600_edac.c
29205 @@ -373,7 +373,7 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev)
29206 edac_mc_free(mci);
29207 }
29208
29209 -static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
29210 +static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
29211 {
29212 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
29213 },
29214 diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
29215 index 1dc118d..8c68af9 100644
29216 --- a/drivers/edac/sb_edac.c
29217 +++ b/drivers/edac/sb_edac.c
29218 @@ -367,7 +367,7 @@ static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
29219 /*
29220 * pci_device_id table for which devices we are looking for
29221 */
29222 -static const struct pci_device_id sbridge_pci_tbl[] __devinitdata = {
29223 +static const struct pci_device_id sbridge_pci_tbl[] __devinitconst = {
29224 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA)},
29225 {0,} /* 0 terminated list. */
29226 };
29227 diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
29228 index b6f47de..c5acf3a 100644
29229 --- a/drivers/edac/x38_edac.c
29230 +++ b/drivers/edac/x38_edac.c
29231 @@ -440,7 +440,7 @@ static void __devexit x38_remove_one(struct pci_dev *pdev)
29232 edac_mc_free(mci);
29233 }
29234
29235 -static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
29236 +static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
29237 {
29238 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
29239 X38},
29240 diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
29241 index 85661b0..c784559a 100644
29242 --- a/drivers/firewire/core-card.c
29243 +++ b/drivers/firewire/core-card.c
29244 @@ -657,7 +657,7 @@ void fw_card_release(struct kref *kref)
29245
29246 void fw_core_remove_card(struct fw_card *card)
29247 {
29248 - struct fw_card_driver dummy_driver = dummy_driver_template;
29249 + fw_card_driver_no_const dummy_driver = dummy_driver_template;
29250
29251 card->driver->update_phy_reg(card, 4,
29252 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
29253 diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
29254 index 4799393..37bd3ab 100644
29255 --- a/drivers/firewire/core-cdev.c
29256 +++ b/drivers/firewire/core-cdev.c
29257 @@ -1331,8 +1331,7 @@ static int init_iso_resource(struct client *client,
29258 int ret;
29259
29260 if ((request->channels == 0 && request->bandwidth == 0) ||
29261 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
29262 - request->bandwidth < 0)
29263 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
29264 return -EINVAL;
29265
29266 r = kmalloc(sizeof(*r), GFP_KERNEL);
29267 diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
29268 index 855ab3f..11f4bbd 100644
29269 --- a/drivers/firewire/core-transaction.c
29270 +++ b/drivers/firewire/core-transaction.c
29271 @@ -37,6 +37,7 @@
29272 #include <linux/timer.h>
29273 #include <linux/types.h>
29274 #include <linux/workqueue.h>
29275 +#include <linux/sched.h>
29276
29277 #include <asm/byteorder.h>
29278
29279 diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
29280 index b45be57..5fad18b 100644
29281 --- a/drivers/firewire/core.h
29282 +++ b/drivers/firewire/core.h
29283 @@ -101,6 +101,7 @@ struct fw_card_driver {
29284
29285 int (*stop_iso)(struct fw_iso_context *ctx);
29286 };
29287 +typedef struct fw_card_driver __no_const fw_card_driver_no_const;
29288
29289 void fw_card_initialize(struct fw_card *card,
29290 const struct fw_card_driver *driver, struct device *device);
29291 diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
29292 index 153980b..4b4d046 100644
29293 --- a/drivers/firmware/dmi_scan.c
29294 +++ b/drivers/firmware/dmi_scan.c
29295 @@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
29296 }
29297 }
29298 else {
29299 - /*
29300 - * no iounmap() for that ioremap(); it would be a no-op, but
29301 - * it's so early in setup that sucker gets confused into doing
29302 - * what it shouldn't if we actually call it.
29303 - */
29304 p = dmi_ioremap(0xF0000, 0x10000);
29305 if (p == NULL)
29306 goto error;
29307 @@ -723,7 +718,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
29308 if (buf == NULL)
29309 return -1;
29310
29311 - dmi_table(buf, dmi_len, dmi_num, decode, private_data);
29312 + dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
29313
29314 iounmap(buf);
29315 return 0;
29316 diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
29317 index 82d5c20..44a7177 100644
29318 --- a/drivers/gpio/gpio-vr41xx.c
29319 +++ b/drivers/gpio/gpio-vr41xx.c
29320 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
29321 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
29322 maskl, pendl, maskh, pendh);
29323
29324 - atomic_inc(&irq_err_count);
29325 + atomic_inc_unchecked(&irq_err_count);
29326
29327 return -EINVAL;
29328 }
29329 diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
29330 index 84a4a80..ce0306e 100644
29331 --- a/drivers/gpu/drm/drm_crtc_helper.c
29332 +++ b/drivers/gpu/drm/drm_crtc_helper.c
29333 @@ -280,7 +280,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
29334 struct drm_crtc *tmp;
29335 int crtc_mask = 1;
29336
29337 - WARN(!crtc, "checking null crtc?\n");
29338 + BUG_ON(!crtc);
29339
29340 dev = crtc->dev;
29341
29342 diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
29343 index ebf7d3f..d64c436 100644
29344 --- a/drivers/gpu/drm/drm_drv.c
29345 +++ b/drivers/gpu/drm/drm_drv.c
29346 @@ -312,7 +312,7 @@ module_exit(drm_core_exit);
29347 /**
29348 * Copy and IOCTL return string to user space
29349 */
29350 -static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
29351 +static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
29352 {
29353 int len;
29354
29355 @@ -391,7 +391,7 @@ long drm_ioctl(struct file *filp,
29356
29357 dev = file_priv->minor->dev;
29358 atomic_inc(&dev->ioctl_count);
29359 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
29360 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
29361 ++file_priv->ioctl_count;
29362
29363 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
29364 diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
29365 index 6263b01..7987f55 100644
29366 --- a/drivers/gpu/drm/drm_fops.c
29367 +++ b/drivers/gpu/drm/drm_fops.c
29368 @@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
29369 }
29370
29371 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
29372 - atomic_set(&dev->counts[i], 0);
29373 + atomic_set_unchecked(&dev->counts[i], 0);
29374
29375 dev->sigdata.lock = NULL;
29376
29377 @@ -135,8 +135,8 @@ int drm_open(struct inode *inode, struct file *filp)
29378
29379 retcode = drm_open_helper(inode, filp, dev);
29380 if (!retcode) {
29381 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
29382 - if (!dev->open_count++)
29383 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
29384 + if (local_inc_return(&dev->open_count) == 1)
29385 retcode = drm_setup(dev);
29386 }
29387 if (!retcode) {
29388 @@ -473,7 +473,7 @@ int drm_release(struct inode *inode, struct file *filp)
29389
29390 mutex_lock(&drm_global_mutex);
29391
29392 - DRM_DEBUG("open_count = %d\n", dev->open_count);
29393 + DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
29394
29395 if (dev->driver->preclose)
29396 dev->driver->preclose(dev, file_priv);
29397 @@ -482,10 +482,10 @@ int drm_release(struct inode *inode, struct file *filp)
29398 * Begin inline drm_release
29399 */
29400
29401 - DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
29402 + DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
29403 task_pid_nr(current),
29404 (long)old_encode_dev(file_priv->minor->device),
29405 - dev->open_count);
29406 + local_read(&dev->open_count));
29407
29408 /* Release any auth tokens that might point to this file_priv,
29409 (do that under the drm_global_mutex) */
29410 @@ -571,8 +571,8 @@ int drm_release(struct inode *inode, struct file *filp)
29411 * End inline drm_release
29412 */
29413
29414 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
29415 - if (!--dev->open_count) {
29416 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
29417 + if (local_dec_and_test(&dev->open_count)) {
29418 if (atomic_read(&dev->ioctl_count)) {
29419 DRM_ERROR("Device busy: %d\n",
29420 atomic_read(&dev->ioctl_count));
29421 diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
29422 index c87dc96..326055d 100644
29423 --- a/drivers/gpu/drm/drm_global.c
29424 +++ b/drivers/gpu/drm/drm_global.c
29425 @@ -36,7 +36,7 @@
29426 struct drm_global_item {
29427 struct mutex mutex;
29428 void *object;
29429 - int refcount;
29430 + atomic_t refcount;
29431 };
29432
29433 static struct drm_global_item glob[DRM_GLOBAL_NUM];
29434 @@ -49,7 +49,7 @@ void drm_global_init(void)
29435 struct drm_global_item *item = &glob[i];
29436 mutex_init(&item->mutex);
29437 item->object = NULL;
29438 - item->refcount = 0;
29439 + atomic_set(&item->refcount, 0);
29440 }
29441 }
29442
29443 @@ -59,7 +59,7 @@ void drm_global_release(void)
29444 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
29445 struct drm_global_item *item = &glob[i];
29446 BUG_ON(item->object != NULL);
29447 - BUG_ON(item->refcount != 0);
29448 + BUG_ON(atomic_read(&item->refcount) != 0);
29449 }
29450 }
29451
29452 @@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
29453 void *object;
29454
29455 mutex_lock(&item->mutex);
29456 - if (item->refcount == 0) {
29457 + if (atomic_read(&item->refcount) == 0) {
29458 item->object = kzalloc(ref->size, GFP_KERNEL);
29459 if (unlikely(item->object == NULL)) {
29460 ret = -ENOMEM;
29461 @@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
29462 goto out_err;
29463
29464 }
29465 - ++item->refcount;
29466 + atomic_inc(&item->refcount);
29467 ref->object = item->object;
29468 object = item->object;
29469 mutex_unlock(&item->mutex);
29470 @@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
29471 struct drm_global_item *item = &glob[ref->global_type];
29472
29473 mutex_lock(&item->mutex);
29474 - BUG_ON(item->refcount == 0);
29475 + BUG_ON(atomic_read(&item->refcount) == 0);
29476 BUG_ON(ref->object != item->object);
29477 - if (--item->refcount == 0) {
29478 + if (atomic_dec_and_test(&item->refcount)) {
29479 ref->release(ref);
29480 item->object = NULL;
29481 }
29482 diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
29483 index ab1162d..42587b2 100644
29484 --- a/drivers/gpu/drm/drm_info.c
29485 +++ b/drivers/gpu/drm/drm_info.c
29486 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
29487 struct drm_local_map *map;
29488 struct drm_map_list *r_list;
29489
29490 - /* Hardcoded from _DRM_FRAME_BUFFER,
29491 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
29492 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
29493 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
29494 + static const char * const types[] = {
29495 + [_DRM_FRAME_BUFFER] = "FB",
29496 + [_DRM_REGISTERS] = "REG",
29497 + [_DRM_SHM] = "SHM",
29498 + [_DRM_AGP] = "AGP",
29499 + [_DRM_SCATTER_GATHER] = "SG",
29500 + [_DRM_CONSISTENT] = "PCI",
29501 + [_DRM_GEM] = "GEM" };
29502 const char *type;
29503 int i;
29504
29505 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
29506 map = r_list->map;
29507 if (!map)
29508 continue;
29509 - if (map->type < 0 || map->type > 5)
29510 + if (map->type >= ARRAY_SIZE(types))
29511 type = "??";
29512 else
29513 type = types[map->type];
29514 @@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, void *data)
29515 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
29516 vma->vm_flags & VM_LOCKED ? 'l' : '-',
29517 vma->vm_flags & VM_IO ? 'i' : '-',
29518 +#ifdef CONFIG_GRKERNSEC_HIDESYM
29519 + 0);
29520 +#else
29521 vma->vm_pgoff);
29522 +#endif
29523
29524 #if defined(__i386__)
29525 pgprot = pgprot_val(vma->vm_page_prot);
29526 diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
29527 index 637fcc3..e890b33 100644
29528 --- a/drivers/gpu/drm/drm_ioc32.c
29529 +++ b/drivers/gpu/drm/drm_ioc32.c
29530 @@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
29531 request = compat_alloc_user_space(nbytes);
29532 if (!access_ok(VERIFY_WRITE, request, nbytes))
29533 return -EFAULT;
29534 - list = (struct drm_buf_desc *) (request + 1);
29535 + list = (struct drm_buf_desc __user *) (request + 1);
29536
29537 if (__put_user(count, &request->count)
29538 || __put_user(list, &request->list))
29539 @@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
29540 request = compat_alloc_user_space(nbytes);
29541 if (!access_ok(VERIFY_WRITE, request, nbytes))
29542 return -EFAULT;
29543 - list = (struct drm_buf_pub *) (request + 1);
29544 + list = (struct drm_buf_pub __user *) (request + 1);
29545
29546 if (__put_user(count, &request->count)
29547 || __put_user(list, &request->list))
29548 diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
29549 index 956fd38..e52167a 100644
29550 --- a/drivers/gpu/drm/drm_ioctl.c
29551 +++ b/drivers/gpu/drm/drm_ioctl.c
29552 @@ -251,7 +251,7 @@ int drm_getstats(struct drm_device *dev, void *data,
29553 stats->data[i].value =
29554 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
29555 else
29556 - stats->data[i].value = atomic_read(&dev->counts[i]);
29557 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
29558 stats->data[i].type = dev->types[i];
29559 }
29560
29561 diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
29562 index c79c713..2048588 100644
29563 --- a/drivers/gpu/drm/drm_lock.c
29564 +++ b/drivers/gpu/drm/drm_lock.c
29565 @@ -90,7 +90,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
29566 if (drm_lock_take(&master->lock, lock->context)) {
29567 master->lock.file_priv = file_priv;
29568 master->lock.lock_time = jiffies;
29569 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
29570 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
29571 break; /* Got lock */
29572 }
29573
29574 @@ -161,7 +161,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
29575 return -EINVAL;
29576 }
29577
29578 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
29579 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
29580
29581 if (drm_lock_free(&master->lock, lock->context)) {
29582 /* FIXME: Should really bail out here. */
29583 diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
29584 index 7f4b4e1..bf4def2 100644
29585 --- a/drivers/gpu/drm/i810/i810_dma.c
29586 +++ b/drivers/gpu/drm/i810/i810_dma.c
29587 @@ -948,8 +948,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
29588 dma->buflist[vertex->idx],
29589 vertex->discard, vertex->used);
29590
29591 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
29592 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
29593 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
29594 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
29595 sarea_priv->last_enqueue = dev_priv->counter - 1;
29596 sarea_priv->last_dispatch = (int)hw_status[5];
29597
29598 @@ -1109,8 +1109,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
29599 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
29600 mc->last_render);
29601
29602 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
29603 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
29604 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
29605 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
29606 sarea_priv->last_enqueue = dev_priv->counter - 1;
29607 sarea_priv->last_dispatch = (int)hw_status[5];
29608
29609 diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
29610 index c9339f4..f5e1b9d 100644
29611 --- a/drivers/gpu/drm/i810/i810_drv.h
29612 +++ b/drivers/gpu/drm/i810/i810_drv.h
29613 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
29614 int page_flipping;
29615
29616 wait_queue_head_t irq_queue;
29617 - atomic_t irq_received;
29618 - atomic_t irq_emitted;
29619 + atomic_unchecked_t irq_received;
29620 + atomic_unchecked_t irq_emitted;
29621
29622 int front_offset;
29623 } drm_i810_private_t;
29624 diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
29625 index deaa657..e0fd296 100644
29626 --- a/drivers/gpu/drm/i915/i915_debugfs.c
29627 +++ b/drivers/gpu/drm/i915/i915_debugfs.c
29628 @@ -499,7 +499,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
29629 I915_READ(GTIMR));
29630 }
29631 seq_printf(m, "Interrupts received: %d\n",
29632 - atomic_read(&dev_priv->irq_received));
29633 + atomic_read_unchecked(&dev_priv->irq_received));
29634 for (i = 0; i < I915_NUM_RINGS; i++) {
29635 if (IS_GEN6(dev) || IS_GEN7(dev)) {
29636 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
29637 @@ -1321,7 +1321,7 @@ static int i915_opregion(struct seq_file *m, void *unused)
29638 return ret;
29639
29640 if (opregion->header)
29641 - seq_write(m, opregion->header, OPREGION_SIZE);
29642 + seq_write(m, (const void __force_kernel *)opregion->header, OPREGION_SIZE);
29643
29644 mutex_unlock(&dev->struct_mutex);
29645
29646 diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
29647 index ddfe3d9..f6e6b21 100644
29648 --- a/drivers/gpu/drm/i915/i915_dma.c
29649 +++ b/drivers/gpu/drm/i915/i915_dma.c
29650 @@ -1175,7 +1175,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
29651 bool can_switch;
29652
29653 spin_lock(&dev->count_lock);
29654 - can_switch = (dev->open_count == 0);
29655 + can_switch = (local_read(&dev->open_count) == 0);
29656 spin_unlock(&dev->count_lock);
29657 return can_switch;
29658 }
29659 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
29660 index 9689ca3..294f9c1 100644
29661 --- a/drivers/gpu/drm/i915/i915_drv.h
29662 +++ b/drivers/gpu/drm/i915/i915_drv.h
29663 @@ -231,7 +231,7 @@ struct drm_i915_display_funcs {
29664 /* render clock increase/decrease */
29665 /* display clock increase/decrease */
29666 /* pll clock increase/decrease */
29667 -};
29668 +} __no_const;
29669
29670 struct intel_device_info {
29671 u8 gen;
29672 @@ -320,7 +320,7 @@ typedef struct drm_i915_private {
29673 int current_page;
29674 int page_flipping;
29675
29676 - atomic_t irq_received;
29677 + atomic_unchecked_t irq_received;
29678
29679 /* protects the irq masks */
29680 spinlock_t irq_lock;
29681 @@ -896,7 +896,7 @@ struct drm_i915_gem_object {
29682 * will be page flipped away on the next vblank. When it
29683 * reaches 0, dev_priv->pending_flip_queue will be woken up.
29684 */
29685 - atomic_t pending_flip;
29686 + atomic_unchecked_t pending_flip;
29687 };
29688
29689 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
29690 @@ -1276,7 +1276,7 @@ extern int intel_setup_gmbus(struct drm_device *dev);
29691 extern void intel_teardown_gmbus(struct drm_device *dev);
29692 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
29693 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
29694 -extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
29695 +static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
29696 {
29697 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
29698 }
29699 diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
29700 index 65e1f00..a30ef00 100644
29701 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
29702 +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
29703 @@ -189,7 +189,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
29704 i915_gem_clflush_object(obj);
29705
29706 if (obj->base.pending_write_domain)
29707 - cd->flips |= atomic_read(&obj->pending_flip);
29708 + cd->flips |= atomic_read_unchecked(&obj->pending_flip);
29709
29710 /* The actual obj->write_domain will be updated with
29711 * pending_write_domain after we emit the accumulated flush for all
29712 @@ -882,9 +882,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
29713
29714 static int
29715 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
29716 - int count)
29717 + unsigned int count)
29718 {
29719 - int i;
29720 + unsigned int i;
29721
29722 for (i = 0; i < count; i++) {
29723 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
29724 diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
29725 index 5bd4361..0241a42 100644
29726 --- a/drivers/gpu/drm/i915/i915_irq.c
29727 +++ b/drivers/gpu/drm/i915/i915_irq.c
29728 @@ -475,7 +475,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
29729 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
29730 struct drm_i915_master_private *master_priv;
29731
29732 - atomic_inc(&dev_priv->irq_received);
29733 + atomic_inc_unchecked(&dev_priv->irq_received);
29734
29735 /* disable master interrupt before clearing iir */
29736 de_ier = I915_READ(DEIER);
29737 @@ -566,7 +566,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
29738 struct drm_i915_master_private *master_priv;
29739 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
29740
29741 - atomic_inc(&dev_priv->irq_received);
29742 + atomic_inc_unchecked(&dev_priv->irq_received);
29743
29744 if (IS_GEN6(dev))
29745 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
29746 @@ -1231,7 +1231,7 @@ static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
29747 int ret = IRQ_NONE, pipe;
29748 bool blc_event = false;
29749
29750 - atomic_inc(&dev_priv->irq_received);
29751 + atomic_inc_unchecked(&dev_priv->irq_received);
29752
29753 iir = I915_READ(IIR);
29754
29755 @@ -1743,7 +1743,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
29756 {
29757 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
29758
29759 - atomic_set(&dev_priv->irq_received, 0);
29760 + atomic_set_unchecked(&dev_priv->irq_received, 0);
29761
29762 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
29763 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
29764 @@ -1932,7 +1932,7 @@ static void i915_driver_irq_preinstall(struct drm_device * dev)
29765 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
29766 int pipe;
29767
29768 - atomic_set(&dev_priv->irq_received, 0);
29769 + atomic_set_unchecked(&dev_priv->irq_received, 0);
29770
29771 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
29772 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
29773 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
29774 index 397087c..9178d0d 100644
29775 --- a/drivers/gpu/drm/i915/intel_display.c
29776 +++ b/drivers/gpu/drm/i915/intel_display.c
29777 @@ -2238,7 +2238,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
29778
29779 wait_event(dev_priv->pending_flip_queue,
29780 atomic_read(&dev_priv->mm.wedged) ||
29781 - atomic_read(&obj->pending_flip) == 0);
29782 + atomic_read_unchecked(&obj->pending_flip) == 0);
29783
29784 /* Big Hammer, we also need to ensure that any pending
29785 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
29786 @@ -2859,7 +2859,7 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
29787 obj = to_intel_framebuffer(crtc->fb)->obj;
29788 dev_priv = crtc->dev->dev_private;
29789 wait_event(dev_priv->pending_flip_queue,
29790 - atomic_read(&obj->pending_flip) == 0);
29791 + atomic_read_unchecked(&obj->pending_flip) == 0);
29792 }
29793
29794 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
29795 @@ -7171,7 +7171,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
29796
29797 atomic_clear_mask(1 << intel_crtc->plane,
29798 &obj->pending_flip.counter);
29799 - if (atomic_read(&obj->pending_flip) == 0)
29800 + if (atomic_read_unchecked(&obj->pending_flip) == 0)
29801 wake_up(&dev_priv->pending_flip_queue);
29802
29803 schedule_work(&work->work);
29804 @@ -7461,7 +7461,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
29805 /* Block clients from rendering to the new back buffer until
29806 * the flip occurs and the object is no longer visible.
29807 */
29808 - atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
29809 + atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
29810
29811 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
29812 if (ret)
29813 @@ -7475,7 +7475,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
29814 return 0;
29815
29816 cleanup_pending:
29817 - atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
29818 + atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
29819 drm_gem_object_unreference(&work->old_fb_obj->base);
29820 drm_gem_object_unreference(&obj->base);
29821 mutex_unlock(&dev->struct_mutex);
29822 diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
29823 index 54558a0..2d97005 100644
29824 --- a/drivers/gpu/drm/mga/mga_drv.h
29825 +++ b/drivers/gpu/drm/mga/mga_drv.h
29826 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
29827 u32 clear_cmd;
29828 u32 maccess;
29829
29830 - atomic_t vbl_received; /**< Number of vblanks received. */
29831 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
29832 wait_queue_head_t fence_queue;
29833 - atomic_t last_fence_retired;
29834 + atomic_unchecked_t last_fence_retired;
29835 u32 next_fence_to_post;
29836
29837 unsigned int fb_cpp;
29838 diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
29839 index 2581202..f230a8d9 100644
29840 --- a/drivers/gpu/drm/mga/mga_irq.c
29841 +++ b/drivers/gpu/drm/mga/mga_irq.c
29842 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
29843 if (crtc != 0)
29844 return 0;
29845
29846 - return atomic_read(&dev_priv->vbl_received);
29847 + return atomic_read_unchecked(&dev_priv->vbl_received);
29848 }
29849
29850
29851 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
29852 /* VBLANK interrupt */
29853 if (status & MGA_VLINEPEN) {
29854 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
29855 - atomic_inc(&dev_priv->vbl_received);
29856 + atomic_inc_unchecked(&dev_priv->vbl_received);
29857 drm_handle_vblank(dev, 0);
29858 handled = 1;
29859 }
29860 @@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
29861 if ((prim_start & ~0x03) != (prim_end & ~0x03))
29862 MGA_WRITE(MGA_PRIMEND, prim_end);
29863
29864 - atomic_inc(&dev_priv->last_fence_retired);
29865 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
29866 DRM_WAKEUP(&dev_priv->fence_queue);
29867 handled = 1;
29868 }
29869 @@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
29870 * using fences.
29871 */
29872 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
29873 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
29874 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
29875 - *sequence) <= (1 << 23)));
29876
29877 *sequence = cur_fence;
29878 diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
29879 index e5cbead..6c354a3 100644
29880 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c
29881 +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
29882 @@ -199,7 +199,7 @@ struct methods {
29883 const char desc[8];
29884 void (*loadbios)(struct drm_device *, uint8_t *);
29885 const bool rw;
29886 -};
29887 +} __do_const;
29888
29889 static struct methods shadow_methods[] = {
29890 { "PRAMIN", load_vbios_pramin, true },
29891 @@ -5290,7 +5290,7 @@ parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
29892 struct bit_table {
29893 const char id;
29894 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
29895 -};
29896 +} __no_const;
29897
29898 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
29899
29900 diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
29901 index b827098..c31a797 100644
29902 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h
29903 +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
29904 @@ -242,7 +242,7 @@ struct nouveau_channel {
29905 struct list_head pending;
29906 uint32_t sequence;
29907 uint32_t sequence_ack;
29908 - atomic_t last_sequence_irq;
29909 + atomic_unchecked_t last_sequence_irq;
29910 struct nouveau_vma vma;
29911 } fence;
29912
29913 @@ -323,7 +323,7 @@ struct nouveau_exec_engine {
29914 u32 handle, u16 class);
29915 void (*set_tile_region)(struct drm_device *dev, int i);
29916 void (*tlb_flush)(struct drm_device *, int engine);
29917 -};
29918 +} __no_const;
29919
29920 struct nouveau_instmem_engine {
29921 void *priv;
29922 @@ -345,13 +345,13 @@ struct nouveau_instmem_engine {
29923 struct nouveau_mc_engine {
29924 int (*init)(struct drm_device *dev);
29925 void (*takedown)(struct drm_device *dev);
29926 -};
29927 +} __no_const;
29928
29929 struct nouveau_timer_engine {
29930 int (*init)(struct drm_device *dev);
29931 void (*takedown)(struct drm_device *dev);
29932 uint64_t (*read)(struct drm_device *dev);
29933 -};
29934 +} __no_const;
29935
29936 struct nouveau_fb_engine {
29937 int num_tiles;
29938 @@ -566,7 +566,7 @@ struct nouveau_vram_engine {
29939 void (*put)(struct drm_device *, struct nouveau_mem **);
29940
29941 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
29942 -};
29943 +} __no_const;
29944
29945 struct nouveau_engine {
29946 struct nouveau_instmem_engine instmem;
29947 @@ -714,7 +714,7 @@ struct drm_nouveau_private {
29948 struct drm_global_reference mem_global_ref;
29949 struct ttm_bo_global_ref bo_global_ref;
29950 struct ttm_bo_device bdev;
29951 - atomic_t validate_sequence;
29952 + atomic_unchecked_t validate_sequence;
29953 } ttm;
29954
29955 struct {
29956 diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
29957 index 2f6daae..c9d7b9e 100644
29958 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c
29959 +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
29960 @@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_channel *chan)
29961 if (USE_REFCNT(dev))
29962 sequence = nvchan_rd32(chan, 0x48);
29963 else
29964 - sequence = atomic_read(&chan->fence.last_sequence_irq);
29965 + sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
29966
29967 if (chan->fence.sequence_ack == sequence)
29968 goto out;
29969 @@ -539,7 +539,7 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
29970 return ret;
29971 }
29972
29973 - atomic_set(&chan->fence.last_sequence_irq, 0);
29974 + atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
29975 return 0;
29976 }
29977
29978 diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
29979 index 7ce3fde..cb3ea04 100644
29980 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c
29981 +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
29982 @@ -314,7 +314,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
29983 int trycnt = 0;
29984 int ret, i;
29985
29986 - sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
29987 + sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
29988 retry:
29989 if (++trycnt > 100000) {
29990 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
29991 diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
29992 index f80c5e0..936baa7 100644
29993 --- a/drivers/gpu/drm/nouveau/nouveau_state.c
29994 +++ b/drivers/gpu/drm/nouveau/nouveau_state.c
29995 @@ -543,7 +543,7 @@ static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
29996 bool can_switch;
29997
29998 spin_lock(&dev->count_lock);
29999 - can_switch = (dev->open_count == 0);
30000 + can_switch = (local_read(&dev->open_count) == 0);
30001 spin_unlock(&dev->count_lock);
30002 return can_switch;
30003 }
30004 diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
30005 index dbdea8e..cd6eeeb 100644
30006 --- a/drivers/gpu/drm/nouveau/nv04_graph.c
30007 +++ b/drivers/gpu/drm/nouveau/nv04_graph.c
30008 @@ -554,7 +554,7 @@ static int
30009 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
30010 u32 class, u32 mthd, u32 data)
30011 {
30012 - atomic_set(&chan->fence.last_sequence_irq, data);
30013 + atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
30014 return 0;
30015 }
30016
30017 diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
30018 index bcac90b..53bfc76 100644
30019 --- a/drivers/gpu/drm/r128/r128_cce.c
30020 +++ b/drivers/gpu/drm/r128/r128_cce.c
30021 @@ -378,7 +378,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
30022
30023 /* GH: Simple idle check.
30024 */
30025 - atomic_set(&dev_priv->idle_count, 0);
30026 + atomic_set_unchecked(&dev_priv->idle_count, 0);
30027
30028 /* We don't support anything other than bus-mastering ring mode,
30029 * but the ring can be in either AGP or PCI space for the ring
30030 diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
30031 index 930c71b..499aded 100644
30032 --- a/drivers/gpu/drm/r128/r128_drv.h
30033 +++ b/drivers/gpu/drm/r128/r128_drv.h
30034 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
30035 int is_pci;
30036 unsigned long cce_buffers_offset;
30037
30038 - atomic_t idle_count;
30039 + atomic_unchecked_t idle_count;
30040
30041 int page_flipping;
30042 int current_page;
30043 u32 crtc_offset;
30044 u32 crtc_offset_cntl;
30045
30046 - atomic_t vbl_received;
30047 + atomic_unchecked_t vbl_received;
30048
30049 u32 color_fmt;
30050 unsigned int front_offset;
30051 diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
30052 index 429d5a0..7e899ed 100644
30053 --- a/drivers/gpu/drm/r128/r128_irq.c
30054 +++ b/drivers/gpu/drm/r128/r128_irq.c
30055 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
30056 if (crtc != 0)
30057 return 0;
30058
30059 - return atomic_read(&dev_priv->vbl_received);
30060 + return atomic_read_unchecked(&dev_priv->vbl_received);
30061 }
30062
30063 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
30064 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
30065 /* VBLANK interrupt */
30066 if (status & R128_CRTC_VBLANK_INT) {
30067 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
30068 - atomic_inc(&dev_priv->vbl_received);
30069 + atomic_inc_unchecked(&dev_priv->vbl_received);
30070 drm_handle_vblank(dev, 0);
30071 return IRQ_HANDLED;
30072 }
30073 diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
30074 index a9e33ce..09edd4b 100644
30075 --- a/drivers/gpu/drm/r128/r128_state.c
30076 +++ b/drivers/gpu/drm/r128/r128_state.c
30077 @@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
30078
30079 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
30080 {
30081 - if (atomic_read(&dev_priv->idle_count) == 0)
30082 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
30083 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
30084 else
30085 - atomic_set(&dev_priv->idle_count, 0);
30086 + atomic_set_unchecked(&dev_priv->idle_count, 0);
30087 }
30088
30089 #endif
30090 diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
30091 index 5a82b6b..9e69c73 100644
30092 --- a/drivers/gpu/drm/radeon/mkregtable.c
30093 +++ b/drivers/gpu/drm/radeon/mkregtable.c
30094 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
30095 regex_t mask_rex;
30096 regmatch_t match[4];
30097 char buf[1024];
30098 - size_t end;
30099 + long end;
30100 int len;
30101 int done = 0;
30102 int r;
30103 unsigned o;
30104 struct offset *offset;
30105 char last_reg_s[10];
30106 - int last_reg;
30107 + unsigned long last_reg;
30108
30109 if (regcomp
30110 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
30111 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
30112 index 1668ec1..30ebdab 100644
30113 --- a/drivers/gpu/drm/radeon/radeon.h
30114 +++ b/drivers/gpu/drm/radeon/radeon.h
30115 @@ -250,7 +250,7 @@ struct radeon_fence_driver {
30116 uint32_t scratch_reg;
30117 uint64_t gpu_addr;
30118 volatile uint32_t *cpu_addr;
30119 - atomic_t seq;
30120 + atomic_unchecked_t seq;
30121 uint32_t last_seq;
30122 unsigned long last_jiffies;
30123 unsigned long last_timeout;
30124 @@ -752,7 +752,7 @@ struct r600_blit_cp_primitives {
30125 int x2, int y2);
30126 void (*draw_auto)(struct radeon_device *rdev);
30127 void (*set_default_state)(struct radeon_device *rdev);
30128 -};
30129 +} __no_const;
30130
30131 struct r600_blit {
30132 struct mutex mutex;
30133 @@ -1201,7 +1201,7 @@ struct radeon_asic {
30134 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
30135 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
30136 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
30137 -};
30138 +} __no_const;
30139
30140 /*
30141 * Asic structures
30142 diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
30143 index 49f7cb7..2fcb48f 100644
30144 --- a/drivers/gpu/drm/radeon/radeon_device.c
30145 +++ b/drivers/gpu/drm/radeon/radeon_device.c
30146 @@ -687,7 +687,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
30147 bool can_switch;
30148
30149 spin_lock(&dev->count_lock);
30150 - can_switch = (dev->open_count == 0);
30151 + can_switch = (local_read(&dev->open_count) == 0);
30152 spin_unlock(&dev->count_lock);
30153 return can_switch;
30154 }
30155 diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
30156 index a1b59ca..86f2d44 100644
30157 --- a/drivers/gpu/drm/radeon/radeon_drv.h
30158 +++ b/drivers/gpu/drm/radeon/radeon_drv.h
30159 @@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
30160
30161 /* SW interrupt */
30162 wait_queue_head_t swi_queue;
30163 - atomic_t swi_emitted;
30164 + atomic_unchecked_t swi_emitted;
30165 int vblank_crtc;
30166 uint32_t irq_enable_reg;
30167 uint32_t r500_disp_irq_reg;
30168 diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
30169 index 4bd36a3..e66fe9c 100644
30170 --- a/drivers/gpu/drm/radeon/radeon_fence.c
30171 +++ b/drivers/gpu/drm/radeon/radeon_fence.c
30172 @@ -70,7 +70,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
30173 write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
30174 return 0;
30175 }
30176 - fence->seq = atomic_add_return(1, &rdev->fence_drv[fence->ring].seq);
30177 + fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv[fence->ring].seq);
30178 if (!rdev->ring[fence->ring].ready)
30179 /* FIXME: cp is not running assume everythings is done right
30180 * away
30181 @@ -405,7 +405,7 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
30182 }
30183 rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
30184 rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
30185 - radeon_fence_write(rdev, atomic_read(&rdev->fence_drv[ring].seq), ring);
30186 + radeon_fence_write(rdev, atomic_read_unchecked(&rdev->fence_drv[ring].seq), ring);
30187 rdev->fence_drv[ring].initialized = true;
30188 DRM_INFO("fence driver on ring %d use gpu addr 0x%08Lx and cpu addr 0x%p\n",
30189 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
30190 @@ -418,7 +418,7 @@ static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
30191 rdev->fence_drv[ring].scratch_reg = -1;
30192 rdev->fence_drv[ring].cpu_addr = NULL;
30193 rdev->fence_drv[ring].gpu_addr = 0;
30194 - atomic_set(&rdev->fence_drv[ring].seq, 0);
30195 + atomic_set_unchecked(&rdev->fence_drv[ring].seq, 0);
30196 INIT_LIST_HEAD(&rdev->fence_drv[ring].created);
30197 INIT_LIST_HEAD(&rdev->fence_drv[ring].emitted);
30198 INIT_LIST_HEAD(&rdev->fence_drv[ring].signaled);
30199 diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
30200 index 48b7cea..342236f 100644
30201 --- a/drivers/gpu/drm/radeon/radeon_ioc32.c
30202 +++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
30203 @@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
30204 request = compat_alloc_user_space(sizeof(*request));
30205 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
30206 || __put_user(req32.param, &request->param)
30207 - || __put_user((void __user *)(unsigned long)req32.value,
30208 + || __put_user((unsigned long)req32.value,
30209 &request->value))
30210 return -EFAULT;
30211
30212 diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
30213 index 00da384..32f972d 100644
30214 --- a/drivers/gpu/drm/radeon/radeon_irq.c
30215 +++ b/drivers/gpu/drm/radeon/radeon_irq.c
30216 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
30217 unsigned int ret;
30218 RING_LOCALS;
30219
30220 - atomic_inc(&dev_priv->swi_emitted);
30221 - ret = atomic_read(&dev_priv->swi_emitted);
30222 + atomic_inc_unchecked(&dev_priv->swi_emitted);
30223 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
30224
30225 BEGIN_RING(4);
30226 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
30227 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
30228 drm_radeon_private_t *dev_priv =
30229 (drm_radeon_private_t *) dev->dev_private;
30230
30231 - atomic_set(&dev_priv->swi_emitted, 0);
30232 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
30233 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
30234
30235 dev->max_vblank_count = 0x001fffff;
30236 diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
30237 index e8422ae..d22d4a8 100644
30238 --- a/drivers/gpu/drm/radeon/radeon_state.c
30239 +++ b/drivers/gpu/drm/radeon/radeon_state.c
30240 @@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
30241 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
30242 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
30243
30244 - if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
30245 + if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
30246 sarea_priv->nbox * sizeof(depth_boxes[0])))
30247 return -EFAULT;
30248
30249 @@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
30250 {
30251 drm_radeon_private_t *dev_priv = dev->dev_private;
30252 drm_radeon_getparam_t *param = data;
30253 - int value;
30254 + int value = 0;
30255
30256 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
30257
30258 diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
30259 index c421e77..e6bf2e8 100644
30260 --- a/drivers/gpu/drm/radeon/radeon_ttm.c
30261 +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
30262 @@ -842,8 +842,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
30263 }
30264 if (unlikely(ttm_vm_ops == NULL)) {
30265 ttm_vm_ops = vma->vm_ops;
30266 - radeon_ttm_vm_ops = *ttm_vm_ops;
30267 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
30268 + pax_open_kernel();
30269 + memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
30270 + *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
30271 + pax_close_kernel();
30272 }
30273 vma->vm_ops = &radeon_ttm_vm_ops;
30274 return 0;
30275 diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
30276 index f68dff2..8df955c 100644
30277 --- a/drivers/gpu/drm/radeon/rs690.c
30278 +++ b/drivers/gpu/drm/radeon/rs690.c
30279 @@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
30280 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
30281 rdev->pm.sideport_bandwidth.full)
30282 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
30283 - read_delay_latency.full = dfixed_const(370 * 800 * 1000);
30284 + read_delay_latency.full = dfixed_const(800 * 1000);
30285 read_delay_latency.full = dfixed_div(read_delay_latency,
30286 rdev->pm.igp_sideport_mclk);
30287 + a.full = dfixed_const(370);
30288 + read_delay_latency.full = dfixed_mul(read_delay_latency, a);
30289 } else {
30290 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
30291 rdev->pm.k8_bandwidth.full)
30292 diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
30293 index 499debd..66fce72 100644
30294 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
30295 +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
30296 @@ -398,9 +398,9 @@ static int ttm_pool_get_num_unused_pages(void)
30297 static int ttm_pool_mm_shrink(struct shrinker *shrink,
30298 struct shrink_control *sc)
30299 {
30300 - static atomic_t start_pool = ATOMIC_INIT(0);
30301 + static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
30302 unsigned i;
30303 - unsigned pool_offset = atomic_add_return(1, &start_pool);
30304 + unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
30305 struct ttm_page_pool *pool;
30306 int shrink_pages = sc->nr_to_scan;
30307
30308 diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
30309 index 88edacc..1e5412b 100644
30310 --- a/drivers/gpu/drm/via/via_drv.h
30311 +++ b/drivers/gpu/drm/via/via_drv.h
30312 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
30313 typedef uint32_t maskarray_t[5];
30314
30315 typedef struct drm_via_irq {
30316 - atomic_t irq_received;
30317 + atomic_unchecked_t irq_received;
30318 uint32_t pending_mask;
30319 uint32_t enable_mask;
30320 wait_queue_head_t irq_queue;
30321 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
30322 struct timeval last_vblank;
30323 int last_vblank_valid;
30324 unsigned usec_per_vblank;
30325 - atomic_t vbl_received;
30326 + atomic_unchecked_t vbl_received;
30327 drm_via_state_t hc_state;
30328 char pci_buf[VIA_PCI_BUF_SIZE];
30329 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
30330 diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
30331 index d391f48..10c8ca3 100644
30332 --- a/drivers/gpu/drm/via/via_irq.c
30333 +++ b/drivers/gpu/drm/via/via_irq.c
30334 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
30335 if (crtc != 0)
30336 return 0;
30337
30338 - return atomic_read(&dev_priv->vbl_received);
30339 + return atomic_read_unchecked(&dev_priv->vbl_received);
30340 }
30341
30342 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
30343 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
30344
30345 status = VIA_READ(VIA_REG_INTERRUPT);
30346 if (status & VIA_IRQ_VBLANK_PENDING) {
30347 - atomic_inc(&dev_priv->vbl_received);
30348 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
30349 + atomic_inc_unchecked(&dev_priv->vbl_received);
30350 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
30351 do_gettimeofday(&cur_vblank);
30352 if (dev_priv->last_vblank_valid) {
30353 dev_priv->usec_per_vblank =
30354 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
30355 dev_priv->last_vblank = cur_vblank;
30356 dev_priv->last_vblank_valid = 1;
30357 }
30358 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
30359 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
30360 DRM_DEBUG("US per vblank is: %u\n",
30361 dev_priv->usec_per_vblank);
30362 }
30363 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
30364
30365 for (i = 0; i < dev_priv->num_irqs; ++i) {
30366 if (status & cur_irq->pending_mask) {
30367 - atomic_inc(&cur_irq->irq_received);
30368 + atomic_inc_unchecked(&cur_irq->irq_received);
30369 DRM_WAKEUP(&cur_irq->irq_queue);
30370 handled = 1;
30371 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
30372 @@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
30373 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
30374 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
30375 masks[irq][4]));
30376 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
30377 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
30378 } else {
30379 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
30380 (((cur_irq_sequence =
30381 - atomic_read(&cur_irq->irq_received)) -
30382 + atomic_read_unchecked(&cur_irq->irq_received)) -
30383 *sequence) <= (1 << 23)));
30384 }
30385 *sequence = cur_irq_sequence;
30386 @@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
30387 }
30388
30389 for (i = 0; i < dev_priv->num_irqs; ++i) {
30390 - atomic_set(&cur_irq->irq_received, 0);
30391 + atomic_set_unchecked(&cur_irq->irq_received, 0);
30392 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
30393 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
30394 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
30395 @@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
30396 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
30397 case VIA_IRQ_RELATIVE:
30398 irqwait->request.sequence +=
30399 - atomic_read(&cur_irq->irq_received);
30400 + atomic_read_unchecked(&cur_irq->irq_received);
30401 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
30402 case VIA_IRQ_ABSOLUTE:
30403 break;
30404 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
30405 index dc27970..f18b008 100644
30406 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
30407 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
30408 @@ -260,7 +260,7 @@ struct vmw_private {
30409 * Fencing and IRQs.
30410 */
30411
30412 - atomic_t marker_seq;
30413 + atomic_unchecked_t marker_seq;
30414 wait_queue_head_t fence_queue;
30415 wait_queue_head_t fifo_queue;
30416 int fence_queue_waiters; /* Protected by hw_mutex */
30417 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
30418 index a0c2f12..68ae6cb 100644
30419 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
30420 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
30421 @@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
30422 (unsigned int) min,
30423 (unsigned int) fifo->capabilities);
30424
30425 - atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
30426 + atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
30427 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
30428 vmw_marker_queue_init(&fifo->marker_queue);
30429 return vmw_fifo_send_fence(dev_priv, &dummy);
30430 @@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
30431 if (reserveable)
30432 iowrite32(bytes, fifo_mem +
30433 SVGA_FIFO_RESERVED);
30434 - return fifo_mem + (next_cmd >> 2);
30435 + return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
30436 } else {
30437 need_bounce = true;
30438 }
30439 @@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
30440
30441 fm = vmw_fifo_reserve(dev_priv, bytes);
30442 if (unlikely(fm == NULL)) {
30443 - *seqno = atomic_read(&dev_priv->marker_seq);
30444 + *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
30445 ret = -ENOMEM;
30446 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
30447 false, 3*HZ);
30448 @@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
30449 }
30450
30451 do {
30452 - *seqno = atomic_add_return(1, &dev_priv->marker_seq);
30453 + *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
30454 } while (*seqno == 0);
30455
30456 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
30457 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
30458 index cabc95f..14b3d77 100644
30459 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
30460 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
30461 @@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
30462 * emitted. Then the fence is stale and signaled.
30463 */
30464
30465 - ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
30466 + ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
30467 > VMW_FENCE_WRAP);
30468
30469 return ret;
30470 @@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
30471
30472 if (fifo_idle)
30473 down_read(&fifo_state->rwsem);
30474 - signal_seq = atomic_read(&dev_priv->marker_seq);
30475 + signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
30476 ret = 0;
30477
30478 for (;;) {
30479 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
30480 index 8a8725c..afed796 100644
30481 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
30482 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
30483 @@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
30484 while (!vmw_lag_lt(queue, us)) {
30485 spin_lock(&queue->lock);
30486 if (list_empty(&queue->head))
30487 - seqno = atomic_read(&dev_priv->marker_seq);
30488 + seqno = atomic_read_unchecked(&dev_priv->marker_seq);
30489 else {
30490 marker = list_first_entry(&queue->head,
30491 struct vmw_marker, head);
30492 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
30493 index af08ce7..7a15038 100644
30494 --- a/drivers/hid/hid-core.c
30495 +++ b/drivers/hid/hid-core.c
30496 @@ -2020,7 +2020,7 @@ static bool hid_ignore(struct hid_device *hdev)
30497
30498 int hid_add_device(struct hid_device *hdev)
30499 {
30500 - static atomic_t id = ATOMIC_INIT(0);
30501 + static atomic_unchecked_t id = ATOMIC_INIT(0);
30502 int ret;
30503
30504 if (WARN_ON(hdev->status & HID_STAT_ADDED))
30505 @@ -2035,7 +2035,7 @@ int hid_add_device(struct hid_device *hdev)
30506 /* XXX hack, any other cleaner solution after the driver core
30507 * is converted to allow more than 20 bytes as the device name? */
30508 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
30509 - hdev->vendor, hdev->product, atomic_inc_return(&id));
30510 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
30511
30512 hid_debug_register(hdev, dev_name(&hdev->dev));
30513 ret = device_add(&hdev->dev);
30514 diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
30515 index b1ec0e2..c295a61 100644
30516 --- a/drivers/hid/usbhid/hiddev.c
30517 +++ b/drivers/hid/usbhid/hiddev.c
30518 @@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
30519 break;
30520
30521 case HIDIOCAPPLICATION:
30522 - if (arg < 0 || arg >= hid->maxapplication)
30523 + if (arg >= hid->maxapplication)
30524 break;
30525
30526 for (i = 0; i < hid->maxcollection; i++)
30527 diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
30528 index 4065374..10ed7dc 100644
30529 --- a/drivers/hv/channel.c
30530 +++ b/drivers/hv/channel.c
30531 @@ -400,8 +400,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
30532 int ret = 0;
30533 int t;
30534
30535 - next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
30536 - atomic_inc(&vmbus_connection.next_gpadl_handle);
30537 + next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
30538 + atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
30539
30540 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
30541 if (ret)
30542 diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
30543 index 12aa97f..c0679f7 100644
30544 --- a/drivers/hv/hv.c
30545 +++ b/drivers/hv/hv.c
30546 @@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
30547 u64 output_address = (output) ? virt_to_phys(output) : 0;
30548 u32 output_address_hi = output_address >> 32;
30549 u32 output_address_lo = output_address & 0xFFFFFFFF;
30550 - void *hypercall_page = hv_context.hypercall_page;
30551 + void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
30552
30553 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
30554 "=a"(hv_status_lo) : "d" (control_hi),
30555 diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
30556 index 6d7d286..92b0873 100644
30557 --- a/drivers/hv/hyperv_vmbus.h
30558 +++ b/drivers/hv/hyperv_vmbus.h
30559 @@ -556,7 +556,7 @@ enum vmbus_connect_state {
30560 struct vmbus_connection {
30561 enum vmbus_connect_state conn_state;
30562
30563 - atomic_t next_gpadl_handle;
30564 + atomic_unchecked_t next_gpadl_handle;
30565
30566 /*
30567 * Represents channel interrupts. Each bit position represents a
30568 diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
30569 index a220e57..428f54d 100644
30570 --- a/drivers/hv/vmbus_drv.c
30571 +++ b/drivers/hv/vmbus_drv.c
30572 @@ -663,10 +663,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
30573 {
30574 int ret = 0;
30575
30576 - static atomic_t device_num = ATOMIC_INIT(0);
30577 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
30578
30579 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
30580 - atomic_inc_return(&device_num));
30581 + atomic_inc_return_unchecked(&device_num));
30582
30583 child_device_obj->device.bus = &hv_bus;
30584 child_device_obj->device.parent = &hv_acpi_dev->dev;
30585 diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
30586 index 554f046..f8b4729 100644
30587 --- a/drivers/hwmon/acpi_power_meter.c
30588 +++ b/drivers/hwmon/acpi_power_meter.c
30589 @@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
30590 return res;
30591
30592 temp /= 1000;
30593 - if (temp < 0)
30594 - return -EINVAL;
30595
30596 mutex_lock(&resource->lock);
30597 resource->trip[attr->index - 7] = temp;
30598 diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
30599 index 91fdd1f..b66a686 100644
30600 --- a/drivers/hwmon/sht15.c
30601 +++ b/drivers/hwmon/sht15.c
30602 @@ -166,7 +166,7 @@ struct sht15_data {
30603 int supply_uV;
30604 bool supply_uV_valid;
30605 struct work_struct update_supply_work;
30606 - atomic_t interrupt_handled;
30607 + atomic_unchecked_t interrupt_handled;
30608 };
30609
30610 /**
30611 @@ -509,13 +509,13 @@ static int sht15_measurement(struct sht15_data *data,
30612 return ret;
30613
30614 gpio_direction_input(data->pdata->gpio_data);
30615 - atomic_set(&data->interrupt_handled, 0);
30616 + atomic_set_unchecked(&data->interrupt_handled, 0);
30617
30618 enable_irq(gpio_to_irq(data->pdata->gpio_data));
30619 if (gpio_get_value(data->pdata->gpio_data) == 0) {
30620 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
30621 /* Only relevant if the interrupt hasn't occurred. */
30622 - if (!atomic_read(&data->interrupt_handled))
30623 + if (!atomic_read_unchecked(&data->interrupt_handled))
30624 schedule_work(&data->read_work);
30625 }
30626 ret = wait_event_timeout(data->wait_queue,
30627 @@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
30628
30629 /* First disable the interrupt */
30630 disable_irq_nosync(irq);
30631 - atomic_inc(&data->interrupt_handled);
30632 + atomic_inc_unchecked(&data->interrupt_handled);
30633 /* Then schedule a reading work struct */
30634 if (data->state != SHT15_READING_NOTHING)
30635 schedule_work(&data->read_work);
30636 @@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
30637 * If not, then start the interrupt again - care here as could
30638 * have gone low in meantime so verify it hasn't!
30639 */
30640 - atomic_set(&data->interrupt_handled, 0);
30641 + atomic_set_unchecked(&data->interrupt_handled, 0);
30642 enable_irq(gpio_to_irq(data->pdata->gpio_data));
30643 /* If still not occurred or another handler has been scheduled */
30644 if (gpio_get_value(data->pdata->gpio_data)
30645 - || atomic_read(&data->interrupt_handled))
30646 + || atomic_read_unchecked(&data->interrupt_handled))
30647 return;
30648 }
30649
30650 diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
30651 index 378fcb5..5e91fa8 100644
30652 --- a/drivers/i2c/busses/i2c-amd756-s4882.c
30653 +++ b/drivers/i2c/busses/i2c-amd756-s4882.c
30654 @@ -43,7 +43,7 @@
30655 extern struct i2c_adapter amd756_smbus;
30656
30657 static struct i2c_adapter *s4882_adapter;
30658 -static struct i2c_algorithm *s4882_algo;
30659 +static i2c_algorithm_no_const *s4882_algo;
30660
30661 /* Wrapper access functions for multiplexed SMBus */
30662 static DEFINE_MUTEX(amd756_lock);
30663 diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
30664 index 29015eb..af2d8e9 100644
30665 --- a/drivers/i2c/busses/i2c-nforce2-s4985.c
30666 +++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
30667 @@ -41,7 +41,7 @@
30668 extern struct i2c_adapter *nforce2_smbus;
30669
30670 static struct i2c_adapter *s4985_adapter;
30671 -static struct i2c_algorithm *s4985_algo;
30672 +static i2c_algorithm_no_const *s4985_algo;
30673
30674 /* Wrapper access functions for multiplexed SMBus */
30675 static DEFINE_MUTEX(nforce2_lock);
30676 diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
30677 index d7a4833..7fae376 100644
30678 --- a/drivers/i2c/i2c-mux.c
30679 +++ b/drivers/i2c/i2c-mux.c
30680 @@ -28,7 +28,7 @@
30681 /* multiplexer per channel data */
30682 struct i2c_mux_priv {
30683 struct i2c_adapter adap;
30684 - struct i2c_algorithm algo;
30685 + i2c_algorithm_no_const algo;
30686
30687 struct i2c_adapter *parent;
30688 void *mux_dev; /* the mux chip/device */
30689 diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
30690 index 57d00ca..0145194 100644
30691 --- a/drivers/ide/aec62xx.c
30692 +++ b/drivers/ide/aec62xx.c
30693 @@ -181,7 +181,7 @@ static const struct ide_port_ops atp86x_port_ops = {
30694 .cable_detect = atp86x_cable_detect,
30695 };
30696
30697 -static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
30698 +static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
30699 { /* 0: AEC6210 */
30700 .name = DRV_NAME,
30701 .init_chipset = init_chipset_aec62xx,
30702 diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
30703 index 2c8016a..911a27c 100644
30704 --- a/drivers/ide/alim15x3.c
30705 +++ b/drivers/ide/alim15x3.c
30706 @@ -512,7 +512,7 @@ static const struct ide_dma_ops ali_dma_ops = {
30707 .dma_sff_read_status = ide_dma_sff_read_status,
30708 };
30709
30710 -static const struct ide_port_info ali15x3_chipset __devinitdata = {
30711 +static const struct ide_port_info ali15x3_chipset __devinitconst = {
30712 .name = DRV_NAME,
30713 .init_chipset = init_chipset_ali15x3,
30714 .init_hwif = init_hwif_ali15x3,
30715 diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
30716 index 3747b25..56fc995 100644
30717 --- a/drivers/ide/amd74xx.c
30718 +++ b/drivers/ide/amd74xx.c
30719 @@ -223,7 +223,7 @@ static const struct ide_port_ops amd_port_ops = {
30720 .udma_mask = udma, \
30721 }
30722
30723 -static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
30724 +static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
30725 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
30726 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
30727 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
30728 diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
30729 index 15f0ead..cb43480 100644
30730 --- a/drivers/ide/atiixp.c
30731 +++ b/drivers/ide/atiixp.c
30732 @@ -139,7 +139,7 @@ static const struct ide_port_ops atiixp_port_ops = {
30733 .cable_detect = atiixp_cable_detect,
30734 };
30735
30736 -static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
30737 +static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
30738 { /* 0: IXP200/300/400/700 */
30739 .name = DRV_NAME,
30740 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
30741 diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
30742 index 5f80312..d1fc438 100644
30743 --- a/drivers/ide/cmd64x.c
30744 +++ b/drivers/ide/cmd64x.c
30745 @@ -327,7 +327,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
30746 .dma_sff_read_status = ide_dma_sff_read_status,
30747 };
30748
30749 -static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
30750 +static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
30751 { /* 0: CMD643 */
30752 .name = DRV_NAME,
30753 .init_chipset = init_chipset_cmd64x,
30754 diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
30755 index 2c1e5f7..1444762 100644
30756 --- a/drivers/ide/cs5520.c
30757 +++ b/drivers/ide/cs5520.c
30758 @@ -94,7 +94,7 @@ static const struct ide_port_ops cs5520_port_ops = {
30759 .set_dma_mode = cs5520_set_dma_mode,
30760 };
30761
30762 -static const struct ide_port_info cyrix_chipset __devinitdata = {
30763 +static const struct ide_port_info cyrix_chipset __devinitconst = {
30764 .name = DRV_NAME,
30765 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
30766 .port_ops = &cs5520_port_ops,
30767 diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
30768 index 4dc4eb9..49b40ad 100644
30769 --- a/drivers/ide/cs5530.c
30770 +++ b/drivers/ide/cs5530.c
30771 @@ -245,7 +245,7 @@ static const struct ide_port_ops cs5530_port_ops = {
30772 .udma_filter = cs5530_udma_filter,
30773 };
30774
30775 -static const struct ide_port_info cs5530_chipset __devinitdata = {
30776 +static const struct ide_port_info cs5530_chipset __devinitconst = {
30777 .name = DRV_NAME,
30778 .init_chipset = init_chipset_cs5530,
30779 .init_hwif = init_hwif_cs5530,
30780 diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
30781 index 5059faf..18d4c85 100644
30782 --- a/drivers/ide/cs5535.c
30783 +++ b/drivers/ide/cs5535.c
30784 @@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
30785 .cable_detect = cs5535_cable_detect,
30786 };
30787
30788 -static const struct ide_port_info cs5535_chipset __devinitdata = {
30789 +static const struct ide_port_info cs5535_chipset __devinitconst = {
30790 .name = DRV_NAME,
30791 .port_ops = &cs5535_port_ops,
30792 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
30793 diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
30794 index 847553f..3ffb49d 100644
30795 --- a/drivers/ide/cy82c693.c
30796 +++ b/drivers/ide/cy82c693.c
30797 @@ -163,7 +163,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
30798 .set_dma_mode = cy82c693_set_dma_mode,
30799 };
30800
30801 -static const struct ide_port_info cy82c693_chipset __devinitdata = {
30802 +static const struct ide_port_info cy82c693_chipset __devinitconst = {
30803 .name = DRV_NAME,
30804 .init_iops = init_iops_cy82c693,
30805 .port_ops = &cy82c693_port_ops,
30806 diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
30807 index 58c51cd..4aec3b8 100644
30808 --- a/drivers/ide/hpt366.c
30809 +++ b/drivers/ide/hpt366.c
30810 @@ -443,7 +443,7 @@ static struct hpt_timings hpt37x_timings = {
30811 }
30812 };
30813
30814 -static const struct hpt_info hpt36x __devinitdata = {
30815 +static const struct hpt_info hpt36x __devinitconst = {
30816 .chip_name = "HPT36x",
30817 .chip_type = HPT36x,
30818 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
30819 @@ -451,7 +451,7 @@ static const struct hpt_info hpt36x __devinitdata = {
30820 .timings = &hpt36x_timings
30821 };
30822
30823 -static const struct hpt_info hpt370 __devinitdata = {
30824 +static const struct hpt_info hpt370 __devinitconst = {
30825 .chip_name = "HPT370",
30826 .chip_type = HPT370,
30827 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
30828 @@ -459,7 +459,7 @@ static const struct hpt_info hpt370 __devinitdata = {
30829 .timings = &hpt37x_timings
30830 };
30831
30832 -static const struct hpt_info hpt370a __devinitdata = {
30833 +static const struct hpt_info hpt370a __devinitconst = {
30834 .chip_name = "HPT370A",
30835 .chip_type = HPT370A,
30836 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
30837 @@ -467,7 +467,7 @@ static const struct hpt_info hpt370a __devinitdata = {
30838 .timings = &hpt37x_timings
30839 };
30840
30841 -static const struct hpt_info hpt374 __devinitdata = {
30842 +static const struct hpt_info hpt374 __devinitconst = {
30843 .chip_name = "HPT374",
30844 .chip_type = HPT374,
30845 .udma_mask = ATA_UDMA5,
30846 @@ -475,7 +475,7 @@ static const struct hpt_info hpt374 __devinitdata = {
30847 .timings = &hpt37x_timings
30848 };
30849
30850 -static const struct hpt_info hpt372 __devinitdata = {
30851 +static const struct hpt_info hpt372 __devinitconst = {
30852 .chip_name = "HPT372",
30853 .chip_type = HPT372,
30854 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
30855 @@ -483,7 +483,7 @@ static const struct hpt_info hpt372 __devinitdata = {
30856 .timings = &hpt37x_timings
30857 };
30858
30859 -static const struct hpt_info hpt372a __devinitdata = {
30860 +static const struct hpt_info hpt372a __devinitconst = {
30861 .chip_name = "HPT372A",
30862 .chip_type = HPT372A,
30863 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
30864 @@ -491,7 +491,7 @@ static const struct hpt_info hpt372a __devinitdata = {
30865 .timings = &hpt37x_timings
30866 };
30867
30868 -static const struct hpt_info hpt302 __devinitdata = {
30869 +static const struct hpt_info hpt302 __devinitconst = {
30870 .chip_name = "HPT302",
30871 .chip_type = HPT302,
30872 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
30873 @@ -499,7 +499,7 @@ static const struct hpt_info hpt302 __devinitdata = {
30874 .timings = &hpt37x_timings
30875 };
30876
30877 -static const struct hpt_info hpt371 __devinitdata = {
30878 +static const struct hpt_info hpt371 __devinitconst = {
30879 .chip_name = "HPT371",
30880 .chip_type = HPT371,
30881 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
30882 @@ -507,7 +507,7 @@ static const struct hpt_info hpt371 __devinitdata = {
30883 .timings = &hpt37x_timings
30884 };
30885
30886 -static const struct hpt_info hpt372n __devinitdata = {
30887 +static const struct hpt_info hpt372n __devinitconst = {
30888 .chip_name = "HPT372N",
30889 .chip_type = HPT372N,
30890 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
30891 @@ -515,7 +515,7 @@ static const struct hpt_info hpt372n __devinitdata = {
30892 .timings = &hpt37x_timings
30893 };
30894
30895 -static const struct hpt_info hpt302n __devinitdata = {
30896 +static const struct hpt_info hpt302n __devinitconst = {
30897 .chip_name = "HPT302N",
30898 .chip_type = HPT302N,
30899 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
30900 @@ -523,7 +523,7 @@ static const struct hpt_info hpt302n __devinitdata = {
30901 .timings = &hpt37x_timings
30902 };
30903
30904 -static const struct hpt_info hpt371n __devinitdata = {
30905 +static const struct hpt_info hpt371n __devinitconst = {
30906 .chip_name = "HPT371N",
30907 .chip_type = HPT371N,
30908 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
30909 @@ -1361,7 +1361,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
30910 .dma_sff_read_status = ide_dma_sff_read_status,
30911 };
30912
30913 -static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
30914 +static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
30915 { /* 0: HPT36x */
30916 .name = DRV_NAME,
30917 .init_chipset = init_chipset_hpt366,
30918 diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
30919 index 8126824..55a2798 100644
30920 --- a/drivers/ide/ide-cd.c
30921 +++ b/drivers/ide/ide-cd.c
30922 @@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
30923 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
30924 if ((unsigned long)buf & alignment
30925 || blk_rq_bytes(rq) & q->dma_pad_mask
30926 - || object_is_on_stack(buf))
30927 + || object_starts_on_stack(buf))
30928 drive->dma = 0;
30929 }
30930 }
30931 diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
30932 index 7f56b73..dab5b67 100644
30933 --- a/drivers/ide/ide-pci-generic.c
30934 +++ b/drivers/ide/ide-pci-generic.c
30935 @@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
30936 .udma_mask = ATA_UDMA6, \
30937 }
30938
30939 -static const struct ide_port_info generic_chipsets[] __devinitdata = {
30940 +static const struct ide_port_info generic_chipsets[] __devinitconst = {
30941 /* 0: Unknown */
30942 DECLARE_GENERIC_PCI_DEV(0),
30943
30944 diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
30945 index 560e66d..d5dd180 100644
30946 --- a/drivers/ide/it8172.c
30947 +++ b/drivers/ide/it8172.c
30948 @@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
30949 .set_dma_mode = it8172_set_dma_mode,
30950 };
30951
30952 -static const struct ide_port_info it8172_port_info __devinitdata = {
30953 +static const struct ide_port_info it8172_port_info __devinitconst = {
30954 .name = DRV_NAME,
30955 .port_ops = &it8172_port_ops,
30956 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
30957 diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
30958 index 46816ba..1847aeb 100644
30959 --- a/drivers/ide/it8213.c
30960 +++ b/drivers/ide/it8213.c
30961 @@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
30962 .cable_detect = it8213_cable_detect,
30963 };
30964
30965 -static const struct ide_port_info it8213_chipset __devinitdata = {
30966 +static const struct ide_port_info it8213_chipset __devinitconst = {
30967 .name = DRV_NAME,
30968 .enablebits = { {0x41, 0x80, 0x80} },
30969 .port_ops = &it8213_port_ops,
30970 diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
30971 index 2e3169f..c5611db 100644
30972 --- a/drivers/ide/it821x.c
30973 +++ b/drivers/ide/it821x.c
30974 @@ -630,7 +630,7 @@ static const struct ide_port_ops it821x_port_ops = {
30975 .cable_detect = it821x_cable_detect,
30976 };
30977
30978 -static const struct ide_port_info it821x_chipset __devinitdata = {
30979 +static const struct ide_port_info it821x_chipset __devinitconst = {
30980 .name = DRV_NAME,
30981 .init_chipset = init_chipset_it821x,
30982 .init_hwif = init_hwif_it821x,
30983 diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
30984 index 74c2c4a..efddd7d 100644
30985 --- a/drivers/ide/jmicron.c
30986 +++ b/drivers/ide/jmicron.c
30987 @@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
30988 .cable_detect = jmicron_cable_detect,
30989 };
30990
30991 -static const struct ide_port_info jmicron_chipset __devinitdata = {
30992 +static const struct ide_port_info jmicron_chipset __devinitconst = {
30993 .name = DRV_NAME,
30994 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
30995 .port_ops = &jmicron_port_ops,
30996 diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
30997 index 95327a2..73f78d8 100644
30998 --- a/drivers/ide/ns87415.c
30999 +++ b/drivers/ide/ns87415.c
31000 @@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
31001 .dma_sff_read_status = superio_dma_sff_read_status,
31002 };
31003
31004 -static const struct ide_port_info ns87415_chipset __devinitdata = {
31005 +static const struct ide_port_info ns87415_chipset __devinitconst = {
31006 .name = DRV_NAME,
31007 .init_hwif = init_hwif_ns87415,
31008 .tp_ops = &ns87415_tp_ops,
31009 diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
31010 index 1a53a4c..39edc66 100644
31011 --- a/drivers/ide/opti621.c
31012 +++ b/drivers/ide/opti621.c
31013 @@ -131,7 +131,7 @@ static const struct ide_port_ops opti621_port_ops = {
31014 .set_pio_mode = opti621_set_pio_mode,
31015 };
31016
31017 -static const struct ide_port_info opti621_chipset __devinitdata = {
31018 +static const struct ide_port_info opti621_chipset __devinitconst = {
31019 .name = DRV_NAME,
31020 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
31021 .port_ops = &opti621_port_ops,
31022 diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
31023 index 9546fe2..2e5ceb6 100644
31024 --- a/drivers/ide/pdc202xx_new.c
31025 +++ b/drivers/ide/pdc202xx_new.c
31026 @@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
31027 .udma_mask = udma, \
31028 }
31029
31030 -static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
31031 +static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
31032 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
31033 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
31034 };
31035 diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
31036 index 3a35ec6..5634510 100644
31037 --- a/drivers/ide/pdc202xx_old.c
31038 +++ b/drivers/ide/pdc202xx_old.c
31039 @@ -270,7 +270,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
31040 .max_sectors = sectors, \
31041 }
31042
31043 -static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
31044 +static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
31045 { /* 0: PDC20246 */
31046 .name = DRV_NAME,
31047 .init_chipset = init_chipset_pdc202xx,
31048 diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
31049 index 1892e81..fe0fd60 100644
31050 --- a/drivers/ide/piix.c
31051 +++ b/drivers/ide/piix.c
31052 @@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
31053 .udma_mask = udma, \
31054 }
31055
31056 -static const struct ide_port_info piix_pci_info[] __devinitdata = {
31057 +static const struct ide_port_info piix_pci_info[] __devinitconst = {
31058 /* 0: MPIIX */
31059 { /*
31060 * MPIIX actually has only a single IDE channel mapped to
31061 diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
31062 index a6414a8..c04173e 100644
31063 --- a/drivers/ide/rz1000.c
31064 +++ b/drivers/ide/rz1000.c
31065 @@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
31066 }
31067 }
31068
31069 -static const struct ide_port_info rz1000_chipset __devinitdata = {
31070 +static const struct ide_port_info rz1000_chipset __devinitconst = {
31071 .name = DRV_NAME,
31072 .host_flags = IDE_HFLAG_NO_DMA,
31073 };
31074 diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
31075 index 356b9b5..d4758eb 100644
31076 --- a/drivers/ide/sc1200.c
31077 +++ b/drivers/ide/sc1200.c
31078 @@ -291,7 +291,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
31079 .dma_sff_read_status = ide_dma_sff_read_status,
31080 };
31081
31082 -static const struct ide_port_info sc1200_chipset __devinitdata = {
31083 +static const struct ide_port_info sc1200_chipset __devinitconst = {
31084 .name = DRV_NAME,
31085 .port_ops = &sc1200_port_ops,
31086 .dma_ops = &sc1200_dma_ops,
31087 diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
31088 index b7f5b0c..9701038 100644
31089 --- a/drivers/ide/scc_pata.c
31090 +++ b/drivers/ide/scc_pata.c
31091 @@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
31092 .dma_sff_read_status = scc_dma_sff_read_status,
31093 };
31094
31095 -static const struct ide_port_info scc_chipset __devinitdata = {
31096 +static const struct ide_port_info scc_chipset __devinitconst = {
31097 .name = "sccIDE",
31098 .init_iops = init_iops_scc,
31099 .init_dma = scc_init_dma,
31100 diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
31101 index 35fb8da..24d72ef 100644
31102 --- a/drivers/ide/serverworks.c
31103 +++ b/drivers/ide/serverworks.c
31104 @@ -337,7 +337,7 @@ static const struct ide_port_ops svwks_port_ops = {
31105 .cable_detect = svwks_cable_detect,
31106 };
31107
31108 -static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
31109 +static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
31110 { /* 0: OSB4 */
31111 .name = DRV_NAME,
31112 .init_chipset = init_chipset_svwks,
31113 diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
31114 index ddeda44..46f7e30 100644
31115 --- a/drivers/ide/siimage.c
31116 +++ b/drivers/ide/siimage.c
31117 @@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
31118 .udma_mask = ATA_UDMA6, \
31119 }
31120
31121 -static const struct ide_port_info siimage_chipsets[] __devinitdata = {
31122 +static const struct ide_port_info siimage_chipsets[] __devinitconst = {
31123 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
31124 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
31125 };
31126 diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
31127 index 4a00225..09e61b4 100644
31128 --- a/drivers/ide/sis5513.c
31129 +++ b/drivers/ide/sis5513.c
31130 @@ -563,7 +563,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
31131 .cable_detect = sis_cable_detect,
31132 };
31133
31134 -static const struct ide_port_info sis5513_chipset __devinitdata = {
31135 +static const struct ide_port_info sis5513_chipset __devinitconst = {
31136 .name = DRV_NAME,
31137 .init_chipset = init_chipset_sis5513,
31138 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
31139 diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
31140 index f21dc2a..d051cd2 100644
31141 --- a/drivers/ide/sl82c105.c
31142 +++ b/drivers/ide/sl82c105.c
31143 @@ -299,7 +299,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
31144 .dma_sff_read_status = ide_dma_sff_read_status,
31145 };
31146
31147 -static const struct ide_port_info sl82c105_chipset __devinitdata = {
31148 +static const struct ide_port_info sl82c105_chipset __devinitconst = {
31149 .name = DRV_NAME,
31150 .init_chipset = init_chipset_sl82c105,
31151 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
31152 diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
31153 index 864ffe0..863a5e9 100644
31154 --- a/drivers/ide/slc90e66.c
31155 +++ b/drivers/ide/slc90e66.c
31156 @@ -132,7 +132,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
31157 .cable_detect = slc90e66_cable_detect,
31158 };
31159
31160 -static const struct ide_port_info slc90e66_chipset __devinitdata = {
31161 +static const struct ide_port_info slc90e66_chipset __devinitconst = {
31162 .name = DRV_NAME,
31163 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
31164 .port_ops = &slc90e66_port_ops,
31165 diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
31166 index 4799d5c..1794678 100644
31167 --- a/drivers/ide/tc86c001.c
31168 +++ b/drivers/ide/tc86c001.c
31169 @@ -192,7 +192,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
31170 .dma_sff_read_status = ide_dma_sff_read_status,
31171 };
31172
31173 -static const struct ide_port_info tc86c001_chipset __devinitdata = {
31174 +static const struct ide_port_info tc86c001_chipset __devinitconst = {
31175 .name = DRV_NAME,
31176 .init_hwif = init_hwif_tc86c001,
31177 .port_ops = &tc86c001_port_ops,
31178 diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
31179 index 281c914..55ce1b8 100644
31180 --- a/drivers/ide/triflex.c
31181 +++ b/drivers/ide/triflex.c
31182 @@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
31183 .set_dma_mode = triflex_set_mode,
31184 };
31185
31186 -static const struct ide_port_info triflex_device __devinitdata = {
31187 +static const struct ide_port_info triflex_device __devinitconst = {
31188 .name = DRV_NAME,
31189 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
31190 .port_ops = &triflex_port_ops,
31191 diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
31192 index 4b42ca0..e494a98 100644
31193 --- a/drivers/ide/trm290.c
31194 +++ b/drivers/ide/trm290.c
31195 @@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
31196 .dma_check = trm290_dma_check,
31197 };
31198
31199 -static const struct ide_port_info trm290_chipset __devinitdata = {
31200 +static const struct ide_port_info trm290_chipset __devinitconst = {
31201 .name = DRV_NAME,
31202 .init_hwif = init_hwif_trm290,
31203 .tp_ops = &trm290_tp_ops,
31204 diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
31205 index f46f49c..eb77678 100644
31206 --- a/drivers/ide/via82cxxx.c
31207 +++ b/drivers/ide/via82cxxx.c
31208 @@ -403,7 +403,7 @@ static const struct ide_port_ops via_port_ops = {
31209 .cable_detect = via82cxxx_cable_detect,
31210 };
31211
31212 -static const struct ide_port_info via82cxxx_chipset __devinitdata = {
31213 +static const struct ide_port_info via82cxxx_chipset __devinitconst = {
31214 .name = DRV_NAME,
31215 .init_chipset = init_chipset_via82cxxx,
31216 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
31217 diff --git a/drivers/ieee802154/fakehard.c b/drivers/ieee802154/fakehard.c
31218 index 73d4531..c90cd2d 100644
31219 --- a/drivers/ieee802154/fakehard.c
31220 +++ b/drivers/ieee802154/fakehard.c
31221 @@ -386,7 +386,7 @@ static int __devinit ieee802154fake_probe(struct platform_device *pdev)
31222 phy->transmit_power = 0xbf;
31223
31224 dev->netdev_ops = &fake_ops;
31225 - dev->ml_priv = &fake_mlme;
31226 + dev->ml_priv = (void *)&fake_mlme;
31227
31228 priv = netdev_priv(dev);
31229 priv->phy = phy;
31230 diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
31231 index c889aae..6cf5aa7 100644
31232 --- a/drivers/infiniband/core/cm.c
31233 +++ b/drivers/infiniband/core/cm.c
31234 @@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
31235
31236 struct cm_counter_group {
31237 struct kobject obj;
31238 - atomic_long_t counter[CM_ATTR_COUNT];
31239 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
31240 };
31241
31242 struct cm_counter_attribute {
31243 @@ -1394,7 +1394,7 @@ static void cm_dup_req_handler(struct cm_work *work,
31244 struct ib_mad_send_buf *msg = NULL;
31245 int ret;
31246
31247 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31248 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31249 counter[CM_REQ_COUNTER]);
31250
31251 /* Quick state check to discard duplicate REQs. */
31252 @@ -1778,7 +1778,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
31253 if (!cm_id_priv)
31254 return;
31255
31256 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31257 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31258 counter[CM_REP_COUNTER]);
31259 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
31260 if (ret)
31261 @@ -1945,7 +1945,7 @@ static int cm_rtu_handler(struct cm_work *work)
31262 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
31263 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
31264 spin_unlock_irq(&cm_id_priv->lock);
31265 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31266 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31267 counter[CM_RTU_COUNTER]);
31268 goto out;
31269 }
31270 @@ -2128,7 +2128,7 @@ static int cm_dreq_handler(struct cm_work *work)
31271 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
31272 dreq_msg->local_comm_id);
31273 if (!cm_id_priv) {
31274 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31275 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31276 counter[CM_DREQ_COUNTER]);
31277 cm_issue_drep(work->port, work->mad_recv_wc);
31278 return -EINVAL;
31279 @@ -2153,7 +2153,7 @@ static int cm_dreq_handler(struct cm_work *work)
31280 case IB_CM_MRA_REP_RCVD:
31281 break;
31282 case IB_CM_TIMEWAIT:
31283 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31284 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31285 counter[CM_DREQ_COUNTER]);
31286 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
31287 goto unlock;
31288 @@ -2167,7 +2167,7 @@ static int cm_dreq_handler(struct cm_work *work)
31289 cm_free_msg(msg);
31290 goto deref;
31291 case IB_CM_DREQ_RCVD:
31292 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31293 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31294 counter[CM_DREQ_COUNTER]);
31295 goto unlock;
31296 default:
31297 @@ -2534,7 +2534,7 @@ static int cm_mra_handler(struct cm_work *work)
31298 ib_modify_mad(cm_id_priv->av.port->mad_agent,
31299 cm_id_priv->msg, timeout)) {
31300 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
31301 - atomic_long_inc(&work->port->
31302 + atomic_long_inc_unchecked(&work->port->
31303 counter_group[CM_RECV_DUPLICATES].
31304 counter[CM_MRA_COUNTER]);
31305 goto out;
31306 @@ -2543,7 +2543,7 @@ static int cm_mra_handler(struct cm_work *work)
31307 break;
31308 case IB_CM_MRA_REQ_RCVD:
31309 case IB_CM_MRA_REP_RCVD:
31310 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31311 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31312 counter[CM_MRA_COUNTER]);
31313 /* fall through */
31314 default:
31315 @@ -2705,7 +2705,7 @@ static int cm_lap_handler(struct cm_work *work)
31316 case IB_CM_LAP_IDLE:
31317 break;
31318 case IB_CM_MRA_LAP_SENT:
31319 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31320 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31321 counter[CM_LAP_COUNTER]);
31322 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
31323 goto unlock;
31324 @@ -2721,7 +2721,7 @@ static int cm_lap_handler(struct cm_work *work)
31325 cm_free_msg(msg);
31326 goto deref;
31327 case IB_CM_LAP_RCVD:
31328 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31329 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31330 counter[CM_LAP_COUNTER]);
31331 goto unlock;
31332 default:
31333 @@ -3005,7 +3005,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
31334 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
31335 if (cur_cm_id_priv) {
31336 spin_unlock_irq(&cm.lock);
31337 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31338 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31339 counter[CM_SIDR_REQ_COUNTER]);
31340 goto out; /* Duplicate message. */
31341 }
31342 @@ -3217,10 +3217,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
31343 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
31344 msg->retries = 1;
31345
31346 - atomic_long_add(1 + msg->retries,
31347 + atomic_long_add_unchecked(1 + msg->retries,
31348 &port->counter_group[CM_XMIT].counter[attr_index]);
31349 if (msg->retries)
31350 - atomic_long_add(msg->retries,
31351 + atomic_long_add_unchecked(msg->retries,
31352 &port->counter_group[CM_XMIT_RETRIES].
31353 counter[attr_index]);
31354
31355 @@ -3430,7 +3430,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
31356 }
31357
31358 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
31359 - atomic_long_inc(&port->counter_group[CM_RECV].
31360 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
31361 counter[attr_id - CM_ATTR_ID_OFFSET]);
31362
31363 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
31364 @@ -3635,7 +3635,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
31365 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
31366
31367 return sprintf(buf, "%ld\n",
31368 - atomic_long_read(&group->counter[cm_attr->index]));
31369 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
31370 }
31371
31372 static const struct sysfs_ops cm_counter_ops = {
31373 diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
31374 index 176c8f9..2627b62 100644
31375 --- a/drivers/infiniband/core/fmr_pool.c
31376 +++ b/drivers/infiniband/core/fmr_pool.c
31377 @@ -98,8 +98,8 @@ struct ib_fmr_pool {
31378
31379 struct task_struct *thread;
31380
31381 - atomic_t req_ser;
31382 - atomic_t flush_ser;
31383 + atomic_unchecked_t req_ser;
31384 + atomic_unchecked_t flush_ser;
31385
31386 wait_queue_head_t force_wait;
31387 };
31388 @@ -180,10 +180,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
31389 struct ib_fmr_pool *pool = pool_ptr;
31390
31391 do {
31392 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
31393 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
31394 ib_fmr_batch_release(pool);
31395
31396 - atomic_inc(&pool->flush_ser);
31397 + atomic_inc_unchecked(&pool->flush_ser);
31398 wake_up_interruptible(&pool->force_wait);
31399
31400 if (pool->flush_function)
31401 @@ -191,7 +191,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
31402 }
31403
31404 set_current_state(TASK_INTERRUPTIBLE);
31405 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
31406 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
31407 !kthread_should_stop())
31408 schedule();
31409 __set_current_state(TASK_RUNNING);
31410 @@ -283,8 +283,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
31411 pool->dirty_watermark = params->dirty_watermark;
31412 pool->dirty_len = 0;
31413 spin_lock_init(&pool->pool_lock);
31414 - atomic_set(&pool->req_ser, 0);
31415 - atomic_set(&pool->flush_ser, 0);
31416 + atomic_set_unchecked(&pool->req_ser, 0);
31417 + atomic_set_unchecked(&pool->flush_ser, 0);
31418 init_waitqueue_head(&pool->force_wait);
31419
31420 pool->thread = kthread_run(ib_fmr_cleanup_thread,
31421 @@ -412,11 +412,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
31422 }
31423 spin_unlock_irq(&pool->pool_lock);
31424
31425 - serial = atomic_inc_return(&pool->req_ser);
31426 + serial = atomic_inc_return_unchecked(&pool->req_ser);
31427 wake_up_process(pool->thread);
31428
31429 if (wait_event_interruptible(pool->force_wait,
31430 - atomic_read(&pool->flush_ser) - serial >= 0))
31431 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
31432 return -EINTR;
31433
31434 return 0;
31435 @@ -526,7 +526,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
31436 } else {
31437 list_add_tail(&fmr->list, &pool->dirty_list);
31438 if (++pool->dirty_len >= pool->dirty_watermark) {
31439 - atomic_inc(&pool->req_ser);
31440 + atomic_inc_unchecked(&pool->req_ser);
31441 wake_up_process(pool->thread);
31442 }
31443 }
31444 diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
31445 index 40c8353..946b0e4 100644
31446 --- a/drivers/infiniband/hw/cxgb4/mem.c
31447 +++ b/drivers/infiniband/hw/cxgb4/mem.c
31448 @@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
31449 int err;
31450 struct fw_ri_tpte tpt;
31451 u32 stag_idx;
31452 - static atomic_t key;
31453 + static atomic_unchecked_t key;
31454
31455 if (c4iw_fatal_error(rdev))
31456 return -EIO;
31457 @@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
31458 &rdev->resource.tpt_fifo_lock);
31459 if (!stag_idx)
31460 return -ENOMEM;
31461 - *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
31462 + *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
31463 }
31464 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
31465 __func__, stag_state, type, pdid, stag_idx);
31466 diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
31467 index 79b3dbc..96e5fcc 100644
31468 --- a/drivers/infiniband/hw/ipath/ipath_rc.c
31469 +++ b/drivers/infiniband/hw/ipath/ipath_rc.c
31470 @@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
31471 struct ib_atomic_eth *ateth;
31472 struct ipath_ack_entry *e;
31473 u64 vaddr;
31474 - atomic64_t *maddr;
31475 + atomic64_unchecked_t *maddr;
31476 u64 sdata;
31477 u32 rkey;
31478 u8 next;
31479 @@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
31480 IB_ACCESS_REMOTE_ATOMIC)))
31481 goto nack_acc_unlck;
31482 /* Perform atomic OP and save result. */
31483 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
31484 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
31485 sdata = be64_to_cpu(ateth->swap_data);
31486 e = &qp->s_ack_queue[qp->r_head_ack_queue];
31487 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
31488 - (u64) atomic64_add_return(sdata, maddr) - sdata :
31489 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
31490 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
31491 be64_to_cpu(ateth->compare_data),
31492 sdata);
31493 diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
31494 index 1f95bba..9530f87 100644
31495 --- a/drivers/infiniband/hw/ipath/ipath_ruc.c
31496 +++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
31497 @@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
31498 unsigned long flags;
31499 struct ib_wc wc;
31500 u64 sdata;
31501 - atomic64_t *maddr;
31502 + atomic64_unchecked_t *maddr;
31503 enum ib_wc_status send_status;
31504
31505 /*
31506 @@ -382,11 +382,11 @@ again:
31507 IB_ACCESS_REMOTE_ATOMIC)))
31508 goto acc_err;
31509 /* Perform atomic OP and save result. */
31510 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
31511 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
31512 sdata = wqe->wr.wr.atomic.compare_add;
31513 *(u64 *) sqp->s_sge.sge.vaddr =
31514 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
31515 - (u64) atomic64_add_return(sdata, maddr) - sdata :
31516 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
31517 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
31518 sdata, wqe->wr.wr.atomic.swap);
31519 goto send_comp;
31520 diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
31521 index 7140199..da60063 100644
31522 --- a/drivers/infiniband/hw/nes/nes.c
31523 +++ b/drivers/infiniband/hw/nes/nes.c
31524 @@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
31525 LIST_HEAD(nes_adapter_list);
31526 static LIST_HEAD(nes_dev_list);
31527
31528 -atomic_t qps_destroyed;
31529 +atomic_unchecked_t qps_destroyed;
31530
31531 static unsigned int ee_flsh_adapter;
31532 static unsigned int sysfs_nonidx_addr;
31533 @@ -272,7 +272,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
31534 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
31535 struct nes_adapter *nesadapter = nesdev->nesadapter;
31536
31537 - atomic_inc(&qps_destroyed);
31538 + atomic_inc_unchecked(&qps_destroyed);
31539
31540 /* Free the control structures */
31541
31542 diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
31543 index c438e46..ca30356 100644
31544 --- a/drivers/infiniband/hw/nes/nes.h
31545 +++ b/drivers/infiniband/hw/nes/nes.h
31546 @@ -178,17 +178,17 @@ extern unsigned int nes_debug_level;
31547 extern unsigned int wqm_quanta;
31548 extern struct list_head nes_adapter_list;
31549
31550 -extern atomic_t cm_connects;
31551 -extern atomic_t cm_accepts;
31552 -extern atomic_t cm_disconnects;
31553 -extern atomic_t cm_closes;
31554 -extern atomic_t cm_connecteds;
31555 -extern atomic_t cm_connect_reqs;
31556 -extern atomic_t cm_rejects;
31557 -extern atomic_t mod_qp_timouts;
31558 -extern atomic_t qps_created;
31559 -extern atomic_t qps_destroyed;
31560 -extern atomic_t sw_qps_destroyed;
31561 +extern atomic_unchecked_t cm_connects;
31562 +extern atomic_unchecked_t cm_accepts;
31563 +extern atomic_unchecked_t cm_disconnects;
31564 +extern atomic_unchecked_t cm_closes;
31565 +extern atomic_unchecked_t cm_connecteds;
31566 +extern atomic_unchecked_t cm_connect_reqs;
31567 +extern atomic_unchecked_t cm_rejects;
31568 +extern atomic_unchecked_t mod_qp_timouts;
31569 +extern atomic_unchecked_t qps_created;
31570 +extern atomic_unchecked_t qps_destroyed;
31571 +extern atomic_unchecked_t sw_qps_destroyed;
31572 extern u32 mh_detected;
31573 extern u32 mh_pauses_sent;
31574 extern u32 cm_packets_sent;
31575 @@ -197,16 +197,16 @@ extern u32 cm_packets_created;
31576 extern u32 cm_packets_received;
31577 extern u32 cm_packets_dropped;
31578 extern u32 cm_packets_retrans;
31579 -extern atomic_t cm_listens_created;
31580 -extern atomic_t cm_listens_destroyed;
31581 +extern atomic_unchecked_t cm_listens_created;
31582 +extern atomic_unchecked_t cm_listens_destroyed;
31583 extern u32 cm_backlog_drops;
31584 -extern atomic_t cm_loopbacks;
31585 -extern atomic_t cm_nodes_created;
31586 -extern atomic_t cm_nodes_destroyed;
31587 -extern atomic_t cm_accel_dropped_pkts;
31588 -extern atomic_t cm_resets_recvd;
31589 -extern atomic_t pau_qps_created;
31590 -extern atomic_t pau_qps_destroyed;
31591 +extern atomic_unchecked_t cm_loopbacks;
31592 +extern atomic_unchecked_t cm_nodes_created;
31593 +extern atomic_unchecked_t cm_nodes_destroyed;
31594 +extern atomic_unchecked_t cm_accel_dropped_pkts;
31595 +extern atomic_unchecked_t cm_resets_recvd;
31596 +extern atomic_unchecked_t pau_qps_created;
31597 +extern atomic_unchecked_t pau_qps_destroyed;
31598
31599 extern u32 int_mod_timer_init;
31600 extern u32 int_mod_cq_depth_256;
31601 diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
31602 index a4972ab..1bcfc31 100644
31603 --- a/drivers/infiniband/hw/nes/nes_cm.c
31604 +++ b/drivers/infiniband/hw/nes/nes_cm.c
31605 @@ -68,14 +68,14 @@ u32 cm_packets_dropped;
31606 u32 cm_packets_retrans;
31607 u32 cm_packets_created;
31608 u32 cm_packets_received;
31609 -atomic_t cm_listens_created;
31610 -atomic_t cm_listens_destroyed;
31611 +atomic_unchecked_t cm_listens_created;
31612 +atomic_unchecked_t cm_listens_destroyed;
31613 u32 cm_backlog_drops;
31614 -atomic_t cm_loopbacks;
31615 -atomic_t cm_nodes_created;
31616 -atomic_t cm_nodes_destroyed;
31617 -atomic_t cm_accel_dropped_pkts;
31618 -atomic_t cm_resets_recvd;
31619 +atomic_unchecked_t cm_loopbacks;
31620 +atomic_unchecked_t cm_nodes_created;
31621 +atomic_unchecked_t cm_nodes_destroyed;
31622 +atomic_unchecked_t cm_accel_dropped_pkts;
31623 +atomic_unchecked_t cm_resets_recvd;
31624
31625 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
31626 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
31627 @@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
31628
31629 static struct nes_cm_core *g_cm_core;
31630
31631 -atomic_t cm_connects;
31632 -atomic_t cm_accepts;
31633 -atomic_t cm_disconnects;
31634 -atomic_t cm_closes;
31635 -atomic_t cm_connecteds;
31636 -atomic_t cm_connect_reqs;
31637 -atomic_t cm_rejects;
31638 +atomic_unchecked_t cm_connects;
31639 +atomic_unchecked_t cm_accepts;
31640 +atomic_unchecked_t cm_disconnects;
31641 +atomic_unchecked_t cm_closes;
31642 +atomic_unchecked_t cm_connecteds;
31643 +atomic_unchecked_t cm_connect_reqs;
31644 +atomic_unchecked_t cm_rejects;
31645
31646 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
31647 {
31648 @@ -1274,7 +1274,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
31649 kfree(listener);
31650 listener = NULL;
31651 ret = 0;
31652 - atomic_inc(&cm_listens_destroyed);
31653 + atomic_inc_unchecked(&cm_listens_destroyed);
31654 } else {
31655 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
31656 }
31657 @@ -1473,7 +1473,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
31658 cm_node->rem_mac);
31659
31660 add_hte_node(cm_core, cm_node);
31661 - atomic_inc(&cm_nodes_created);
31662 + atomic_inc_unchecked(&cm_nodes_created);
31663
31664 return cm_node;
31665 }
31666 @@ -1531,7 +1531,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
31667 }
31668
31669 atomic_dec(&cm_core->node_cnt);
31670 - atomic_inc(&cm_nodes_destroyed);
31671 + atomic_inc_unchecked(&cm_nodes_destroyed);
31672 nesqp = cm_node->nesqp;
31673 if (nesqp) {
31674 nesqp->cm_node = NULL;
31675 @@ -1595,7 +1595,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
31676
31677 static void drop_packet(struct sk_buff *skb)
31678 {
31679 - atomic_inc(&cm_accel_dropped_pkts);
31680 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
31681 dev_kfree_skb_any(skb);
31682 }
31683
31684 @@ -1658,7 +1658,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
31685 {
31686
31687 int reset = 0; /* whether to send reset in case of err.. */
31688 - atomic_inc(&cm_resets_recvd);
31689 + atomic_inc_unchecked(&cm_resets_recvd);
31690 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
31691 " refcnt=%d\n", cm_node, cm_node->state,
31692 atomic_read(&cm_node->ref_count));
31693 @@ -2299,7 +2299,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
31694 rem_ref_cm_node(cm_node->cm_core, cm_node);
31695 return NULL;
31696 }
31697 - atomic_inc(&cm_loopbacks);
31698 + atomic_inc_unchecked(&cm_loopbacks);
31699 loopbackremotenode->loopbackpartner = cm_node;
31700 loopbackremotenode->tcp_cntxt.rcv_wscale =
31701 NES_CM_DEFAULT_RCV_WND_SCALE;
31702 @@ -2574,7 +2574,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
31703 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
31704 else {
31705 rem_ref_cm_node(cm_core, cm_node);
31706 - atomic_inc(&cm_accel_dropped_pkts);
31707 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
31708 dev_kfree_skb_any(skb);
31709 }
31710 break;
31711 @@ -2881,7 +2881,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
31712
31713 if ((cm_id) && (cm_id->event_handler)) {
31714 if (issue_disconn) {
31715 - atomic_inc(&cm_disconnects);
31716 + atomic_inc_unchecked(&cm_disconnects);
31717 cm_event.event = IW_CM_EVENT_DISCONNECT;
31718 cm_event.status = disconn_status;
31719 cm_event.local_addr = cm_id->local_addr;
31720 @@ -2903,7 +2903,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
31721 }
31722
31723 if (issue_close) {
31724 - atomic_inc(&cm_closes);
31725 + atomic_inc_unchecked(&cm_closes);
31726 nes_disconnect(nesqp, 1);
31727
31728 cm_id->provider_data = nesqp;
31729 @@ -3039,7 +3039,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
31730
31731 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
31732 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
31733 - atomic_inc(&cm_accepts);
31734 + atomic_inc_unchecked(&cm_accepts);
31735
31736 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
31737 netdev_refcnt_read(nesvnic->netdev));
31738 @@ -3241,7 +3241,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
31739 struct nes_cm_core *cm_core;
31740 u8 *start_buff;
31741
31742 - atomic_inc(&cm_rejects);
31743 + atomic_inc_unchecked(&cm_rejects);
31744 cm_node = (struct nes_cm_node *)cm_id->provider_data;
31745 loopback = cm_node->loopbackpartner;
31746 cm_core = cm_node->cm_core;
31747 @@ -3301,7 +3301,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
31748 ntohl(cm_id->local_addr.sin_addr.s_addr),
31749 ntohs(cm_id->local_addr.sin_port));
31750
31751 - atomic_inc(&cm_connects);
31752 + atomic_inc_unchecked(&cm_connects);
31753 nesqp->active_conn = 1;
31754
31755 /* cache the cm_id in the qp */
31756 @@ -3407,7 +3407,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
31757 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
31758 return err;
31759 }
31760 - atomic_inc(&cm_listens_created);
31761 + atomic_inc_unchecked(&cm_listens_created);
31762 }
31763
31764 cm_id->add_ref(cm_id);
31765 @@ -3508,7 +3508,7 @@ static void cm_event_connected(struct nes_cm_event *event)
31766
31767 if (nesqp->destroyed)
31768 return;
31769 - atomic_inc(&cm_connecteds);
31770 + atomic_inc_unchecked(&cm_connecteds);
31771 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
31772 " local port 0x%04X. jiffies = %lu.\n",
31773 nesqp->hwqp.qp_id,
31774 @@ -3695,7 +3695,7 @@ static void cm_event_reset(struct nes_cm_event *event)
31775
31776 cm_id->add_ref(cm_id);
31777 ret = cm_id->event_handler(cm_id, &cm_event);
31778 - atomic_inc(&cm_closes);
31779 + atomic_inc_unchecked(&cm_closes);
31780 cm_event.event = IW_CM_EVENT_CLOSE;
31781 cm_event.status = 0;
31782 cm_event.provider_data = cm_id->provider_data;
31783 @@ -3731,7 +3731,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
31784 return;
31785 cm_id = cm_node->cm_id;
31786
31787 - atomic_inc(&cm_connect_reqs);
31788 + atomic_inc_unchecked(&cm_connect_reqs);
31789 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
31790 cm_node, cm_id, jiffies);
31791
31792 @@ -3771,7 +3771,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
31793 return;
31794 cm_id = cm_node->cm_id;
31795
31796 - atomic_inc(&cm_connect_reqs);
31797 + atomic_inc_unchecked(&cm_connect_reqs);
31798 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
31799 cm_node, cm_id, jiffies);
31800
31801 diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
31802 index 3ba7be3..c81f6ff 100644
31803 --- a/drivers/infiniband/hw/nes/nes_mgt.c
31804 +++ b/drivers/infiniband/hw/nes/nes_mgt.c
31805 @@ -40,8 +40,8 @@
31806 #include "nes.h"
31807 #include "nes_mgt.h"
31808
31809 -atomic_t pau_qps_created;
31810 -atomic_t pau_qps_destroyed;
31811 +atomic_unchecked_t pau_qps_created;
31812 +atomic_unchecked_t pau_qps_destroyed;
31813
31814 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
31815 {
31816 @@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
31817 {
31818 struct sk_buff *skb;
31819 unsigned long flags;
31820 - atomic_inc(&pau_qps_destroyed);
31821 + atomic_inc_unchecked(&pau_qps_destroyed);
31822
31823 /* Free packets that have not yet been forwarded */
31824 /* Lock is acquired by skb_dequeue when removing the skb */
31825 @@ -812,7 +812,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
31826 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
31827 skb_queue_head_init(&nesqp->pau_list);
31828 spin_lock_init(&nesqp->pau_lock);
31829 - atomic_inc(&pau_qps_created);
31830 + atomic_inc_unchecked(&pau_qps_created);
31831 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
31832 }
31833
31834 diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
31835 index f3a3ecf..57d311d 100644
31836 --- a/drivers/infiniband/hw/nes/nes_nic.c
31837 +++ b/drivers/infiniband/hw/nes/nes_nic.c
31838 @@ -1277,39 +1277,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
31839 target_stat_values[++index] = mh_detected;
31840 target_stat_values[++index] = mh_pauses_sent;
31841 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
31842 - target_stat_values[++index] = atomic_read(&cm_connects);
31843 - target_stat_values[++index] = atomic_read(&cm_accepts);
31844 - target_stat_values[++index] = atomic_read(&cm_disconnects);
31845 - target_stat_values[++index] = atomic_read(&cm_connecteds);
31846 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
31847 - target_stat_values[++index] = atomic_read(&cm_rejects);
31848 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
31849 - target_stat_values[++index] = atomic_read(&qps_created);
31850 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
31851 - target_stat_values[++index] = atomic_read(&qps_destroyed);
31852 - target_stat_values[++index] = atomic_read(&cm_closes);
31853 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
31854 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
31855 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
31856 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
31857 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
31858 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
31859 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
31860 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
31861 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
31862 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
31863 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
31864 target_stat_values[++index] = cm_packets_sent;
31865 target_stat_values[++index] = cm_packets_bounced;
31866 target_stat_values[++index] = cm_packets_created;
31867 target_stat_values[++index] = cm_packets_received;
31868 target_stat_values[++index] = cm_packets_dropped;
31869 target_stat_values[++index] = cm_packets_retrans;
31870 - target_stat_values[++index] = atomic_read(&cm_listens_created);
31871 - target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
31872 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
31873 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
31874 target_stat_values[++index] = cm_backlog_drops;
31875 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
31876 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
31877 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
31878 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
31879 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
31880 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
31881 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
31882 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
31883 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
31884 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
31885 target_stat_values[++index] = nesadapter->free_4kpbl;
31886 target_stat_values[++index] = nesadapter->free_256pbl;
31887 target_stat_values[++index] = int_mod_timer_init;
31888 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
31889 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
31890 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
31891 - target_stat_values[++index] = atomic_read(&pau_qps_created);
31892 - target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
31893 + target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
31894 + target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
31895 }
31896
31897 /**
31898 diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
31899 index 0927b5c..ed67986 100644
31900 --- a/drivers/infiniband/hw/nes/nes_verbs.c
31901 +++ b/drivers/infiniband/hw/nes/nes_verbs.c
31902 @@ -46,9 +46,9 @@
31903
31904 #include <rdma/ib_umem.h>
31905
31906 -atomic_t mod_qp_timouts;
31907 -atomic_t qps_created;
31908 -atomic_t sw_qps_destroyed;
31909 +atomic_unchecked_t mod_qp_timouts;
31910 +atomic_unchecked_t qps_created;
31911 +atomic_unchecked_t sw_qps_destroyed;
31912
31913 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
31914
31915 @@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
31916 if (init_attr->create_flags)
31917 return ERR_PTR(-EINVAL);
31918
31919 - atomic_inc(&qps_created);
31920 + atomic_inc_unchecked(&qps_created);
31921 switch (init_attr->qp_type) {
31922 case IB_QPT_RC:
31923 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
31924 @@ -1460,7 +1460,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
31925 struct iw_cm_event cm_event;
31926 int ret = 0;
31927
31928 - atomic_inc(&sw_qps_destroyed);
31929 + atomic_inc_unchecked(&sw_qps_destroyed);
31930 nesqp->destroyed = 1;
31931
31932 /* Blow away the connection if it exists. */
31933 diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
31934 index b881bdc..c2e360c 100644
31935 --- a/drivers/infiniband/hw/qib/qib.h
31936 +++ b/drivers/infiniband/hw/qib/qib.h
31937 @@ -51,6 +51,7 @@
31938 #include <linux/completion.h>
31939 #include <linux/kref.h>
31940 #include <linux/sched.h>
31941 +#include <linux/slab.h>
31942
31943 #include "qib_common.h"
31944 #include "qib_verbs.h"
31945 diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
31946 index c351aa4..e6967c2 100644
31947 --- a/drivers/input/gameport/gameport.c
31948 +++ b/drivers/input/gameport/gameport.c
31949 @@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
31950 */
31951 static void gameport_init_port(struct gameport *gameport)
31952 {
31953 - static atomic_t gameport_no = ATOMIC_INIT(0);
31954 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
31955
31956 __module_get(THIS_MODULE);
31957
31958 mutex_init(&gameport->drv_mutex);
31959 device_initialize(&gameport->dev);
31960 dev_set_name(&gameport->dev, "gameport%lu",
31961 - (unsigned long)atomic_inc_return(&gameport_no) - 1);
31962 + (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
31963 gameport->dev.bus = &gameport_bus;
31964 gameport->dev.release = gameport_release_port;
31965 if (gameport->parent)
31966 diff --git a/drivers/input/input.c b/drivers/input/input.c
31967 index 1f78c95..3cddc6c 100644
31968 --- a/drivers/input/input.c
31969 +++ b/drivers/input/input.c
31970 @@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struct input_dev *dev)
31971 */
31972 int input_register_device(struct input_dev *dev)
31973 {
31974 - static atomic_t input_no = ATOMIC_INIT(0);
31975 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
31976 struct input_handler *handler;
31977 const char *path;
31978 int error;
31979 @@ -1851,7 +1851,7 @@ int input_register_device(struct input_dev *dev)
31980 dev->setkeycode = input_default_setkeycode;
31981
31982 dev_set_name(&dev->dev, "input%ld",
31983 - (unsigned long) atomic_inc_return(&input_no) - 1);
31984 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
31985
31986 error = device_add(&dev->dev);
31987 if (error)
31988 diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
31989 index b8d8611..7a4a04b 100644
31990 --- a/drivers/input/joystick/sidewinder.c
31991 +++ b/drivers/input/joystick/sidewinder.c
31992 @@ -30,6 +30,7 @@
31993 #include <linux/kernel.h>
31994 #include <linux/module.h>
31995 #include <linux/slab.h>
31996 +#include <linux/sched.h>
31997 #include <linux/init.h>
31998 #include <linux/input.h>
31999 #include <linux/gameport.h>
32000 diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
32001 index fd7a0d5..a4af10c 100644
32002 --- a/drivers/input/joystick/xpad.c
32003 +++ b/drivers/input/joystick/xpad.c
32004 @@ -710,7 +710,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
32005
32006 static int xpad_led_probe(struct usb_xpad *xpad)
32007 {
32008 - static atomic_t led_seq = ATOMIC_INIT(0);
32009 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
32010 long led_no;
32011 struct xpad_led *led;
32012 struct led_classdev *led_cdev;
32013 @@ -723,7 +723,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
32014 if (!led)
32015 return -ENOMEM;
32016
32017 - led_no = (long)atomic_inc_return(&led_seq) - 1;
32018 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
32019
32020 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
32021 led->xpad = xpad;
32022 diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
32023 index 0110b5a..d3ad144 100644
32024 --- a/drivers/input/mousedev.c
32025 +++ b/drivers/input/mousedev.c
32026 @@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
32027
32028 spin_unlock_irq(&client->packet_lock);
32029
32030 - if (copy_to_user(buffer, data, count))
32031 + if (count > sizeof(data) || copy_to_user(buffer, data, count))
32032 return -EFAULT;
32033
32034 return count;
32035 diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
32036 index ba70058..571d25d 100644
32037 --- a/drivers/input/serio/serio.c
32038 +++ b/drivers/input/serio/serio.c
32039 @@ -497,7 +497,7 @@ static void serio_release_port(struct device *dev)
32040 */
32041 static void serio_init_port(struct serio *serio)
32042 {
32043 - static atomic_t serio_no = ATOMIC_INIT(0);
32044 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
32045
32046 __module_get(THIS_MODULE);
32047
32048 @@ -508,7 +508,7 @@ static void serio_init_port(struct serio *serio)
32049 mutex_init(&serio->drv_mutex);
32050 device_initialize(&serio->dev);
32051 dev_set_name(&serio->dev, "serio%ld",
32052 - (long)atomic_inc_return(&serio_no) - 1);
32053 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
32054 serio->dev.bus = &serio_bus;
32055 serio->dev.release = serio_release_port;
32056 serio->dev.groups = serio_device_attr_groups;
32057 diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
32058 index e44933d..9ba484a 100644
32059 --- a/drivers/isdn/capi/capi.c
32060 +++ b/drivers/isdn/capi/capi.c
32061 @@ -83,8 +83,8 @@ struct capiminor {
32062
32063 struct capi20_appl *ap;
32064 u32 ncci;
32065 - atomic_t datahandle;
32066 - atomic_t msgid;
32067 + atomic_unchecked_t datahandle;
32068 + atomic_unchecked_t msgid;
32069
32070 struct tty_port port;
32071 int ttyinstop;
32072 @@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
32073 capimsg_setu16(s, 2, mp->ap->applid);
32074 capimsg_setu8 (s, 4, CAPI_DATA_B3);
32075 capimsg_setu8 (s, 5, CAPI_RESP);
32076 - capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
32077 + capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
32078 capimsg_setu32(s, 8, mp->ncci);
32079 capimsg_setu16(s, 12, datahandle);
32080 }
32081 @@ -518,14 +518,14 @@ static void handle_minor_send(struct capiminor *mp)
32082 mp->outbytes -= len;
32083 spin_unlock_bh(&mp->outlock);
32084
32085 - datahandle = atomic_inc_return(&mp->datahandle);
32086 + datahandle = atomic_inc_return_unchecked(&mp->datahandle);
32087 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
32088 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
32089 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
32090 capimsg_setu16(skb->data, 2, mp->ap->applid);
32091 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
32092 capimsg_setu8 (skb->data, 5, CAPI_REQ);
32093 - capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
32094 + capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
32095 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
32096 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
32097 capimsg_setu16(skb->data, 16, len); /* Data length */
32098 diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
32099 index db621db..825ea1a 100644
32100 --- a/drivers/isdn/gigaset/common.c
32101 +++ b/drivers/isdn/gigaset/common.c
32102 @@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
32103 cs->commands_pending = 0;
32104 cs->cur_at_seq = 0;
32105 cs->gotfwver = -1;
32106 - cs->open_count = 0;
32107 + local_set(&cs->open_count, 0);
32108 cs->dev = NULL;
32109 cs->tty = NULL;
32110 cs->tty_dev = NULL;
32111 diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
32112 index 212efaf..f187c6b 100644
32113 --- a/drivers/isdn/gigaset/gigaset.h
32114 +++ b/drivers/isdn/gigaset/gigaset.h
32115 @@ -35,6 +35,7 @@
32116 #include <linux/tty_driver.h>
32117 #include <linux/list.h>
32118 #include <linux/atomic.h>
32119 +#include <asm/local.h>
32120
32121 #define GIG_VERSION {0, 5, 0, 0}
32122 #define GIG_COMPAT {0, 4, 0, 0}
32123 @@ -433,7 +434,7 @@ struct cardstate {
32124 spinlock_t cmdlock;
32125 unsigned curlen, cmdbytes;
32126
32127 - unsigned open_count;
32128 + local_t open_count;
32129 struct tty_struct *tty;
32130 struct tasklet_struct if_wake_tasklet;
32131 unsigned control_state;
32132 diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
32133 index ee0a549..a7c9798 100644
32134 --- a/drivers/isdn/gigaset/interface.c
32135 +++ b/drivers/isdn/gigaset/interface.c
32136 @@ -163,9 +163,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
32137 }
32138 tty->driver_data = cs;
32139
32140 - ++cs->open_count;
32141 -
32142 - if (cs->open_count == 1) {
32143 + if (local_inc_return(&cs->open_count) == 1) {
32144 spin_lock_irqsave(&cs->lock, flags);
32145 cs->tty = tty;
32146 spin_unlock_irqrestore(&cs->lock, flags);
32147 @@ -193,10 +191,10 @@ static void if_close(struct tty_struct *tty, struct file *filp)
32148
32149 if (!cs->connected)
32150 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
32151 - else if (!cs->open_count)
32152 + else if (!local_read(&cs->open_count))
32153 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32154 else {
32155 - if (!--cs->open_count) {
32156 + if (!local_dec_return(&cs->open_count)) {
32157 spin_lock_irqsave(&cs->lock, flags);
32158 cs->tty = NULL;
32159 spin_unlock_irqrestore(&cs->lock, flags);
32160 @@ -231,7 +229,7 @@ static int if_ioctl(struct tty_struct *tty,
32161 if (!cs->connected) {
32162 gig_dbg(DEBUG_IF, "not connected");
32163 retval = -ENODEV;
32164 - } else if (!cs->open_count)
32165 + } else if (!local_read(&cs->open_count))
32166 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32167 else {
32168 retval = 0;
32169 @@ -361,7 +359,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
32170 retval = -ENODEV;
32171 goto done;
32172 }
32173 - if (!cs->open_count) {
32174 + if (!local_read(&cs->open_count)) {
32175 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32176 retval = -ENODEV;
32177 goto done;
32178 @@ -414,7 +412,7 @@ static int if_write_room(struct tty_struct *tty)
32179 if (!cs->connected) {
32180 gig_dbg(DEBUG_IF, "not connected");
32181 retval = -ENODEV;
32182 - } else if (!cs->open_count)
32183 + } else if (!local_read(&cs->open_count))
32184 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32185 else if (cs->mstate != MS_LOCKED) {
32186 dev_warn(cs->dev, "can't write to unlocked device\n");
32187 @@ -444,7 +442,7 @@ static int if_chars_in_buffer(struct tty_struct *tty)
32188
32189 if (!cs->connected)
32190 gig_dbg(DEBUG_IF, "not connected");
32191 - else if (!cs->open_count)
32192 + else if (!local_read(&cs->open_count))
32193 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32194 else if (cs->mstate != MS_LOCKED)
32195 dev_warn(cs->dev, "can't write to unlocked device\n");
32196 @@ -472,7 +470,7 @@ static void if_throttle(struct tty_struct *tty)
32197
32198 if (!cs->connected)
32199 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
32200 - else if (!cs->open_count)
32201 + else if (!local_read(&cs->open_count))
32202 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32203 else
32204 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
32205 @@ -496,7 +494,7 @@ static void if_unthrottle(struct tty_struct *tty)
32206
32207 if (!cs->connected)
32208 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
32209 - else if (!cs->open_count)
32210 + else if (!local_read(&cs->open_count))
32211 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32212 else
32213 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
32214 @@ -527,7 +525,7 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
32215 goto out;
32216 }
32217
32218 - if (!cs->open_count) {
32219 + if (!local_read(&cs->open_count)) {
32220 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32221 goto out;
32222 }
32223 diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
32224 index 2a57da59..e7a12ed 100644
32225 --- a/drivers/isdn/hardware/avm/b1.c
32226 +++ b/drivers/isdn/hardware/avm/b1.c
32227 @@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart * t4file)
32228 }
32229 if (left) {
32230 if (t4file->user) {
32231 - if (copy_from_user(buf, dp, left))
32232 + if (left > sizeof buf || copy_from_user(buf, dp, left))
32233 return -EFAULT;
32234 } else {
32235 memcpy(buf, dp, left);
32236 @@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart * config)
32237 }
32238 if (left) {
32239 if (config->user) {
32240 - if (copy_from_user(buf, dp, left))
32241 + if (left > sizeof buf || copy_from_user(buf, dp, left))
32242 return -EFAULT;
32243 } else {
32244 memcpy(buf, dp, left);
32245 diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
32246 index 85784a7..a19ca98 100644
32247 --- a/drivers/isdn/hardware/eicon/divasync.h
32248 +++ b/drivers/isdn/hardware/eicon/divasync.h
32249 @@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
32250 } diva_didd_add_adapter_t;
32251 typedef struct _diva_didd_remove_adapter {
32252 IDI_CALL p_request;
32253 -} diva_didd_remove_adapter_t;
32254 +} __no_const diva_didd_remove_adapter_t;
32255 typedef struct _diva_didd_read_adapter_array {
32256 void * buffer;
32257 dword length;
32258 diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
32259 index a3bd163..8956575 100644
32260 --- a/drivers/isdn/hardware/eicon/xdi_adapter.h
32261 +++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
32262 @@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
32263 typedef struct _diva_os_idi_adapter_interface {
32264 diva_init_card_proc_t cleanup_adapter_proc;
32265 diva_cmd_card_proc_t cmd_proc;
32266 -} diva_os_idi_adapter_interface_t;
32267 +} __no_const diva_os_idi_adapter_interface_t;
32268
32269 typedef struct _diva_os_xdi_adapter {
32270 struct list_head link;
32271 diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
32272 index 1f355bb..43f1fea 100644
32273 --- a/drivers/isdn/icn/icn.c
32274 +++ b/drivers/isdn/icn/icn.c
32275 @@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len, int user, icn_card * card)
32276 if (count > len)
32277 count = len;
32278 if (user) {
32279 - if (copy_from_user(msg, buf, count))
32280 + if (count > sizeof msg || copy_from_user(msg, buf, count))
32281 return -EFAULT;
32282 } else
32283 memcpy(msg, buf, count);
32284 diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
32285 index b5fdcb7..5b6c59f 100644
32286 --- a/drivers/lguest/core.c
32287 +++ b/drivers/lguest/core.c
32288 @@ -92,9 +92,17 @@ static __init int map_switcher(void)
32289 * it's worked so far. The end address needs +1 because __get_vm_area
32290 * allocates an extra guard page, so we need space for that.
32291 */
32292 +
32293 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
32294 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
32295 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
32296 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
32297 +#else
32298 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
32299 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
32300 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
32301 +#endif
32302 +
32303 if (!switcher_vma) {
32304 err = -ENOMEM;
32305 printk("lguest: could not map switcher pages high\n");
32306 @@ -119,7 +127,7 @@ static __init int map_switcher(void)
32307 * Now the Switcher is mapped at the right address, we can't fail!
32308 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
32309 */
32310 - memcpy(switcher_vma->addr, start_switcher_text,
32311 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
32312 end_switcher_text - start_switcher_text);
32313
32314 printk(KERN_INFO "lguest: mapped switcher at %p\n",
32315 diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
32316 index 3980903..ce25c5e 100644
32317 --- a/drivers/lguest/x86/core.c
32318 +++ b/drivers/lguest/x86/core.c
32319 @@ -59,7 +59,7 @@ static struct {
32320 /* Offset from where switcher.S was compiled to where we've copied it */
32321 static unsigned long switcher_offset(void)
32322 {
32323 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
32324 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
32325 }
32326
32327 /* This cpu's struct lguest_pages. */
32328 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
32329 * These copies are pretty cheap, so we do them unconditionally: */
32330 /* Save the current Host top-level page directory.
32331 */
32332 +
32333 +#ifdef CONFIG_PAX_PER_CPU_PGD
32334 + pages->state.host_cr3 = read_cr3();
32335 +#else
32336 pages->state.host_cr3 = __pa(current->mm->pgd);
32337 +#endif
32338 +
32339 /*
32340 * Set up the Guest's page tables to see this CPU's pages (and no
32341 * other CPU's pages).
32342 @@ -472,7 +478,7 @@ void __init lguest_arch_host_init(void)
32343 * compiled-in switcher code and the high-mapped copy we just made.
32344 */
32345 for (i = 0; i < IDT_ENTRIES; i++)
32346 - default_idt_entries[i] += switcher_offset();
32347 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
32348
32349 /*
32350 * Set up the Switcher's per-cpu areas.
32351 @@ -555,7 +561,7 @@ void __init lguest_arch_host_init(void)
32352 * it will be undisturbed when we switch. To change %cs and jump we
32353 * need this structure to feed to Intel's "lcall" instruction.
32354 */
32355 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
32356 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
32357 lguest_entry.segment = LGUEST_CS;
32358
32359 /*
32360 diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
32361 index 40634b0..4f5855e 100644
32362 --- a/drivers/lguest/x86/switcher_32.S
32363 +++ b/drivers/lguest/x86/switcher_32.S
32364 @@ -87,6 +87,7 @@
32365 #include <asm/page.h>
32366 #include <asm/segment.h>
32367 #include <asm/lguest.h>
32368 +#include <asm/processor-flags.h>
32369
32370 // We mark the start of the code to copy
32371 // It's placed in .text tho it's never run here
32372 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
32373 // Changes type when we load it: damn Intel!
32374 // For after we switch over our page tables
32375 // That entry will be read-only: we'd crash.
32376 +
32377 +#ifdef CONFIG_PAX_KERNEXEC
32378 + mov %cr0, %edx
32379 + xor $X86_CR0_WP, %edx
32380 + mov %edx, %cr0
32381 +#endif
32382 +
32383 movl $(GDT_ENTRY_TSS*8), %edx
32384 ltr %dx
32385
32386 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
32387 // Let's clear it again for our return.
32388 // The GDT descriptor of the Host
32389 // Points to the table after two "size" bytes
32390 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
32391 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
32392 // Clear "used" from type field (byte 5, bit 2)
32393 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
32394 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
32395 +
32396 +#ifdef CONFIG_PAX_KERNEXEC
32397 + mov %cr0, %eax
32398 + xor $X86_CR0_WP, %eax
32399 + mov %eax, %cr0
32400 +#endif
32401
32402 // Once our page table's switched, the Guest is live!
32403 // The Host fades as we run this final step.
32404 @@ -295,13 +309,12 @@ deliver_to_host:
32405 // I consulted gcc, and it gave
32406 // These instructions, which I gladly credit:
32407 leal (%edx,%ebx,8), %eax
32408 - movzwl (%eax),%edx
32409 - movl 4(%eax), %eax
32410 - xorw %ax, %ax
32411 - orl %eax, %edx
32412 + movl 4(%eax), %edx
32413 + movw (%eax), %dx
32414 // Now the address of the handler's in %edx
32415 // We call it now: its "iret" drops us home.
32416 - jmp *%edx
32417 + ljmp $__KERNEL_CS, $1f
32418 +1: jmp *%edx
32419
32420 // Every interrupt can come to us here
32421 // But we must truly tell each apart.
32422 diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
32423 index 4daf9e5..b8d1d0f 100644
32424 --- a/drivers/macintosh/macio_asic.c
32425 +++ b/drivers/macintosh/macio_asic.c
32426 @@ -748,7 +748,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
32427 * MacIO is matched against any Apple ID, it's probe() function
32428 * will then decide wether it applies or not
32429 */
32430 -static const struct pci_device_id __devinitdata pci_ids [] = { {
32431 +static const struct pci_device_id __devinitconst pci_ids [] = { {
32432 .vendor = PCI_VENDOR_ID_APPLE,
32433 .device = PCI_ANY_ID,
32434 .subvendor = PCI_ANY_ID,
32435 diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
32436 index 1ce84ed..0fdd40a 100644
32437 --- a/drivers/md/dm-ioctl.c
32438 +++ b/drivers/md/dm-ioctl.c
32439 @@ -1589,7 +1589,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
32440 cmd == DM_LIST_VERSIONS_CMD)
32441 return 0;
32442
32443 - if ((cmd == DM_DEV_CREATE_CMD)) {
32444 + if (cmd == DM_DEV_CREATE_CMD) {
32445 if (!*param->name) {
32446 DMWARN("name not supplied when creating device");
32447 return -EINVAL;
32448 diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
32449 index 9bfd057..01180bc 100644
32450 --- a/drivers/md/dm-raid1.c
32451 +++ b/drivers/md/dm-raid1.c
32452 @@ -40,7 +40,7 @@ enum dm_raid1_error {
32453
32454 struct mirror {
32455 struct mirror_set *ms;
32456 - atomic_t error_count;
32457 + atomic_unchecked_t error_count;
32458 unsigned long error_type;
32459 struct dm_dev *dev;
32460 sector_t offset;
32461 @@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
32462 struct mirror *m;
32463
32464 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
32465 - if (!atomic_read(&m->error_count))
32466 + if (!atomic_read_unchecked(&m->error_count))
32467 return m;
32468
32469 return NULL;
32470 @@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
32471 * simple way to tell if a device has encountered
32472 * errors.
32473 */
32474 - atomic_inc(&m->error_count);
32475 + atomic_inc_unchecked(&m->error_count);
32476
32477 if (test_and_set_bit(error_type, &m->error_type))
32478 return;
32479 @@ -408,7 +408,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
32480 struct mirror *m = get_default_mirror(ms);
32481
32482 do {
32483 - if (likely(!atomic_read(&m->error_count)))
32484 + if (likely(!atomic_read_unchecked(&m->error_count)))
32485 return m;
32486
32487 if (m-- == ms->mirror)
32488 @@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
32489 {
32490 struct mirror *default_mirror = get_default_mirror(m->ms);
32491
32492 - return !atomic_read(&default_mirror->error_count);
32493 + return !atomic_read_unchecked(&default_mirror->error_count);
32494 }
32495
32496 static int mirror_available(struct mirror_set *ms, struct bio *bio)
32497 @@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
32498 */
32499 if (likely(region_in_sync(ms, region, 1)))
32500 m = choose_mirror(ms, bio->bi_sector);
32501 - else if (m && atomic_read(&m->error_count))
32502 + else if (m && atomic_read_unchecked(&m->error_count))
32503 m = NULL;
32504
32505 if (likely(m))
32506 @@ -937,7 +937,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
32507 }
32508
32509 ms->mirror[mirror].ms = ms;
32510 - atomic_set(&(ms->mirror[mirror].error_count), 0);
32511 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
32512 ms->mirror[mirror].error_type = 0;
32513 ms->mirror[mirror].offset = offset;
32514
32515 @@ -1347,7 +1347,7 @@ static void mirror_resume(struct dm_target *ti)
32516 */
32517 static char device_status_char(struct mirror *m)
32518 {
32519 - if (!atomic_read(&(m->error_count)))
32520 + if (!atomic_read_unchecked(&(m->error_count)))
32521 return 'A';
32522
32523 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
32524 diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
32525 index 3d80cf0..b77cc47 100644
32526 --- a/drivers/md/dm-stripe.c
32527 +++ b/drivers/md/dm-stripe.c
32528 @@ -20,7 +20,7 @@ struct stripe {
32529 struct dm_dev *dev;
32530 sector_t physical_start;
32531
32532 - atomic_t error_count;
32533 + atomic_unchecked_t error_count;
32534 };
32535
32536 struct stripe_c {
32537 @@ -192,7 +192,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
32538 kfree(sc);
32539 return r;
32540 }
32541 - atomic_set(&(sc->stripe[i].error_count), 0);
32542 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
32543 }
32544
32545 ti->private = sc;
32546 @@ -314,7 +314,7 @@ static int stripe_status(struct dm_target *ti,
32547 DMEMIT("%d ", sc->stripes);
32548 for (i = 0; i < sc->stripes; i++) {
32549 DMEMIT("%s ", sc->stripe[i].dev->name);
32550 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
32551 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
32552 'D' : 'A';
32553 }
32554 buffer[i] = '\0';
32555 @@ -361,8 +361,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
32556 */
32557 for (i = 0; i < sc->stripes; i++)
32558 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
32559 - atomic_inc(&(sc->stripe[i].error_count));
32560 - if (atomic_read(&(sc->stripe[i].error_count)) <
32561 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
32562 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
32563 DM_IO_ERROR_THRESHOLD)
32564 schedule_work(&sc->trigger_event);
32565 }
32566 diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
32567 index 63cc542..8d45caf3 100644
32568 --- a/drivers/md/dm-table.c
32569 +++ b/drivers/md/dm-table.c
32570 @@ -391,7 +391,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
32571 if (!dev_size)
32572 return 0;
32573
32574 - if ((start >= dev_size) || (start + len > dev_size)) {
32575 + if ((start >= dev_size) || (len > dev_size - start)) {
32576 DMWARN("%s: %s too small for target: "
32577 "start=%llu, len=%llu, dev_size=%llu",
32578 dm_device_name(ti->table->md), bdevname(bdev, b),
32579 diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
32580 index 237571a..fb6d19b 100644
32581 --- a/drivers/md/dm-thin-metadata.c
32582 +++ b/drivers/md/dm-thin-metadata.c
32583 @@ -432,7 +432,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
32584
32585 pmd->info.tm = tm;
32586 pmd->info.levels = 2;
32587 - pmd->info.value_type.context = pmd->data_sm;
32588 + pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
32589 pmd->info.value_type.size = sizeof(__le64);
32590 pmd->info.value_type.inc = data_block_inc;
32591 pmd->info.value_type.dec = data_block_dec;
32592 @@ -451,7 +451,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
32593
32594 pmd->bl_info.tm = tm;
32595 pmd->bl_info.levels = 1;
32596 - pmd->bl_info.value_type.context = pmd->data_sm;
32597 + pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
32598 pmd->bl_info.value_type.size = sizeof(__le64);
32599 pmd->bl_info.value_type.inc = data_block_inc;
32600 pmd->bl_info.value_type.dec = data_block_dec;
32601 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
32602 index b89c548..2af3ce4 100644
32603 --- a/drivers/md/dm.c
32604 +++ b/drivers/md/dm.c
32605 @@ -176,9 +176,9 @@ struct mapped_device {
32606 /*
32607 * Event handling.
32608 */
32609 - atomic_t event_nr;
32610 + atomic_unchecked_t event_nr;
32611 wait_queue_head_t eventq;
32612 - atomic_t uevent_seq;
32613 + atomic_unchecked_t uevent_seq;
32614 struct list_head uevent_list;
32615 spinlock_t uevent_lock; /* Protect access to uevent_list */
32616
32617 @@ -1844,8 +1844,8 @@ static struct mapped_device *alloc_dev(int minor)
32618 rwlock_init(&md->map_lock);
32619 atomic_set(&md->holders, 1);
32620 atomic_set(&md->open_count, 0);
32621 - atomic_set(&md->event_nr, 0);
32622 - atomic_set(&md->uevent_seq, 0);
32623 + atomic_set_unchecked(&md->event_nr, 0);
32624 + atomic_set_unchecked(&md->uevent_seq, 0);
32625 INIT_LIST_HEAD(&md->uevent_list);
32626 spin_lock_init(&md->uevent_lock);
32627
32628 @@ -1979,7 +1979,7 @@ static void event_callback(void *context)
32629
32630 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
32631
32632 - atomic_inc(&md->event_nr);
32633 + atomic_inc_unchecked(&md->event_nr);
32634 wake_up(&md->eventq);
32635 }
32636
32637 @@ -2621,18 +2621,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
32638
32639 uint32_t dm_next_uevent_seq(struct mapped_device *md)
32640 {
32641 - return atomic_add_return(1, &md->uevent_seq);
32642 + return atomic_add_return_unchecked(1, &md->uevent_seq);
32643 }
32644
32645 uint32_t dm_get_event_nr(struct mapped_device *md)
32646 {
32647 - return atomic_read(&md->event_nr);
32648 + return atomic_read_unchecked(&md->event_nr);
32649 }
32650
32651 int dm_wait_event(struct mapped_device *md, int event_nr)
32652 {
32653 return wait_event_interruptible(md->eventq,
32654 - (event_nr != atomic_read(&md->event_nr)));
32655 + (event_nr != atomic_read_unchecked(&md->event_nr)));
32656 }
32657
32658 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
32659 diff --git a/drivers/md/md.c b/drivers/md/md.c
32660 index ce88755..4d8686d 100644
32661 --- a/drivers/md/md.c
32662 +++ b/drivers/md/md.c
32663 @@ -277,10 +277,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
32664 * start build, activate spare
32665 */
32666 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
32667 -static atomic_t md_event_count;
32668 +static atomic_unchecked_t md_event_count;
32669 void md_new_event(struct mddev *mddev)
32670 {
32671 - atomic_inc(&md_event_count);
32672 + atomic_inc_unchecked(&md_event_count);
32673 wake_up(&md_event_waiters);
32674 }
32675 EXPORT_SYMBOL_GPL(md_new_event);
32676 @@ -290,7 +290,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
32677 */
32678 static void md_new_event_inintr(struct mddev *mddev)
32679 {
32680 - atomic_inc(&md_event_count);
32681 + atomic_inc_unchecked(&md_event_count);
32682 wake_up(&md_event_waiters);
32683 }
32684
32685 @@ -1524,7 +1524,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
32686
32687 rdev->preferred_minor = 0xffff;
32688 rdev->data_offset = le64_to_cpu(sb->data_offset);
32689 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
32690 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
32691
32692 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
32693 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
32694 @@ -1743,7 +1743,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
32695 else
32696 sb->resync_offset = cpu_to_le64(0);
32697
32698 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
32699 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
32700
32701 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
32702 sb->size = cpu_to_le64(mddev->dev_sectors);
32703 @@ -2688,7 +2688,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
32704 static ssize_t
32705 errors_show(struct md_rdev *rdev, char *page)
32706 {
32707 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
32708 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
32709 }
32710
32711 static ssize_t
32712 @@ -2697,7 +2697,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
32713 char *e;
32714 unsigned long n = simple_strtoul(buf, &e, 10);
32715 if (*buf && (*e == 0 || *e == '\n')) {
32716 - atomic_set(&rdev->corrected_errors, n);
32717 + atomic_set_unchecked(&rdev->corrected_errors, n);
32718 return len;
32719 }
32720 return -EINVAL;
32721 @@ -3083,8 +3083,8 @@ int md_rdev_init(struct md_rdev *rdev)
32722 rdev->sb_loaded = 0;
32723 rdev->bb_page = NULL;
32724 atomic_set(&rdev->nr_pending, 0);
32725 - atomic_set(&rdev->read_errors, 0);
32726 - atomic_set(&rdev->corrected_errors, 0);
32727 + atomic_set_unchecked(&rdev->read_errors, 0);
32728 + atomic_set_unchecked(&rdev->corrected_errors, 0);
32729
32730 INIT_LIST_HEAD(&rdev->same_set);
32731 init_waitqueue_head(&rdev->blocked_wait);
32732 @@ -6735,7 +6735,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
32733
32734 spin_unlock(&pers_lock);
32735 seq_printf(seq, "\n");
32736 - seq->poll_event = atomic_read(&md_event_count);
32737 + seq->poll_event = atomic_read_unchecked(&md_event_count);
32738 return 0;
32739 }
32740 if (v == (void*)2) {
32741 @@ -6827,7 +6827,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
32742 chunk_kb ? "KB" : "B");
32743 if (bitmap->file) {
32744 seq_printf(seq, ", file: ");
32745 - seq_path(seq, &bitmap->file->f_path, " \t\n");
32746 + seq_path(seq, &bitmap->file->f_path, " \t\n\\");
32747 }
32748
32749 seq_printf(seq, "\n");
32750 @@ -6858,7 +6858,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
32751 return error;
32752
32753 seq = file->private_data;
32754 - seq->poll_event = atomic_read(&md_event_count);
32755 + seq->poll_event = atomic_read_unchecked(&md_event_count);
32756 return error;
32757 }
32758
32759 @@ -6872,7 +6872,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
32760 /* always allow read */
32761 mask = POLLIN | POLLRDNORM;
32762
32763 - if (seq->poll_event != atomic_read(&md_event_count))
32764 + if (seq->poll_event != atomic_read_unchecked(&md_event_count))
32765 mask |= POLLERR | POLLPRI;
32766 return mask;
32767 }
32768 @@ -6916,7 +6916,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
32769 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
32770 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
32771 (int)part_stat_read(&disk->part0, sectors[1]) -
32772 - atomic_read(&disk->sync_io);
32773 + atomic_read_unchecked(&disk->sync_io);
32774 /* sync IO will cause sync_io to increase before the disk_stats
32775 * as sync_io is counted when a request starts, and
32776 * disk_stats is counted when it completes.
32777 diff --git a/drivers/md/md.h b/drivers/md/md.h
32778 index 44c63df..b795d1a 100644
32779 --- a/drivers/md/md.h
32780 +++ b/drivers/md/md.h
32781 @@ -93,13 +93,13 @@ struct md_rdev {
32782 * only maintained for arrays that
32783 * support hot removal
32784 */
32785 - atomic_t read_errors; /* number of consecutive read errors that
32786 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
32787 * we have tried to ignore.
32788 */
32789 struct timespec last_read_error; /* monotonic time since our
32790 * last read error
32791 */
32792 - atomic_t corrected_errors; /* number of corrected read errors,
32793 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
32794 * for reporting to userspace and storing
32795 * in superblock.
32796 */
32797 @@ -421,7 +421,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
32798
32799 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
32800 {
32801 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
32802 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
32803 }
32804
32805 struct md_personality
32806 diff --git a/drivers/md/persistent-data/dm-space-map-checker.c b/drivers/md/persistent-data/dm-space-map-checker.c
32807 index 50ed53b..4f29d7d 100644
32808 --- a/drivers/md/persistent-data/dm-space-map-checker.c
32809 +++ b/drivers/md/persistent-data/dm-space-map-checker.c
32810 @@ -159,7 +159,7 @@ static void ca_destroy(struct count_array *ca)
32811 /*----------------------------------------------------------------*/
32812
32813 struct sm_checker {
32814 - struct dm_space_map sm;
32815 + dm_space_map_no_const sm;
32816
32817 struct count_array old_counts;
32818 struct count_array counts;
32819 diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
32820 index fc469ba..2d91555 100644
32821 --- a/drivers/md/persistent-data/dm-space-map-disk.c
32822 +++ b/drivers/md/persistent-data/dm-space-map-disk.c
32823 @@ -23,7 +23,7 @@
32824 * Space map interface.
32825 */
32826 struct sm_disk {
32827 - struct dm_space_map sm;
32828 + dm_space_map_no_const sm;
32829
32830 struct ll_disk ll;
32831 struct ll_disk old_ll;
32832 diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
32833 index e89ae5e..062e4c2 100644
32834 --- a/drivers/md/persistent-data/dm-space-map-metadata.c
32835 +++ b/drivers/md/persistent-data/dm-space-map-metadata.c
32836 @@ -43,7 +43,7 @@ struct block_op {
32837 };
32838
32839 struct sm_metadata {
32840 - struct dm_space_map sm;
32841 + dm_space_map_no_const sm;
32842
32843 struct ll_disk ll;
32844 struct ll_disk old_ll;
32845 diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
32846 index 1cbfc6b..56e1dbb 100644
32847 --- a/drivers/md/persistent-data/dm-space-map.h
32848 +++ b/drivers/md/persistent-data/dm-space-map.h
32849 @@ -60,6 +60,7 @@ struct dm_space_map {
32850 int (*root_size)(struct dm_space_map *sm, size_t *result);
32851 int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
32852 };
32853 +typedef struct dm_space_map __no_const dm_space_map_no_const;
32854
32855 /*----------------------------------------------------------------*/
32856
32857 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
32858 index a0b225e..a9be913 100644
32859 --- a/drivers/md/raid1.c
32860 +++ b/drivers/md/raid1.c
32861 @@ -1632,7 +1632,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
32862 if (r1_sync_page_io(rdev, sect, s,
32863 bio->bi_io_vec[idx].bv_page,
32864 READ) != 0)
32865 - atomic_add(s, &rdev->corrected_errors);
32866 + atomic_add_unchecked(s, &rdev->corrected_errors);
32867 }
32868 sectors -= s;
32869 sect += s;
32870 @@ -1845,7 +1845,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
32871 test_bit(In_sync, &rdev->flags)) {
32872 if (r1_sync_page_io(rdev, sect, s,
32873 conf->tmppage, READ)) {
32874 - atomic_add(s, &rdev->corrected_errors);
32875 + atomic_add_unchecked(s, &rdev->corrected_errors);
32876 printk(KERN_INFO
32877 "md/raid1:%s: read error corrected "
32878 "(%d sectors at %llu on %s)\n",
32879 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
32880 index 58c44d6..f090bad 100644
32881 --- a/drivers/md/raid10.c
32882 +++ b/drivers/md/raid10.c
32883 @@ -1623,7 +1623,7 @@ static void end_sync_read(struct bio *bio, int error)
32884 /* The write handler will notice the lack of
32885 * R10BIO_Uptodate and record any errors etc
32886 */
32887 - atomic_add(r10_bio->sectors,
32888 + atomic_add_unchecked(r10_bio->sectors,
32889 &conf->mirrors[d].rdev->corrected_errors);
32890
32891 /* for reconstruct, we always reschedule after a read.
32892 @@ -1974,7 +1974,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
32893 {
32894 struct timespec cur_time_mon;
32895 unsigned long hours_since_last;
32896 - unsigned int read_errors = atomic_read(&rdev->read_errors);
32897 + unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
32898
32899 ktime_get_ts(&cur_time_mon);
32900
32901 @@ -1996,9 +1996,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
32902 * overflowing the shift of read_errors by hours_since_last.
32903 */
32904 if (hours_since_last >= 8 * sizeof(read_errors))
32905 - atomic_set(&rdev->read_errors, 0);
32906 + atomic_set_unchecked(&rdev->read_errors, 0);
32907 else
32908 - atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
32909 + atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
32910 }
32911
32912 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
32913 @@ -2052,8 +2052,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
32914 return;
32915
32916 check_decay_read_errors(mddev, rdev);
32917 - atomic_inc(&rdev->read_errors);
32918 - if (atomic_read(&rdev->read_errors) > max_read_errors) {
32919 + atomic_inc_unchecked(&rdev->read_errors);
32920 + if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
32921 char b[BDEVNAME_SIZE];
32922 bdevname(rdev->bdev, b);
32923
32924 @@ -2061,7 +2061,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
32925 "md/raid10:%s: %s: Raid device exceeded "
32926 "read_error threshold [cur %d:max %d]\n",
32927 mdname(mddev), b,
32928 - atomic_read(&rdev->read_errors), max_read_errors);
32929 + atomic_read_unchecked(&rdev->read_errors), max_read_errors);
32930 printk(KERN_NOTICE
32931 "md/raid10:%s: %s: Failing raid device\n",
32932 mdname(mddev), b);
32933 @@ -2210,7 +2210,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
32934 (unsigned long long)(
32935 sect + rdev->data_offset),
32936 bdevname(rdev->bdev, b));
32937 - atomic_add(s, &rdev->corrected_errors);
32938 + atomic_add_unchecked(s, &rdev->corrected_errors);
32939 }
32940
32941 rdev_dec_pending(rdev, mddev);
32942 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
32943 index 360f2b9..08b5382 100644
32944 --- a/drivers/md/raid5.c
32945 +++ b/drivers/md/raid5.c
32946 @@ -1687,18 +1687,18 @@ static void raid5_end_read_request(struct bio * bi, int error)
32947 (unsigned long long)(sh->sector
32948 + rdev->data_offset),
32949 bdevname(rdev->bdev, b));
32950 - atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
32951 + atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
32952 clear_bit(R5_ReadError, &sh->dev[i].flags);
32953 clear_bit(R5_ReWrite, &sh->dev[i].flags);
32954 }
32955 - if (atomic_read(&rdev->read_errors))
32956 - atomic_set(&rdev->read_errors, 0);
32957 + if (atomic_read_unchecked(&rdev->read_errors))
32958 + atomic_set_unchecked(&rdev->read_errors, 0);
32959 } else {
32960 const char *bdn = bdevname(rdev->bdev, b);
32961 int retry = 0;
32962
32963 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
32964 - atomic_inc(&rdev->read_errors);
32965 + atomic_inc_unchecked(&rdev->read_errors);
32966 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
32967 printk_ratelimited(
32968 KERN_WARNING
32969 @@ -1727,7 +1727,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
32970 (unsigned long long)(sh->sector
32971 + rdev->data_offset),
32972 bdn);
32973 - else if (atomic_read(&rdev->read_errors)
32974 + else if (atomic_read_unchecked(&rdev->read_errors)
32975 > conf->max_nr_stripes)
32976 printk(KERN_WARNING
32977 "md/raid:%s: Too many read errors, failing device %s.\n",
32978 diff --git a/drivers/media/dvb/ddbridge/ddbridge-core.c b/drivers/media/dvb/ddbridge/ddbridge-core.c
32979 index ce4f858..7bcfb46 100644
32980 --- a/drivers/media/dvb/ddbridge/ddbridge-core.c
32981 +++ b/drivers/media/dvb/ddbridge/ddbridge-core.c
32982 @@ -1678,7 +1678,7 @@ static struct ddb_info ddb_v6 = {
32983 .subvendor = _subvend, .subdevice = _subdev, \
32984 .driver_data = (unsigned long)&_driverdata }
32985
32986 -static const struct pci_device_id ddb_id_tbl[] __devinitdata = {
32987 +static const struct pci_device_id ddb_id_tbl[] __devinitconst = {
32988 DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
32989 DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
32990 DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
32991 diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
32992 index a7d876f..8c21b61 100644
32993 --- a/drivers/media/dvb/dvb-core/dvb_demux.h
32994 +++ b/drivers/media/dvb/dvb-core/dvb_demux.h
32995 @@ -73,7 +73,7 @@ struct dvb_demux_feed {
32996 union {
32997 dmx_ts_cb ts;
32998 dmx_section_cb sec;
32999 - } cb;
33000 + } __no_const cb;
33001
33002 struct dvb_demux *demux;
33003 void *priv;
33004 diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
33005 index 00a6732..70a682e 100644
33006 --- a/drivers/media/dvb/dvb-core/dvbdev.c
33007 +++ b/drivers/media/dvb/dvb-core/dvbdev.c
33008 @@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
33009 const struct dvb_device *template, void *priv, int type)
33010 {
33011 struct dvb_device *dvbdev;
33012 - struct file_operations *dvbdevfops;
33013 + file_operations_no_const *dvbdevfops;
33014 struct device *clsdev;
33015 int minor;
33016 int id;
33017 diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
33018 index 3940bb0..fb3952a 100644
33019 --- a/drivers/media/dvb/dvb-usb/cxusb.c
33020 +++ b/drivers/media/dvb/dvb-usb/cxusb.c
33021 @@ -1068,7 +1068,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
33022
33023 struct dib0700_adapter_state {
33024 int (*set_param_save) (struct dvb_frontend *);
33025 -};
33026 +} __no_const;
33027
33028 static int dib7070_set_param_override(struct dvb_frontend *fe)
33029 {
33030 diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
33031 index 451c5a7..649f711 100644
33032 --- a/drivers/media/dvb/dvb-usb/dw2102.c
33033 +++ b/drivers/media/dvb/dvb-usb/dw2102.c
33034 @@ -95,7 +95,7 @@ struct su3000_state {
33035
33036 struct s6x0_state {
33037 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
33038 -};
33039 +} __no_const;
33040
33041 /* debug */
33042 static int dvb_usb_dw2102_debug;
33043 diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
33044 index 404f63a..4796533 100644
33045 --- a/drivers/media/dvb/frontends/dib3000.h
33046 +++ b/drivers/media/dvb/frontends/dib3000.h
33047 @@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
33048 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
33049 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
33050 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
33051 -};
33052 +} __no_const;
33053
33054 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
33055 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
33056 diff --git a/drivers/media/dvb/ngene/ngene-cards.c b/drivers/media/dvb/ngene/ngene-cards.c
33057 index 8418c02..8555013 100644
33058 --- a/drivers/media/dvb/ngene/ngene-cards.c
33059 +++ b/drivers/media/dvb/ngene/ngene-cards.c
33060 @@ -477,7 +477,7 @@ static struct ngene_info ngene_info_m780 = {
33061
33062 /****************************************************************************/
33063
33064 -static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
33065 +static const struct pci_device_id ngene_id_tbl[] __devinitconst = {
33066 NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
33067 NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
33068 NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
33069 diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
33070 index 16a089f..ab1667d 100644
33071 --- a/drivers/media/radio/radio-cadet.c
33072 +++ b/drivers/media/radio/radio-cadet.c
33073 @@ -326,6 +326,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
33074 unsigned char readbuf[RDS_BUFFER];
33075 int i = 0;
33076
33077 + if (count > RDS_BUFFER)
33078 + return -EFAULT;
33079 mutex_lock(&dev->lock);
33080 if (dev->rdsstat == 0) {
33081 dev->rdsstat = 1;
33082 diff --git a/drivers/media/video/au0828/au0828.h b/drivers/media/video/au0828/au0828.h
33083 index 9cde353..8c6a1c3 100644
33084 --- a/drivers/media/video/au0828/au0828.h
33085 +++ b/drivers/media/video/au0828/au0828.h
33086 @@ -191,7 +191,7 @@ struct au0828_dev {
33087
33088 /* I2C */
33089 struct i2c_adapter i2c_adap;
33090 - struct i2c_algorithm i2c_algo;
33091 + i2c_algorithm_no_const i2c_algo;
33092 struct i2c_client i2c_client;
33093 u32 i2c_rc;
33094
33095 diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
33096 index 04bf662..e0ac026 100644
33097 --- a/drivers/media/video/cx88/cx88-alsa.c
33098 +++ b/drivers/media/video/cx88/cx88-alsa.c
33099 @@ -766,7 +766,7 @@ static struct snd_kcontrol_new snd_cx88_alc_switch = {
33100 * Only boards with eeprom and byte 1 at eeprom=1 have it
33101 */
33102
33103 -static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitdata = {
33104 +static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitconst = {
33105 {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
33106 {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
33107 {0, }
33108 diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
33109 index 1fb7d5b..3901e77 100644
33110 --- a/drivers/media/video/omap/omap_vout.c
33111 +++ b/drivers/media/video/omap/omap_vout.c
33112 @@ -64,7 +64,6 @@ enum omap_vout_channels {
33113 OMAP_VIDEO2,
33114 };
33115
33116 -static struct videobuf_queue_ops video_vbq_ops;
33117 /* Variables configurable through module params*/
33118 static u32 video1_numbuffers = 3;
33119 static u32 video2_numbuffers = 3;
33120 @@ -1000,6 +999,12 @@ static int omap_vout_open(struct file *file)
33121 {
33122 struct videobuf_queue *q;
33123 struct omap_vout_device *vout = NULL;
33124 + static struct videobuf_queue_ops video_vbq_ops = {
33125 + .buf_setup = omap_vout_buffer_setup,
33126 + .buf_prepare = omap_vout_buffer_prepare,
33127 + .buf_release = omap_vout_buffer_release,
33128 + .buf_queue = omap_vout_buffer_queue,
33129 + };
33130
33131 vout = video_drvdata(file);
33132 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
33133 @@ -1017,10 +1022,6 @@ static int omap_vout_open(struct file *file)
33134 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
33135
33136 q = &vout->vbq;
33137 - video_vbq_ops.buf_setup = omap_vout_buffer_setup;
33138 - video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
33139 - video_vbq_ops.buf_release = omap_vout_buffer_release;
33140 - video_vbq_ops.buf_queue = omap_vout_buffer_queue;
33141 spin_lock_init(&vout->vbq_lock);
33142
33143 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
33144 diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
33145 index 305e6aa..0143317 100644
33146 --- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
33147 +++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
33148 @@ -196,7 +196,7 @@ struct pvr2_hdw {
33149
33150 /* I2C stuff */
33151 struct i2c_adapter i2c_adap;
33152 - struct i2c_algorithm i2c_algo;
33153 + i2c_algorithm_no_const i2c_algo;
33154 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
33155 int i2c_cx25840_hack_state;
33156 int i2c_linked;
33157 diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c
33158 index 4ed1c7c2..8f15e13 100644
33159 --- a/drivers/media/video/timblogiw.c
33160 +++ b/drivers/media/video/timblogiw.c
33161 @@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *file, struct vm_area_struct *vma)
33162
33163 /* Platform device functions */
33164
33165 -static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
33166 +static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
33167 .vidioc_querycap = timblogiw_querycap,
33168 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
33169 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
33170 @@ -767,7 +767,7 @@ static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
33171 .vidioc_enum_framesizes = timblogiw_enum_framesizes,
33172 };
33173
33174 -static __devinitconst struct v4l2_file_operations timblogiw_fops = {
33175 +static __devinitconst v4l2_file_operations_no_const timblogiw_fops = {
33176 .owner = THIS_MODULE,
33177 .open = timblogiw_open,
33178 .release = timblogiw_close,
33179 diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
33180 index a7dc467..a55c423 100644
33181 --- a/drivers/message/fusion/mptbase.c
33182 +++ b/drivers/message/fusion/mptbase.c
33183 @@ -6754,8 +6754,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
33184 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
33185 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
33186
33187 +#ifdef CONFIG_GRKERNSEC_HIDESYM
33188 + seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
33189 +#else
33190 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
33191 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
33192 +#endif
33193 +
33194 /*
33195 * Rounding UP to nearest 4-kB boundary here...
33196 */
33197 diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
33198 index 551262e..7551198 100644
33199 --- a/drivers/message/fusion/mptsas.c
33200 +++ b/drivers/message/fusion/mptsas.c
33201 @@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
33202 return 0;
33203 }
33204
33205 +static inline void
33206 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
33207 +{
33208 + if (phy_info->port_details) {
33209 + phy_info->port_details->rphy = rphy;
33210 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
33211 + ioc->name, rphy));
33212 + }
33213 +
33214 + if (rphy) {
33215 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
33216 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
33217 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
33218 + ioc->name, rphy, rphy->dev.release));
33219 + }
33220 +}
33221 +
33222 /* no mutex */
33223 static void
33224 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
33225 @@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
33226 return NULL;
33227 }
33228
33229 -static inline void
33230 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
33231 -{
33232 - if (phy_info->port_details) {
33233 - phy_info->port_details->rphy = rphy;
33234 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
33235 - ioc->name, rphy));
33236 - }
33237 -
33238 - if (rphy) {
33239 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
33240 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
33241 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
33242 - ioc->name, rphy, rphy->dev.release));
33243 - }
33244 -}
33245 -
33246 static inline struct sas_port *
33247 mptsas_get_port(struct mptsas_phyinfo *phy_info)
33248 {
33249 diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
33250 index 0c3ced7..1fe34ec 100644
33251 --- a/drivers/message/fusion/mptscsih.c
33252 +++ b/drivers/message/fusion/mptscsih.c
33253 @@ -1270,15 +1270,16 @@ mptscsih_info(struct Scsi_Host *SChost)
33254
33255 h = shost_priv(SChost);
33256
33257 - if (h) {
33258 - if (h->info_kbuf == NULL)
33259 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
33260 - return h->info_kbuf;
33261 - h->info_kbuf[0] = '\0';
33262 + if (!h)
33263 + return NULL;
33264
33265 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
33266 - h->info_kbuf[size-1] = '\0';
33267 - }
33268 + if (h->info_kbuf == NULL)
33269 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
33270 + return h->info_kbuf;
33271 + h->info_kbuf[0] = '\0';
33272 +
33273 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
33274 + h->info_kbuf[size-1] = '\0';
33275
33276 return h->info_kbuf;
33277 }
33278 diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
33279 index 6d115c7..58ff7fd 100644
33280 --- a/drivers/message/i2o/i2o_proc.c
33281 +++ b/drivers/message/i2o/i2o_proc.c
33282 @@ -255,13 +255,6 @@ static char *scsi_devices[] = {
33283 "Array Controller Device"
33284 };
33285
33286 -static char *chtostr(u8 * chars, int n)
33287 -{
33288 - char tmp[256];
33289 - tmp[0] = 0;
33290 - return strncat(tmp, (char *)chars, n);
33291 -}
33292 -
33293 static int i2o_report_query_status(struct seq_file *seq, int block_status,
33294 char *group)
33295 {
33296 @@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
33297
33298 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
33299 seq_printf(seq, "%-#8x", ddm_table.module_id);
33300 - seq_printf(seq, "%-29s",
33301 - chtostr(ddm_table.module_name_version, 28));
33302 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
33303 seq_printf(seq, "%9d ", ddm_table.data_size);
33304 seq_printf(seq, "%8d", ddm_table.code_size);
33305
33306 @@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
33307
33308 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
33309 seq_printf(seq, "%-#8x", dst->module_id);
33310 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
33311 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
33312 + seq_printf(seq, "%-.28s", dst->module_name_version);
33313 + seq_printf(seq, "%-.8s", dst->date);
33314 seq_printf(seq, "%8d ", dst->module_size);
33315 seq_printf(seq, "%8d ", dst->mpb_size);
33316 seq_printf(seq, "0x%04x", dst->module_flags);
33317 @@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
33318 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
33319 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
33320 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
33321 - seq_printf(seq, "Vendor info : %s\n",
33322 - chtostr((u8 *) (work32 + 2), 16));
33323 - seq_printf(seq, "Product info : %s\n",
33324 - chtostr((u8 *) (work32 + 6), 16));
33325 - seq_printf(seq, "Description : %s\n",
33326 - chtostr((u8 *) (work32 + 10), 16));
33327 - seq_printf(seq, "Product rev. : %s\n",
33328 - chtostr((u8 *) (work32 + 14), 8));
33329 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
33330 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
33331 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
33332 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
33333
33334 seq_printf(seq, "Serial number : ");
33335 print_serial_number(seq, (u8 *) (work32 + 16),
33336 @@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
33337 }
33338
33339 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
33340 - seq_printf(seq, "Module name : %s\n",
33341 - chtostr(result.module_name, 24));
33342 - seq_printf(seq, "Module revision : %s\n",
33343 - chtostr(result.module_rev, 8));
33344 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
33345 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
33346
33347 seq_printf(seq, "Serial number : ");
33348 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
33349 @@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
33350 return 0;
33351 }
33352
33353 - seq_printf(seq, "Device name : %s\n",
33354 - chtostr(result.device_name, 64));
33355 - seq_printf(seq, "Service name : %s\n",
33356 - chtostr(result.service_name, 64));
33357 - seq_printf(seq, "Physical name : %s\n",
33358 - chtostr(result.physical_location, 64));
33359 - seq_printf(seq, "Instance number : %s\n",
33360 - chtostr(result.instance_number, 4));
33361 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
33362 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
33363 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
33364 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
33365
33366 return 0;
33367 }
33368 diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
33369 index a8c08f3..155fe3d 100644
33370 --- a/drivers/message/i2o/iop.c
33371 +++ b/drivers/message/i2o/iop.c
33372 @@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
33373
33374 spin_lock_irqsave(&c->context_list_lock, flags);
33375
33376 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
33377 - atomic_inc(&c->context_list_counter);
33378 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
33379 + atomic_inc_unchecked(&c->context_list_counter);
33380
33381 - entry->context = atomic_read(&c->context_list_counter);
33382 + entry->context = atomic_read_unchecked(&c->context_list_counter);
33383
33384 list_add(&entry->list, &c->context_list);
33385
33386 @@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
33387
33388 #if BITS_PER_LONG == 64
33389 spin_lock_init(&c->context_list_lock);
33390 - atomic_set(&c->context_list_counter, 0);
33391 + atomic_set_unchecked(&c->context_list_counter, 0);
33392 INIT_LIST_HEAD(&c->context_list);
33393 #endif
33394
33395 diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c
33396 index 7ce65f4..e66e9bc 100644
33397 --- a/drivers/mfd/abx500-core.c
33398 +++ b/drivers/mfd/abx500-core.c
33399 @@ -15,7 +15,7 @@ static LIST_HEAD(abx500_list);
33400
33401 struct abx500_device_entry {
33402 struct list_head list;
33403 - struct abx500_ops ops;
33404 + abx500_ops_no_const ops;
33405 struct device *dev;
33406 };
33407
33408 diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
33409 index a9223ed..4127b13 100644
33410 --- a/drivers/mfd/janz-cmodio.c
33411 +++ b/drivers/mfd/janz-cmodio.c
33412 @@ -13,6 +13,7 @@
33413
33414 #include <linux/kernel.h>
33415 #include <linux/module.h>
33416 +#include <linux/slab.h>
33417 #include <linux/init.h>
33418 #include <linux/pci.h>
33419 #include <linux/interrupt.h>
33420 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
33421 index a981e2a..5ca0c8b 100644
33422 --- a/drivers/misc/lis3lv02d/lis3lv02d.c
33423 +++ b/drivers/misc/lis3lv02d/lis3lv02d.c
33424 @@ -466,7 +466,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
33425 * the lid is closed. This leads to interrupts as soon as a little move
33426 * is done.
33427 */
33428 - atomic_inc(&lis3->count);
33429 + atomic_inc_unchecked(&lis3->count);
33430
33431 wake_up_interruptible(&lis3->misc_wait);
33432 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
33433 @@ -552,7 +552,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
33434 if (lis3->pm_dev)
33435 pm_runtime_get_sync(lis3->pm_dev);
33436
33437 - atomic_set(&lis3->count, 0);
33438 + atomic_set_unchecked(&lis3->count, 0);
33439 return 0;
33440 }
33441
33442 @@ -585,7 +585,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
33443 add_wait_queue(&lis3->misc_wait, &wait);
33444 while (true) {
33445 set_current_state(TASK_INTERRUPTIBLE);
33446 - data = atomic_xchg(&lis3->count, 0);
33447 + data = atomic_xchg_unchecked(&lis3->count, 0);
33448 if (data)
33449 break;
33450
33451 @@ -626,7 +626,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
33452 struct lis3lv02d, miscdev);
33453
33454 poll_wait(file, &lis3->misc_wait, wait);
33455 - if (atomic_read(&lis3->count))
33456 + if (atomic_read_unchecked(&lis3->count))
33457 return POLLIN | POLLRDNORM;
33458 return 0;
33459 }
33460 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
33461 index 2b1482a..5d33616 100644
33462 --- a/drivers/misc/lis3lv02d/lis3lv02d.h
33463 +++ b/drivers/misc/lis3lv02d/lis3lv02d.h
33464 @@ -266,7 +266,7 @@ struct lis3lv02d {
33465 struct input_polled_dev *idev; /* input device */
33466 struct platform_device *pdev; /* platform device */
33467 struct regulator_bulk_data regulators[2];
33468 - atomic_t count; /* interrupt count after last read */
33469 + atomic_unchecked_t count; /* interrupt count after last read */
33470 union axis_conversion ac; /* hw -> logical axis */
33471 int mapped_btns[3];
33472
33473 diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
33474 index 2f30bad..c4c13d0 100644
33475 --- a/drivers/misc/sgi-gru/gruhandles.c
33476 +++ b/drivers/misc/sgi-gru/gruhandles.c
33477 @@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
33478 unsigned long nsec;
33479
33480 nsec = CLKS2NSEC(clks);
33481 - atomic_long_inc(&mcs_op_statistics[op].count);
33482 - atomic_long_add(nsec, &mcs_op_statistics[op].total);
33483 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
33484 + atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
33485 if (mcs_op_statistics[op].max < nsec)
33486 mcs_op_statistics[op].max = nsec;
33487 }
33488 diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
33489 index 950dbe9..eeef0f8 100644
33490 --- a/drivers/misc/sgi-gru/gruprocfs.c
33491 +++ b/drivers/misc/sgi-gru/gruprocfs.c
33492 @@ -32,9 +32,9 @@
33493
33494 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
33495
33496 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
33497 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
33498 {
33499 - unsigned long val = atomic_long_read(v);
33500 + unsigned long val = atomic_long_read_unchecked(v);
33501
33502 seq_printf(s, "%16lu %s\n", val, id);
33503 }
33504 @@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
33505
33506 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
33507 for (op = 0; op < mcsop_last; op++) {
33508 - count = atomic_long_read(&mcs_op_statistics[op].count);
33509 - total = atomic_long_read(&mcs_op_statistics[op].total);
33510 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
33511 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
33512 max = mcs_op_statistics[op].max;
33513 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
33514 count ? total / count : 0, max);
33515 diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
33516 index 5c3ce24..4915ccb 100644
33517 --- a/drivers/misc/sgi-gru/grutables.h
33518 +++ b/drivers/misc/sgi-gru/grutables.h
33519 @@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
33520 * GRU statistics.
33521 */
33522 struct gru_stats_s {
33523 - atomic_long_t vdata_alloc;
33524 - atomic_long_t vdata_free;
33525 - atomic_long_t gts_alloc;
33526 - atomic_long_t gts_free;
33527 - atomic_long_t gms_alloc;
33528 - atomic_long_t gms_free;
33529 - atomic_long_t gts_double_allocate;
33530 - atomic_long_t assign_context;
33531 - atomic_long_t assign_context_failed;
33532 - atomic_long_t free_context;
33533 - atomic_long_t load_user_context;
33534 - atomic_long_t load_kernel_context;
33535 - atomic_long_t lock_kernel_context;
33536 - atomic_long_t unlock_kernel_context;
33537 - atomic_long_t steal_user_context;
33538 - atomic_long_t steal_kernel_context;
33539 - atomic_long_t steal_context_failed;
33540 - atomic_long_t nopfn;
33541 - atomic_long_t asid_new;
33542 - atomic_long_t asid_next;
33543 - atomic_long_t asid_wrap;
33544 - atomic_long_t asid_reuse;
33545 - atomic_long_t intr;
33546 - atomic_long_t intr_cbr;
33547 - atomic_long_t intr_tfh;
33548 - atomic_long_t intr_spurious;
33549 - atomic_long_t intr_mm_lock_failed;
33550 - atomic_long_t call_os;
33551 - atomic_long_t call_os_wait_queue;
33552 - atomic_long_t user_flush_tlb;
33553 - atomic_long_t user_unload_context;
33554 - atomic_long_t user_exception;
33555 - atomic_long_t set_context_option;
33556 - atomic_long_t check_context_retarget_intr;
33557 - atomic_long_t check_context_unload;
33558 - atomic_long_t tlb_dropin;
33559 - atomic_long_t tlb_preload_page;
33560 - atomic_long_t tlb_dropin_fail_no_asid;
33561 - atomic_long_t tlb_dropin_fail_upm;
33562 - atomic_long_t tlb_dropin_fail_invalid;
33563 - atomic_long_t tlb_dropin_fail_range_active;
33564 - atomic_long_t tlb_dropin_fail_idle;
33565 - atomic_long_t tlb_dropin_fail_fmm;
33566 - atomic_long_t tlb_dropin_fail_no_exception;
33567 - atomic_long_t tfh_stale_on_fault;
33568 - atomic_long_t mmu_invalidate_range;
33569 - atomic_long_t mmu_invalidate_page;
33570 - atomic_long_t flush_tlb;
33571 - atomic_long_t flush_tlb_gru;
33572 - atomic_long_t flush_tlb_gru_tgh;
33573 - atomic_long_t flush_tlb_gru_zero_asid;
33574 + atomic_long_unchecked_t vdata_alloc;
33575 + atomic_long_unchecked_t vdata_free;
33576 + atomic_long_unchecked_t gts_alloc;
33577 + atomic_long_unchecked_t gts_free;
33578 + atomic_long_unchecked_t gms_alloc;
33579 + atomic_long_unchecked_t gms_free;
33580 + atomic_long_unchecked_t gts_double_allocate;
33581 + atomic_long_unchecked_t assign_context;
33582 + atomic_long_unchecked_t assign_context_failed;
33583 + atomic_long_unchecked_t free_context;
33584 + atomic_long_unchecked_t load_user_context;
33585 + atomic_long_unchecked_t load_kernel_context;
33586 + atomic_long_unchecked_t lock_kernel_context;
33587 + atomic_long_unchecked_t unlock_kernel_context;
33588 + atomic_long_unchecked_t steal_user_context;
33589 + atomic_long_unchecked_t steal_kernel_context;
33590 + atomic_long_unchecked_t steal_context_failed;
33591 + atomic_long_unchecked_t nopfn;
33592 + atomic_long_unchecked_t asid_new;
33593 + atomic_long_unchecked_t asid_next;
33594 + atomic_long_unchecked_t asid_wrap;
33595 + atomic_long_unchecked_t asid_reuse;
33596 + atomic_long_unchecked_t intr;
33597 + atomic_long_unchecked_t intr_cbr;
33598 + atomic_long_unchecked_t intr_tfh;
33599 + atomic_long_unchecked_t intr_spurious;
33600 + atomic_long_unchecked_t intr_mm_lock_failed;
33601 + atomic_long_unchecked_t call_os;
33602 + atomic_long_unchecked_t call_os_wait_queue;
33603 + atomic_long_unchecked_t user_flush_tlb;
33604 + atomic_long_unchecked_t user_unload_context;
33605 + atomic_long_unchecked_t user_exception;
33606 + atomic_long_unchecked_t set_context_option;
33607 + atomic_long_unchecked_t check_context_retarget_intr;
33608 + atomic_long_unchecked_t check_context_unload;
33609 + atomic_long_unchecked_t tlb_dropin;
33610 + atomic_long_unchecked_t tlb_preload_page;
33611 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
33612 + atomic_long_unchecked_t tlb_dropin_fail_upm;
33613 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
33614 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
33615 + atomic_long_unchecked_t tlb_dropin_fail_idle;
33616 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
33617 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
33618 + atomic_long_unchecked_t tfh_stale_on_fault;
33619 + atomic_long_unchecked_t mmu_invalidate_range;
33620 + atomic_long_unchecked_t mmu_invalidate_page;
33621 + atomic_long_unchecked_t flush_tlb;
33622 + atomic_long_unchecked_t flush_tlb_gru;
33623 + atomic_long_unchecked_t flush_tlb_gru_tgh;
33624 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
33625
33626 - atomic_long_t copy_gpa;
33627 - atomic_long_t read_gpa;
33628 + atomic_long_unchecked_t copy_gpa;
33629 + atomic_long_unchecked_t read_gpa;
33630
33631 - atomic_long_t mesq_receive;
33632 - atomic_long_t mesq_receive_none;
33633 - atomic_long_t mesq_send;
33634 - atomic_long_t mesq_send_failed;
33635 - atomic_long_t mesq_noop;
33636 - atomic_long_t mesq_send_unexpected_error;
33637 - atomic_long_t mesq_send_lb_overflow;
33638 - atomic_long_t mesq_send_qlimit_reached;
33639 - atomic_long_t mesq_send_amo_nacked;
33640 - atomic_long_t mesq_send_put_nacked;
33641 - atomic_long_t mesq_page_overflow;
33642 - atomic_long_t mesq_qf_locked;
33643 - atomic_long_t mesq_qf_noop_not_full;
33644 - atomic_long_t mesq_qf_switch_head_failed;
33645 - atomic_long_t mesq_qf_unexpected_error;
33646 - atomic_long_t mesq_noop_unexpected_error;
33647 - atomic_long_t mesq_noop_lb_overflow;
33648 - atomic_long_t mesq_noop_qlimit_reached;
33649 - atomic_long_t mesq_noop_amo_nacked;
33650 - atomic_long_t mesq_noop_put_nacked;
33651 - atomic_long_t mesq_noop_page_overflow;
33652 + atomic_long_unchecked_t mesq_receive;
33653 + atomic_long_unchecked_t mesq_receive_none;
33654 + atomic_long_unchecked_t mesq_send;
33655 + atomic_long_unchecked_t mesq_send_failed;
33656 + atomic_long_unchecked_t mesq_noop;
33657 + atomic_long_unchecked_t mesq_send_unexpected_error;
33658 + atomic_long_unchecked_t mesq_send_lb_overflow;
33659 + atomic_long_unchecked_t mesq_send_qlimit_reached;
33660 + atomic_long_unchecked_t mesq_send_amo_nacked;
33661 + atomic_long_unchecked_t mesq_send_put_nacked;
33662 + atomic_long_unchecked_t mesq_page_overflow;
33663 + atomic_long_unchecked_t mesq_qf_locked;
33664 + atomic_long_unchecked_t mesq_qf_noop_not_full;
33665 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
33666 + atomic_long_unchecked_t mesq_qf_unexpected_error;
33667 + atomic_long_unchecked_t mesq_noop_unexpected_error;
33668 + atomic_long_unchecked_t mesq_noop_lb_overflow;
33669 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
33670 + atomic_long_unchecked_t mesq_noop_amo_nacked;
33671 + atomic_long_unchecked_t mesq_noop_put_nacked;
33672 + atomic_long_unchecked_t mesq_noop_page_overflow;
33673
33674 };
33675
33676 @@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
33677 tghop_invalidate, mcsop_last};
33678
33679 struct mcs_op_statistic {
33680 - atomic_long_t count;
33681 - atomic_long_t total;
33682 + atomic_long_unchecked_t count;
33683 + atomic_long_unchecked_t total;
33684 unsigned long max;
33685 };
33686
33687 @@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
33688
33689 #define STAT(id) do { \
33690 if (gru_options & OPT_STATS) \
33691 - atomic_long_inc(&gru_stats.id); \
33692 + atomic_long_inc_unchecked(&gru_stats.id); \
33693 } while (0)
33694
33695 #ifdef CONFIG_SGI_GRU_DEBUG
33696 diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
33697 index 851b2f2..a4ec097 100644
33698 --- a/drivers/misc/sgi-xp/xp.h
33699 +++ b/drivers/misc/sgi-xp/xp.h
33700 @@ -289,7 +289,7 @@ struct xpc_interface {
33701 xpc_notify_func, void *);
33702 void (*received) (short, int, void *);
33703 enum xp_retval (*partid_to_nasids) (short, void *);
33704 -};
33705 +} __no_const;
33706
33707 extern struct xpc_interface xpc_interface;
33708
33709 diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
33710 index b94d5f7..7f494c5 100644
33711 --- a/drivers/misc/sgi-xp/xpc.h
33712 +++ b/drivers/misc/sgi-xp/xpc.h
33713 @@ -835,6 +835,7 @@ struct xpc_arch_operations {
33714 void (*received_payload) (struct xpc_channel *, void *);
33715 void (*notify_senders_of_disconnect) (struct xpc_channel *);
33716 };
33717 +typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
33718
33719 /* struct xpc_partition act_state values (for XPC HB) */
33720
33721 @@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
33722 /* found in xpc_main.c */
33723 extern struct device *xpc_part;
33724 extern struct device *xpc_chan;
33725 -extern struct xpc_arch_operations xpc_arch_ops;
33726 +extern xpc_arch_operations_no_const xpc_arch_ops;
33727 extern int xpc_disengage_timelimit;
33728 extern int xpc_disengage_timedout;
33729 extern int xpc_activate_IRQ_rcvd;
33730 diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
33731 index 8d082b4..aa749ae 100644
33732 --- a/drivers/misc/sgi-xp/xpc_main.c
33733 +++ b/drivers/misc/sgi-xp/xpc_main.c
33734 @@ -162,7 +162,7 @@ static struct notifier_block xpc_die_notifier = {
33735 .notifier_call = xpc_system_die,
33736 };
33737
33738 -struct xpc_arch_operations xpc_arch_ops;
33739 +xpc_arch_operations_no_const xpc_arch_ops;
33740
33741 /*
33742 * Timer function to enforce the timelimit on the partition disengage.
33743 diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
33744 index 6ebdc40..9edf5d8 100644
33745 --- a/drivers/mmc/host/sdhci-pci.c
33746 +++ b/drivers/mmc/host/sdhci-pci.c
33747 @@ -631,7 +631,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
33748 .probe = via_probe,
33749 };
33750
33751 -static const struct pci_device_id pci_ids[] __devinitdata = {
33752 +static const struct pci_device_id pci_ids[] __devinitconst = {
33753 {
33754 .vendor = PCI_VENDOR_ID_RICOH,
33755 .device = PCI_DEVICE_ID_RICOH_R5C822,
33756 diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
33757 index b1cdf64..ce6e438 100644
33758 --- a/drivers/mtd/devices/doc2000.c
33759 +++ b/drivers/mtd/devices/doc2000.c
33760 @@ -764,7 +764,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
33761
33762 /* The ECC will not be calculated correctly if less than 512 is written */
33763 /* DBB-
33764 - if (len != 0x200 && eccbuf)
33765 + if (len != 0x200)
33766 printk(KERN_WARNING
33767 "ECC needs a full sector write (adr: %lx size %lx)\n",
33768 (long) to, (long) len);
33769 diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
33770 index 7543b98..7069947 100644
33771 --- a/drivers/mtd/devices/doc2001.c
33772 +++ b/drivers/mtd/devices/doc2001.c
33773 @@ -384,7 +384,7 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
33774 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
33775
33776 /* Don't allow read past end of device */
33777 - if (from >= this->totlen)
33778 + if (from >= this->totlen || !len)
33779 return -EINVAL;
33780
33781 /* Don't allow a single read to cross a 512-byte block boundary */
33782 diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
33783 index 3984d48..28aa897 100644
33784 --- a/drivers/mtd/nand/denali.c
33785 +++ b/drivers/mtd/nand/denali.c
33786 @@ -26,6 +26,7 @@
33787 #include <linux/pci.h>
33788 #include <linux/mtd/mtd.h>
33789 #include <linux/module.h>
33790 +#include <linux/slab.h>
33791
33792 #include "denali.h"
33793
33794 diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
33795 index 51b9d6a..52af9a7 100644
33796 --- a/drivers/mtd/nftlmount.c
33797 +++ b/drivers/mtd/nftlmount.c
33798 @@ -24,6 +24,7 @@
33799 #include <asm/errno.h>
33800 #include <linux/delay.h>
33801 #include <linux/slab.h>
33802 +#include <linux/sched.h>
33803 #include <linux/mtd/mtd.h>
33804 #include <linux/mtd/nand.h>
33805 #include <linux/mtd/nftl.h>
33806 diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
33807 index 115749f..3021f01 100644
33808 --- a/drivers/mtd/ubi/build.c
33809 +++ b/drivers/mtd/ubi/build.c
33810 @@ -1311,7 +1311,7 @@ module_exit(ubi_exit);
33811 static int __init bytes_str_to_int(const char *str)
33812 {
33813 char *endp;
33814 - unsigned long result;
33815 + unsigned long result, scale = 1;
33816
33817 result = simple_strtoul(str, &endp, 0);
33818 if (str == endp || result >= INT_MAX) {
33819 @@ -1322,11 +1322,11 @@ static int __init bytes_str_to_int(const char *str)
33820
33821 switch (*endp) {
33822 case 'G':
33823 - result *= 1024;
33824 + scale *= 1024;
33825 case 'M':
33826 - result *= 1024;
33827 + scale *= 1024;
33828 case 'K':
33829 - result *= 1024;
33830 + scale *= 1024;
33831 if (endp[1] == 'i' && endp[2] == 'B')
33832 endp += 2;
33833 case '\0':
33834 @@ -1337,7 +1337,13 @@ static int __init bytes_str_to_int(const char *str)
33835 return -EINVAL;
33836 }
33837
33838 - return result;
33839 + if ((intoverflow_t)result*scale >= INT_MAX) {
33840 + printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
33841 + str);
33842 + return -EINVAL;
33843 + }
33844 +
33845 + return result*scale;
33846 }
33847
33848 /**
33849 diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
33850 index 071f4c8..440862e 100644
33851 --- a/drivers/net/ethernet/atheros/atlx/atl2.c
33852 +++ b/drivers/net/ethernet/atheros/atlx/atl2.c
33853 @@ -2862,7 +2862,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
33854 */
33855
33856 #define ATL2_PARAM(X, desc) \
33857 - static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
33858 + static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
33859 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
33860 MODULE_PARM_DESC(X, desc);
33861 #else
33862 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
33863 index 66da39f..5dc436d 100644
33864 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
33865 +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
33866 @@ -473,7 +473,7 @@ struct bnx2x_rx_mode_obj {
33867
33868 int (*wait_comp)(struct bnx2x *bp,
33869 struct bnx2x_rx_mode_ramrod_params *p);
33870 -};
33871 +} __no_const;
33872
33873 /********************** Set multicast group ***********************************/
33874
33875 diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
33876 index aea8f72..fcebf75 100644
33877 --- a/drivers/net/ethernet/broadcom/tg3.h
33878 +++ b/drivers/net/ethernet/broadcom/tg3.h
33879 @@ -140,6 +140,7 @@
33880 #define CHIPREV_ID_5750_A0 0x4000
33881 #define CHIPREV_ID_5750_A1 0x4001
33882 #define CHIPREV_ID_5750_A3 0x4003
33883 +#define CHIPREV_ID_5750_C1 0x4201
33884 #define CHIPREV_ID_5750_C2 0x4202
33885 #define CHIPREV_ID_5752_A0_HW 0x5000
33886 #define CHIPREV_ID_5752_A0 0x6000
33887 diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
33888 index c4e8643..0979484 100644
33889 --- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
33890 +++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
33891 @@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
33892 */
33893 struct l2t_skb_cb {
33894 arp_failure_handler_func arp_failure_handler;
33895 -};
33896 +} __no_const;
33897
33898 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
33899
33900 diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
33901 index 4d71f5a..8004440 100644
33902 --- a/drivers/net/ethernet/dec/tulip/de4x5.c
33903 +++ b/drivers/net/ethernet/dec/tulip/de4x5.c
33904 @@ -5392,7 +5392,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
33905 for (i=0; i<ETH_ALEN; i++) {
33906 tmp.addr[i] = dev->dev_addr[i];
33907 }
33908 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
33909 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
33910 break;
33911
33912 case DE4X5_SET_HWADDR: /* Set the hardware address */
33913 @@ -5432,7 +5432,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
33914 spin_lock_irqsave(&lp->lock, flags);
33915 memcpy(&statbuf, &lp->pktStats, ioc->len);
33916 spin_unlock_irqrestore(&lp->lock, flags);
33917 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
33918 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
33919 return -EFAULT;
33920 break;
33921 }
33922 diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c
33923 index 14d5b61..1398636 100644
33924 --- a/drivers/net/ethernet/dec/tulip/eeprom.c
33925 +++ b/drivers/net/ethernet/dec/tulip/eeprom.c
33926 @@ -79,7 +79,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
33927 {NULL}};
33928
33929
33930 -static const char *block_name[] __devinitdata = {
33931 +static const char *block_name[] __devinitconst = {
33932 "21140 non-MII",
33933 "21140 MII PHY",
33934 "21142 Serial PHY",
33935 diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
33936 index 52da7b2..4ddfe1c 100644
33937 --- a/drivers/net/ethernet/dec/tulip/winbond-840.c
33938 +++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
33939 @@ -236,7 +236,7 @@ struct pci_id_info {
33940 int drv_flags; /* Driver use, intended as capability flags. */
33941 };
33942
33943 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
33944 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
33945 { /* Sometime a Level-One switch card. */
33946 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
33947 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
33948 diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
33949 index 28a3a9b..d96cb63 100644
33950 --- a/drivers/net/ethernet/dlink/sundance.c
33951 +++ b/drivers/net/ethernet/dlink/sundance.c
33952 @@ -218,7 +218,7 @@ enum {
33953 struct pci_id_info {
33954 const char *name;
33955 };
33956 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
33957 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
33958 {"D-Link DFE-550TX FAST Ethernet Adapter"},
33959 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
33960 {"D-Link DFE-580TX 4 port Server Adapter"},
33961 diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
33962 index e703d64..d62ecf9 100644
33963 --- a/drivers/net/ethernet/emulex/benet/be_main.c
33964 +++ b/drivers/net/ethernet/emulex/benet/be_main.c
33965 @@ -402,7 +402,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
33966
33967 if (wrapped)
33968 newacc += 65536;
33969 - ACCESS_ONCE(*acc) = newacc;
33970 + ACCESS_ONCE_RW(*acc) = newacc;
33971 }
33972
33973 void be_parse_stats(struct be_adapter *adapter)
33974 diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
33975 index 47f85c3..82ab6c4 100644
33976 --- a/drivers/net/ethernet/faraday/ftgmac100.c
33977 +++ b/drivers/net/ethernet/faraday/ftgmac100.c
33978 @@ -31,6 +31,8 @@
33979 #include <linux/netdevice.h>
33980 #include <linux/phy.h>
33981 #include <linux/platform_device.h>
33982 +#include <linux/interrupt.h>
33983 +#include <linux/irqreturn.h>
33984 #include <net/ip.h>
33985
33986 #include "ftgmac100.h"
33987 diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
33988 index bb336a0..4b472da 100644
33989 --- a/drivers/net/ethernet/faraday/ftmac100.c
33990 +++ b/drivers/net/ethernet/faraday/ftmac100.c
33991 @@ -31,6 +31,8 @@
33992 #include <linux/module.h>
33993 #include <linux/netdevice.h>
33994 #include <linux/platform_device.h>
33995 +#include <linux/interrupt.h>
33996 +#include <linux/irqreturn.h>
33997
33998 #include "ftmac100.h"
33999
34000 diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
34001 index c82d444..0007fb4 100644
34002 --- a/drivers/net/ethernet/fealnx.c
34003 +++ b/drivers/net/ethernet/fealnx.c
34004 @@ -150,7 +150,7 @@ struct chip_info {
34005 int flags;
34006 };
34007
34008 -static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
34009 +static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
34010 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
34011 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
34012 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
34013 diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
34014 index e1159e5..e18684d 100644
34015 --- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
34016 +++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
34017 @@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
34018 {
34019 struct e1000_hw *hw = &adapter->hw;
34020 struct e1000_mac_info *mac = &hw->mac;
34021 - struct e1000_mac_operations *func = &mac->ops;
34022 + e1000_mac_operations_no_const *func = &mac->ops;
34023
34024 /* Set media type */
34025 switch (adapter->pdev->device) {
34026 diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
34027 index a3e65fd..f451444 100644
34028 --- a/drivers/net/ethernet/intel/e1000e/82571.c
34029 +++ b/drivers/net/ethernet/intel/e1000e/82571.c
34030 @@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
34031 {
34032 struct e1000_hw *hw = &adapter->hw;
34033 struct e1000_mac_info *mac = &hw->mac;
34034 - struct e1000_mac_operations *func = &mac->ops;
34035 + e1000_mac_operations_no_const *func = &mac->ops;
34036 u32 swsm = 0;
34037 u32 swsm2 = 0;
34038 bool force_clear_smbi = false;
34039 diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
34040 index 2967039..ca8c40c 100644
34041 --- a/drivers/net/ethernet/intel/e1000e/hw.h
34042 +++ b/drivers/net/ethernet/intel/e1000e/hw.h
34043 @@ -778,6 +778,7 @@ struct e1000_mac_operations {
34044 void (*write_vfta)(struct e1000_hw *, u32, u32);
34045 s32 (*read_mac_addr)(struct e1000_hw *);
34046 };
34047 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
34048
34049 /*
34050 * When to use various PHY register access functions:
34051 @@ -818,6 +819,7 @@ struct e1000_phy_operations {
34052 void (*power_up)(struct e1000_hw *);
34053 void (*power_down)(struct e1000_hw *);
34054 };
34055 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
34056
34057 /* Function pointers for the NVM. */
34058 struct e1000_nvm_operations {
34059 @@ -829,9 +831,10 @@ struct e1000_nvm_operations {
34060 s32 (*validate)(struct e1000_hw *);
34061 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
34062 };
34063 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
34064
34065 struct e1000_mac_info {
34066 - struct e1000_mac_operations ops;
34067 + e1000_mac_operations_no_const ops;
34068 u8 addr[ETH_ALEN];
34069 u8 perm_addr[ETH_ALEN];
34070
34071 @@ -872,7 +875,7 @@ struct e1000_mac_info {
34072 };
34073
34074 struct e1000_phy_info {
34075 - struct e1000_phy_operations ops;
34076 + e1000_phy_operations_no_const ops;
34077
34078 enum e1000_phy_type type;
34079
34080 @@ -906,7 +909,7 @@ struct e1000_phy_info {
34081 };
34082
34083 struct e1000_nvm_info {
34084 - struct e1000_nvm_operations ops;
34085 + e1000_nvm_operations_no_const ops;
34086
34087 enum e1000_nvm_type type;
34088 enum e1000_nvm_override override;
34089 diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
34090 index f67cbd3..cef9e3d 100644
34091 --- a/drivers/net/ethernet/intel/igb/e1000_hw.h
34092 +++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
34093 @@ -314,6 +314,7 @@ struct e1000_mac_operations {
34094 s32 (*read_mac_addr)(struct e1000_hw *);
34095 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
34096 };
34097 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
34098
34099 struct e1000_phy_operations {
34100 s32 (*acquire)(struct e1000_hw *);
34101 @@ -330,6 +331,7 @@ struct e1000_phy_operations {
34102 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
34103 s32 (*write_reg)(struct e1000_hw *, u32, u16);
34104 };
34105 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
34106
34107 struct e1000_nvm_operations {
34108 s32 (*acquire)(struct e1000_hw *);
34109 @@ -339,6 +341,7 @@ struct e1000_nvm_operations {
34110 s32 (*update)(struct e1000_hw *);
34111 s32 (*validate)(struct e1000_hw *);
34112 };
34113 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
34114
34115 struct e1000_info {
34116 s32 (*get_invariants)(struct e1000_hw *);
34117 @@ -350,7 +353,7 @@ struct e1000_info {
34118 extern const struct e1000_info e1000_82575_info;
34119
34120 struct e1000_mac_info {
34121 - struct e1000_mac_operations ops;
34122 + e1000_mac_operations_no_const ops;
34123
34124 u8 addr[6];
34125 u8 perm_addr[6];
34126 @@ -388,7 +391,7 @@ struct e1000_mac_info {
34127 };
34128
34129 struct e1000_phy_info {
34130 - struct e1000_phy_operations ops;
34131 + e1000_phy_operations_no_const ops;
34132
34133 enum e1000_phy_type type;
34134
34135 @@ -423,7 +426,7 @@ struct e1000_phy_info {
34136 };
34137
34138 struct e1000_nvm_info {
34139 - struct e1000_nvm_operations ops;
34140 + e1000_nvm_operations_no_const ops;
34141 enum e1000_nvm_type type;
34142 enum e1000_nvm_override override;
34143
34144 @@ -468,6 +471,7 @@ struct e1000_mbx_operations {
34145 s32 (*check_for_ack)(struct e1000_hw *, u16);
34146 s32 (*check_for_rst)(struct e1000_hw *, u16);
34147 };
34148 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
34149
34150 struct e1000_mbx_stats {
34151 u32 msgs_tx;
34152 @@ -479,7 +483,7 @@ struct e1000_mbx_stats {
34153 };
34154
34155 struct e1000_mbx_info {
34156 - struct e1000_mbx_operations ops;
34157 + e1000_mbx_operations_no_const ops;
34158 struct e1000_mbx_stats stats;
34159 u32 timeout;
34160 u32 usec_delay;
34161 diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h
34162 index 57db3c6..aa825fc 100644
34163 --- a/drivers/net/ethernet/intel/igbvf/vf.h
34164 +++ b/drivers/net/ethernet/intel/igbvf/vf.h
34165 @@ -189,9 +189,10 @@ struct e1000_mac_operations {
34166 s32 (*read_mac_addr)(struct e1000_hw *);
34167 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
34168 };
34169 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
34170
34171 struct e1000_mac_info {
34172 - struct e1000_mac_operations ops;
34173 + e1000_mac_operations_no_const ops;
34174 u8 addr[6];
34175 u8 perm_addr[6];
34176
34177 @@ -213,6 +214,7 @@ struct e1000_mbx_operations {
34178 s32 (*check_for_ack)(struct e1000_hw *);
34179 s32 (*check_for_rst)(struct e1000_hw *);
34180 };
34181 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
34182
34183 struct e1000_mbx_stats {
34184 u32 msgs_tx;
34185 @@ -224,7 +226,7 @@ struct e1000_mbx_stats {
34186 };
34187
34188 struct e1000_mbx_info {
34189 - struct e1000_mbx_operations ops;
34190 + e1000_mbx_operations_no_const ops;
34191 struct e1000_mbx_stats stats;
34192 u32 timeout;
34193 u32 usec_delay;
34194 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
34195 index 9b95bef..7e254ee 100644
34196 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
34197 +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
34198 @@ -2708,6 +2708,7 @@ struct ixgbe_eeprom_operations {
34199 s32 (*update_checksum)(struct ixgbe_hw *);
34200 u16 (*calc_checksum)(struct ixgbe_hw *);
34201 };
34202 +typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
34203
34204 struct ixgbe_mac_operations {
34205 s32 (*init_hw)(struct ixgbe_hw *);
34206 @@ -2769,6 +2770,7 @@ struct ixgbe_mac_operations {
34207 /* Manageability interface */
34208 s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
34209 };
34210 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
34211
34212 struct ixgbe_phy_operations {
34213 s32 (*identify)(struct ixgbe_hw *);
34214 @@ -2788,9 +2790,10 @@ struct ixgbe_phy_operations {
34215 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
34216 s32 (*check_overtemp)(struct ixgbe_hw *);
34217 };
34218 +typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
34219
34220 struct ixgbe_eeprom_info {
34221 - struct ixgbe_eeprom_operations ops;
34222 + ixgbe_eeprom_operations_no_const ops;
34223 enum ixgbe_eeprom_type type;
34224 u32 semaphore_delay;
34225 u16 word_size;
34226 @@ -2800,7 +2803,7 @@ struct ixgbe_eeprom_info {
34227
34228 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
34229 struct ixgbe_mac_info {
34230 - struct ixgbe_mac_operations ops;
34231 + ixgbe_mac_operations_no_const ops;
34232 enum ixgbe_mac_type type;
34233 u8 addr[ETH_ALEN];
34234 u8 perm_addr[ETH_ALEN];
34235 @@ -2828,7 +2831,7 @@ struct ixgbe_mac_info {
34236 };
34237
34238 struct ixgbe_phy_info {
34239 - struct ixgbe_phy_operations ops;
34240 + ixgbe_phy_operations_no_const ops;
34241 struct mdio_if_info mdio;
34242 enum ixgbe_phy_type type;
34243 u32 id;
34244 @@ -2856,6 +2859,7 @@ struct ixgbe_mbx_operations {
34245 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
34246 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
34247 };
34248 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
34249
34250 struct ixgbe_mbx_stats {
34251 u32 msgs_tx;
34252 @@ -2867,7 +2871,7 @@ struct ixgbe_mbx_stats {
34253 };
34254
34255 struct ixgbe_mbx_info {
34256 - struct ixgbe_mbx_operations ops;
34257 + ixgbe_mbx_operations_no_const ops;
34258 struct ixgbe_mbx_stats stats;
34259 u32 timeout;
34260 u32 usec_delay;
34261 diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
34262 index 25c951d..cc7cf33 100644
34263 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h
34264 +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
34265 @@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
34266 s32 (*clear_vfta)(struct ixgbe_hw *);
34267 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
34268 };
34269 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
34270
34271 enum ixgbe_mac_type {
34272 ixgbe_mac_unknown = 0,
34273 @@ -79,7 +80,7 @@ enum ixgbe_mac_type {
34274 };
34275
34276 struct ixgbe_mac_info {
34277 - struct ixgbe_mac_operations ops;
34278 + ixgbe_mac_operations_no_const ops;
34279 u8 addr[6];
34280 u8 perm_addr[6];
34281
34282 @@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
34283 s32 (*check_for_ack)(struct ixgbe_hw *);
34284 s32 (*check_for_rst)(struct ixgbe_hw *);
34285 };
34286 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
34287
34288 struct ixgbe_mbx_stats {
34289 u32 msgs_tx;
34290 @@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
34291 };
34292
34293 struct ixgbe_mbx_info {
34294 - struct ixgbe_mbx_operations ops;
34295 + ixgbe_mbx_operations_no_const ops;
34296 struct ixgbe_mbx_stats stats;
34297 u32 timeout;
34298 u32 udelay;
34299 diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
34300 index d498f04..1b49bed 100644
34301 --- a/drivers/net/ethernet/mellanox/mlx4/main.c
34302 +++ b/drivers/net/ethernet/mellanox/mlx4/main.c
34303 @@ -41,6 +41,7 @@
34304 #include <linux/slab.h>
34305 #include <linux/io-mapping.h>
34306 #include <linux/delay.h>
34307 +#include <linux/sched.h>
34308
34309 #include <linux/mlx4/device.h>
34310 #include <linux/mlx4/doorbell.h>
34311 diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
34312 index 5046a64..71ca936 100644
34313 --- a/drivers/net/ethernet/neterion/vxge/vxge-config.h
34314 +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h
34315 @@ -514,7 +514,7 @@ struct vxge_hw_uld_cbs {
34316 void (*link_down)(struct __vxge_hw_device *devh);
34317 void (*crit_err)(struct __vxge_hw_device *devh,
34318 enum vxge_hw_event type, u64 ext_data);
34319 -};
34320 +} __no_const;
34321
34322 /*
34323 * struct __vxge_hw_blockpool_entry - Block private data structure
34324 diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
34325 index 4a518a3..936b334 100644
34326 --- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
34327 +++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
34328 @@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
34329 struct vxge_hw_mempool_dma *dma_object,
34330 u32 index,
34331 u32 is_last);
34332 -};
34333 +} __no_const;
34334
34335 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
34336 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
34337 diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
34338 index bbacb37..d60887d 100644
34339 --- a/drivers/net/ethernet/realtek/r8169.c
34340 +++ b/drivers/net/ethernet/realtek/r8169.c
34341 @@ -695,17 +695,17 @@ struct rtl8169_private {
34342 struct mdio_ops {
34343 void (*write)(void __iomem *, int, int);
34344 int (*read)(void __iomem *, int);
34345 - } mdio_ops;
34346 + } __no_const mdio_ops;
34347
34348 struct pll_power_ops {
34349 void (*down)(struct rtl8169_private *);
34350 void (*up)(struct rtl8169_private *);
34351 - } pll_power_ops;
34352 + } __no_const pll_power_ops;
34353
34354 struct jumbo_ops {
34355 void (*enable)(struct rtl8169_private *);
34356 void (*disable)(struct rtl8169_private *);
34357 - } jumbo_ops;
34358 + } __no_const jumbo_ops;
34359
34360 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
34361 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
34362 diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
34363 index 5b118cd..858b523 100644
34364 --- a/drivers/net/ethernet/sis/sis190.c
34365 +++ b/drivers/net/ethernet/sis/sis190.c
34366 @@ -1622,7 +1622,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
34367 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
34368 struct net_device *dev)
34369 {
34370 - static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
34371 + static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
34372 struct sis190_private *tp = netdev_priv(dev);
34373 struct pci_dev *isa_bridge;
34374 u8 reg, tmp8;
34375 diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
34376 index c07cfe9..81cbf7e 100644
34377 --- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
34378 +++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
34379 @@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
34380
34381 writel(value, ioaddr + MMC_CNTRL);
34382
34383 - pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
34384 - MMC_CNTRL, value);
34385 +// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
34386 +// MMC_CNTRL, value);
34387 }
34388
34389 /* To mask all all interrupts.*/
34390 diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
34391 index dec5836..6d4db7d 100644
34392 --- a/drivers/net/hyperv/hyperv_net.h
34393 +++ b/drivers/net/hyperv/hyperv_net.h
34394 @@ -97,7 +97,7 @@ struct rndis_device {
34395
34396 enum rndis_device_state state;
34397 bool link_state;
34398 - atomic_t new_req_id;
34399 + atomic_unchecked_t new_req_id;
34400
34401 spinlock_t request_lock;
34402 struct list_head req_list;
34403 diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
34404 index 133b7fb..d58c559 100644
34405 --- a/drivers/net/hyperv/rndis_filter.c
34406 +++ b/drivers/net/hyperv/rndis_filter.c
34407 @@ -96,7 +96,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
34408 * template
34409 */
34410 set = &rndis_msg->msg.set_req;
34411 - set->req_id = atomic_inc_return(&dev->new_req_id);
34412 + set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
34413
34414 /* Add to the request list */
34415 spin_lock_irqsave(&dev->request_lock, flags);
34416 @@ -627,7 +627,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
34417
34418 /* Setup the rndis set */
34419 halt = &request->request_msg.msg.halt_req;
34420 - halt->req_id = atomic_inc_return(&dev->new_req_id);
34421 + halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
34422
34423 /* Ignore return since this msg is optional. */
34424 rndis_filter_send_request(dev, request);
34425 diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
34426 index 486b404..0d6677d 100644
34427 --- a/drivers/net/ppp/ppp_generic.c
34428 +++ b/drivers/net/ppp/ppp_generic.c
34429 @@ -987,7 +987,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
34430 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
34431 struct ppp_stats stats;
34432 struct ppp_comp_stats cstats;
34433 - char *vers;
34434
34435 switch (cmd) {
34436 case SIOCGPPPSTATS:
34437 @@ -1009,8 +1008,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
34438 break;
34439
34440 case SIOCGPPPVER:
34441 - vers = PPP_VERSION;
34442 - if (copy_to_user(addr, vers, strlen(vers) + 1))
34443 + if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
34444 break;
34445 err = 0;
34446 break;
34447 diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
34448 index 515f122..41dd273 100644
34449 --- a/drivers/net/tokenring/abyss.c
34450 +++ b/drivers/net/tokenring/abyss.c
34451 @@ -451,10 +451,12 @@ static struct pci_driver abyss_driver = {
34452
34453 static int __init abyss_init (void)
34454 {
34455 - abyss_netdev_ops = tms380tr_netdev_ops;
34456 + pax_open_kernel();
34457 + memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34458
34459 - abyss_netdev_ops.ndo_open = abyss_open;
34460 - abyss_netdev_ops.ndo_stop = abyss_close;
34461 + *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
34462 + *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
34463 + pax_close_kernel();
34464
34465 return pci_register_driver(&abyss_driver);
34466 }
34467 diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
34468 index 6153cfd..cf69c1c 100644
34469 --- a/drivers/net/tokenring/madgemc.c
34470 +++ b/drivers/net/tokenring/madgemc.c
34471 @@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver = {
34472
34473 static int __init madgemc_init (void)
34474 {
34475 - madgemc_netdev_ops = tms380tr_netdev_ops;
34476 - madgemc_netdev_ops.ndo_open = madgemc_open;
34477 - madgemc_netdev_ops.ndo_stop = madgemc_close;
34478 + pax_open_kernel();
34479 + memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34480 + *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
34481 + *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
34482 + pax_close_kernel();
34483
34484 return mca_register_driver (&madgemc_driver);
34485 }
34486 diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
34487 index 8d362e6..f91cc52 100644
34488 --- a/drivers/net/tokenring/proteon.c
34489 +++ b/drivers/net/tokenring/proteon.c
34490 @@ -353,9 +353,11 @@ static int __init proteon_init(void)
34491 struct platform_device *pdev;
34492 int i, num = 0, err = 0;
34493
34494 - proteon_netdev_ops = tms380tr_netdev_ops;
34495 - proteon_netdev_ops.ndo_open = proteon_open;
34496 - proteon_netdev_ops.ndo_stop = tms380tr_close;
34497 + pax_open_kernel();
34498 + memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34499 + *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
34500 + *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
34501 + pax_close_kernel();
34502
34503 err = platform_driver_register(&proteon_driver);
34504 if (err)
34505 diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
34506 index 46db5c5..37c1536 100644
34507 --- a/drivers/net/tokenring/skisa.c
34508 +++ b/drivers/net/tokenring/skisa.c
34509 @@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
34510 struct platform_device *pdev;
34511 int i, num = 0, err = 0;
34512
34513 - sk_isa_netdev_ops = tms380tr_netdev_ops;
34514 - sk_isa_netdev_ops.ndo_open = sk_isa_open;
34515 - sk_isa_netdev_ops.ndo_stop = tms380tr_close;
34516 + pax_open_kernel();
34517 + memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34518 + *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
34519 + *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
34520 + pax_close_kernel();
34521
34522 err = platform_driver_register(&sk_isa_driver);
34523 if (err)
34524 diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
34525 index e1324b4..e1b0041 100644
34526 --- a/drivers/net/usb/hso.c
34527 +++ b/drivers/net/usb/hso.c
34528 @@ -71,7 +71,7 @@
34529 #include <asm/byteorder.h>
34530 #include <linux/serial_core.h>
34531 #include <linux/serial.h>
34532 -
34533 +#include <asm/local.h>
34534
34535 #define MOD_AUTHOR "Option Wireless"
34536 #define MOD_DESCRIPTION "USB High Speed Option driver"
34537 @@ -257,7 +257,7 @@ struct hso_serial {
34538
34539 /* from usb_serial_port */
34540 struct tty_struct *tty;
34541 - int open_count;
34542 + local_t open_count;
34543 spinlock_t serial_lock;
34544
34545 int (*write_data) (struct hso_serial *serial);
34546 @@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
34547 struct urb *urb;
34548
34549 urb = serial->rx_urb[0];
34550 - if (serial->open_count > 0) {
34551 + if (local_read(&serial->open_count) > 0) {
34552 count = put_rxbuf_data(urb, serial);
34553 if (count == -1)
34554 return;
34555 @@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
34556 DUMP1(urb->transfer_buffer, urb->actual_length);
34557
34558 /* Anyone listening? */
34559 - if (serial->open_count == 0)
34560 + if (local_read(&serial->open_count) == 0)
34561 return;
34562
34563 if (status == 0) {
34564 @@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
34565 spin_unlock_irq(&serial->serial_lock);
34566
34567 /* check for port already opened, if not set the termios */
34568 - serial->open_count++;
34569 - if (serial->open_count == 1) {
34570 + if (local_inc_return(&serial->open_count) == 1) {
34571 serial->rx_state = RX_IDLE;
34572 /* Force default termio settings */
34573 _hso_serial_set_termios(tty, NULL);
34574 @@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
34575 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
34576 if (result) {
34577 hso_stop_serial_device(serial->parent);
34578 - serial->open_count--;
34579 + local_dec(&serial->open_count);
34580 kref_put(&serial->parent->ref, hso_serial_ref_free);
34581 }
34582 } else {
34583 @@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
34584
34585 /* reset the rts and dtr */
34586 /* do the actual close */
34587 - serial->open_count--;
34588 + local_dec(&serial->open_count);
34589
34590 - if (serial->open_count <= 0) {
34591 - serial->open_count = 0;
34592 + if (local_read(&serial->open_count) <= 0) {
34593 + local_set(&serial->open_count, 0);
34594 spin_lock_irq(&serial->serial_lock);
34595 if (serial->tty == tty) {
34596 serial->tty->driver_data = NULL;
34597 @@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
34598
34599 /* the actual setup */
34600 spin_lock_irqsave(&serial->serial_lock, flags);
34601 - if (serial->open_count)
34602 + if (local_read(&serial->open_count))
34603 _hso_serial_set_termios(tty, old);
34604 else
34605 tty->termios = old;
34606 @@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *urb)
34607 D1("Pending read interrupt on port %d\n", i);
34608 spin_lock(&serial->serial_lock);
34609 if (serial->rx_state == RX_IDLE &&
34610 - serial->open_count > 0) {
34611 + local_read(&serial->open_count) > 0) {
34612 /* Setup and send a ctrl req read on
34613 * port i */
34614 if (!serial->rx_urb_filled[0]) {
34615 @@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interface *iface)
34616 /* Start all serial ports */
34617 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
34618 if (serial_table[i] && (serial_table[i]->interface == iface)) {
34619 - if (dev2ser(serial_table[i])->open_count) {
34620 + if (local_read(&dev2ser(serial_table[i])->open_count)) {
34621 result =
34622 hso_start_serial_device(serial_table[i], GFP_NOIO);
34623 hso_kick_transmit(dev2ser(serial_table[i]));
34624 diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
34625 index efc0111..79c8f5b 100644
34626 --- a/drivers/net/wireless/ath/ath.h
34627 +++ b/drivers/net/wireless/ath/ath.h
34628 @@ -119,6 +119,7 @@ struct ath_ops {
34629 void (*write_flush) (void *);
34630 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
34631 };
34632 +typedef struct ath_ops __no_const ath_ops_no_const;
34633
34634 struct ath_common;
34635 struct ath_bus_ops;
34636 diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
34637 index 7b6417b..ab5db98 100644
34638 --- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
34639 +++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
34640 @@ -183,8 +183,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
34641 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
34642 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
34643
34644 - ACCESS_ONCE(ads->ds_link) = i->link;
34645 - ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
34646 + ACCESS_ONCE_RW(ads->ds_link) = i->link;
34647 + ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
34648
34649 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
34650 ctl6 = SM(i->keytype, AR_EncrType);
34651 @@ -198,26 +198,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
34652
34653 if ((i->is_first || i->is_last) &&
34654 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
34655 - ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
34656 + ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
34657 | set11nTries(i->rates, 1)
34658 | set11nTries(i->rates, 2)
34659 | set11nTries(i->rates, 3)
34660 | (i->dur_update ? AR_DurUpdateEna : 0)
34661 | SM(0, AR_BurstDur);
34662
34663 - ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
34664 + ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
34665 | set11nRate(i->rates, 1)
34666 | set11nRate(i->rates, 2)
34667 | set11nRate(i->rates, 3);
34668 } else {
34669 - ACCESS_ONCE(ads->ds_ctl2) = 0;
34670 - ACCESS_ONCE(ads->ds_ctl3) = 0;
34671 + ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
34672 + ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
34673 }
34674
34675 if (!i->is_first) {
34676 - ACCESS_ONCE(ads->ds_ctl0) = 0;
34677 - ACCESS_ONCE(ads->ds_ctl1) = ctl1;
34678 - ACCESS_ONCE(ads->ds_ctl6) = ctl6;
34679 + ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
34680 + ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
34681 + ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
34682 return;
34683 }
34684
34685 @@ -242,7 +242,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
34686 break;
34687 }
34688
34689 - ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
34690 + ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
34691 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
34692 | SM(i->txpower, AR_XmitPower)
34693 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
34694 @@ -252,19 +252,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
34695 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
34696 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
34697
34698 - ACCESS_ONCE(ads->ds_ctl1) = ctl1;
34699 - ACCESS_ONCE(ads->ds_ctl6) = ctl6;
34700 + ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
34701 + ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
34702
34703 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
34704 return;
34705
34706 - ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
34707 + ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
34708 | set11nPktDurRTSCTS(i->rates, 1);
34709
34710 - ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
34711 + ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
34712 | set11nPktDurRTSCTS(i->rates, 3);
34713
34714 - ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
34715 + ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
34716 | set11nRateFlags(i->rates, 1)
34717 | set11nRateFlags(i->rates, 2)
34718 | set11nRateFlags(i->rates, 3)
34719 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
34720 index 09b8c9d..905339e 100644
34721 --- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
34722 +++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
34723 @@ -35,47 +35,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
34724 (i->qcu << AR_TxQcuNum_S) | 0x17;
34725
34726 checksum += val;
34727 - ACCESS_ONCE(ads->info) = val;
34728 + ACCESS_ONCE_RW(ads->info) = val;
34729
34730 checksum += i->link;
34731 - ACCESS_ONCE(ads->link) = i->link;
34732 + ACCESS_ONCE_RW(ads->link) = i->link;
34733
34734 checksum += i->buf_addr[0];
34735 - ACCESS_ONCE(ads->data0) = i->buf_addr[0];
34736 + ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
34737 checksum += i->buf_addr[1];
34738 - ACCESS_ONCE(ads->data1) = i->buf_addr[1];
34739 + ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
34740 checksum += i->buf_addr[2];
34741 - ACCESS_ONCE(ads->data2) = i->buf_addr[2];
34742 + ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
34743 checksum += i->buf_addr[3];
34744 - ACCESS_ONCE(ads->data3) = i->buf_addr[3];
34745 + ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
34746
34747 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
34748 - ACCESS_ONCE(ads->ctl3) = val;
34749 + ACCESS_ONCE_RW(ads->ctl3) = val;
34750 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
34751 - ACCESS_ONCE(ads->ctl5) = val;
34752 + ACCESS_ONCE_RW(ads->ctl5) = val;
34753 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
34754 - ACCESS_ONCE(ads->ctl7) = val;
34755 + ACCESS_ONCE_RW(ads->ctl7) = val;
34756 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
34757 - ACCESS_ONCE(ads->ctl9) = val;
34758 + ACCESS_ONCE_RW(ads->ctl9) = val;
34759
34760 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
34761 - ACCESS_ONCE(ads->ctl10) = checksum;
34762 + ACCESS_ONCE_RW(ads->ctl10) = checksum;
34763
34764 if (i->is_first || i->is_last) {
34765 - ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
34766 + ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
34767 | set11nTries(i->rates, 1)
34768 | set11nTries(i->rates, 2)
34769 | set11nTries(i->rates, 3)
34770 | (i->dur_update ? AR_DurUpdateEna : 0)
34771 | SM(0, AR_BurstDur);
34772
34773 - ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
34774 + ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
34775 | set11nRate(i->rates, 1)
34776 | set11nRate(i->rates, 2)
34777 | set11nRate(i->rates, 3);
34778 } else {
34779 - ACCESS_ONCE(ads->ctl13) = 0;
34780 - ACCESS_ONCE(ads->ctl14) = 0;
34781 + ACCESS_ONCE_RW(ads->ctl13) = 0;
34782 + ACCESS_ONCE_RW(ads->ctl14) = 0;
34783 }
34784
34785 ads->ctl20 = 0;
34786 @@ -84,17 +84,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
34787
34788 ctl17 = SM(i->keytype, AR_EncrType);
34789 if (!i->is_first) {
34790 - ACCESS_ONCE(ads->ctl11) = 0;
34791 - ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
34792 - ACCESS_ONCE(ads->ctl15) = 0;
34793 - ACCESS_ONCE(ads->ctl16) = 0;
34794 - ACCESS_ONCE(ads->ctl17) = ctl17;
34795 - ACCESS_ONCE(ads->ctl18) = 0;
34796 - ACCESS_ONCE(ads->ctl19) = 0;
34797 + ACCESS_ONCE_RW(ads->ctl11) = 0;
34798 + ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
34799 + ACCESS_ONCE_RW(ads->ctl15) = 0;
34800 + ACCESS_ONCE_RW(ads->ctl16) = 0;
34801 + ACCESS_ONCE_RW(ads->ctl17) = ctl17;
34802 + ACCESS_ONCE_RW(ads->ctl18) = 0;
34803 + ACCESS_ONCE_RW(ads->ctl19) = 0;
34804 return;
34805 }
34806
34807 - ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
34808 + ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
34809 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
34810 | SM(i->txpower, AR_XmitPower)
34811 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
34812 @@ -130,22 +130,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
34813 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
34814 ctl12 |= SM(val, AR_PAPRDChainMask);
34815
34816 - ACCESS_ONCE(ads->ctl12) = ctl12;
34817 - ACCESS_ONCE(ads->ctl17) = ctl17;
34818 + ACCESS_ONCE_RW(ads->ctl12) = ctl12;
34819 + ACCESS_ONCE_RW(ads->ctl17) = ctl17;
34820
34821 - ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
34822 + ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
34823 | set11nPktDurRTSCTS(i->rates, 1);
34824
34825 - ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
34826 + ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
34827 | set11nPktDurRTSCTS(i->rates, 3);
34828
34829 - ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
34830 + ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
34831 | set11nRateFlags(i->rates, 1)
34832 | set11nRateFlags(i->rates, 2)
34833 | set11nRateFlags(i->rates, 3)
34834 | SM(i->rtscts_rate, AR_RTSCTSRate);
34835
34836 - ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
34837 + ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
34838 }
34839
34840 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
34841 diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
34842 index c8261d4..8d88929 100644
34843 --- a/drivers/net/wireless/ath/ath9k/hw.h
34844 +++ b/drivers/net/wireless/ath/ath9k/hw.h
34845 @@ -773,7 +773,7 @@ struct ath_hw_private_ops {
34846
34847 /* ANI */
34848 void (*ani_cache_ini_regs)(struct ath_hw *ah);
34849 -};
34850 +} __no_const;
34851
34852 /**
34853 * struct ath_hw_ops - callbacks used by hardware code and driver code
34854 @@ -803,7 +803,7 @@ struct ath_hw_ops {
34855 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
34856 struct ath_hw_antcomb_conf *antconf);
34857
34858 -};
34859 +} __no_const;
34860
34861 struct ath_nf_limits {
34862 s16 max;
34863 @@ -823,7 +823,7 @@ enum ath_cal_list {
34864 #define AH_FASTCC 0x4
34865
34866 struct ath_hw {
34867 - struct ath_ops reg_ops;
34868 + ath_ops_no_const reg_ops;
34869
34870 struct ieee80211_hw *hw;
34871 struct ath_common common;
34872 diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
34873 index af00e2c..ab04d34 100644
34874 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
34875 +++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
34876 @@ -545,7 +545,7 @@ struct phy_func_ptr {
34877 void (*carrsuppr)(struct brcms_phy *);
34878 s32 (*rxsigpwr)(struct brcms_phy *, s32);
34879 void (*detach)(struct brcms_phy *);
34880 -};
34881 +} __no_const;
34882
34883 struct brcms_phy {
34884 struct brcms_phy_pub pubpi_ro;
34885 diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
34886 index a7dfba8..e28eacd 100644
34887 --- a/drivers/net/wireless/iwlegacy/3945-mac.c
34888 +++ b/drivers/net/wireless/iwlegacy/3945-mac.c
34889 @@ -3647,7 +3647,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
34890 */
34891 if (il3945_mod_params.disable_hw_scan) {
34892 D_INFO("Disabling hw_scan\n");
34893 - il3945_hw_ops.hw_scan = NULL;
34894 + pax_open_kernel();
34895 + *(void **)&il3945_hw_ops.hw_scan = NULL;
34896 + pax_close_kernel();
34897 }
34898
34899 D_INFO("*** LOAD DRIVER ***\n");
34900 diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
34901 index f8fc239..8cade22 100644
34902 --- a/drivers/net/wireless/iwlwifi/iwl-debug.h
34903 +++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
34904 @@ -86,8 +86,8 @@ do { \
34905 } while (0)
34906
34907 #else
34908 -#define IWL_DEBUG(m, level, fmt, args...)
34909 -#define IWL_DEBUG_LIMIT(m, level, fmt, args...)
34910 +#define IWL_DEBUG(m, level, fmt, args...) do {} while (0)
34911 +#define IWL_DEBUG_LIMIT(m, level, fmt, args...) do {} while (0)
34912 #define iwl_print_hex_dump(m, level, p, len)
34913 #define IWL_DEBUG_QUIET_RFKILL(p, fmt, args...) \
34914 do { \
34915 diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
34916 index 4b9e730..7603659 100644
34917 --- a/drivers/net/wireless/mac80211_hwsim.c
34918 +++ b/drivers/net/wireless/mac80211_hwsim.c
34919 @@ -1677,9 +1677,11 @@ static int __init init_mac80211_hwsim(void)
34920 return -EINVAL;
34921
34922 if (fake_hw_scan) {
34923 - mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
34924 - mac80211_hwsim_ops.sw_scan_start = NULL;
34925 - mac80211_hwsim_ops.sw_scan_complete = NULL;
34926 + pax_open_kernel();
34927 + *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
34928 + *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
34929 + *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
34930 + pax_close_kernel();
34931 }
34932
34933 spin_lock_init(&hwsim_radio_lock);
34934 diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
34935 index 3186aa4..b35b09f 100644
34936 --- a/drivers/net/wireless/mwifiex/main.h
34937 +++ b/drivers/net/wireless/mwifiex/main.h
34938 @@ -536,7 +536,7 @@ struct mwifiex_if_ops {
34939 void (*cleanup_mpa_buf) (struct mwifiex_adapter *);
34940 int (*cmdrsp_complete) (struct mwifiex_adapter *, struct sk_buff *);
34941 int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *);
34942 -};
34943 +} __no_const;
34944
34945 struct mwifiex_adapter {
34946 u8 iface_type;
34947 diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
34948 index a330c69..a81540f 100644
34949 --- a/drivers/net/wireless/rndis_wlan.c
34950 +++ b/drivers/net/wireless/rndis_wlan.c
34951 @@ -1278,7 +1278,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
34952
34953 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
34954
34955 - if (rts_threshold < 0 || rts_threshold > 2347)
34956 + if (rts_threshold > 2347)
34957 rts_threshold = 2347;
34958
34959 tmp = cpu_to_le32(rts_threshold);
34960 diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/wl1251/wl1251.h
34961 index a77f1bb..c608b2b 100644
34962 --- a/drivers/net/wireless/wl1251/wl1251.h
34963 +++ b/drivers/net/wireless/wl1251/wl1251.h
34964 @@ -266,7 +266,7 @@ struct wl1251_if_operations {
34965 void (*reset)(struct wl1251 *wl);
34966 void (*enable_irq)(struct wl1251 *wl);
34967 void (*disable_irq)(struct wl1251 *wl);
34968 -};
34969 +} __no_const;
34970
34971 struct wl1251 {
34972 struct ieee80211_hw *hw;
34973 diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
34974 index f34b5b2..b5abb9f 100644
34975 --- a/drivers/oprofile/buffer_sync.c
34976 +++ b/drivers/oprofile/buffer_sync.c
34977 @@ -343,7 +343,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
34978 if (cookie == NO_COOKIE)
34979 offset = pc;
34980 if (cookie == INVALID_COOKIE) {
34981 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
34982 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
34983 offset = pc;
34984 }
34985 if (cookie != last_cookie) {
34986 @@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
34987 /* add userspace sample */
34988
34989 if (!mm) {
34990 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
34991 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
34992 return 0;
34993 }
34994
34995 cookie = lookup_dcookie(mm, s->eip, &offset);
34996
34997 if (cookie == INVALID_COOKIE) {
34998 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
34999 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
35000 return 0;
35001 }
35002
35003 @@ -563,7 +563,7 @@ void sync_buffer(int cpu)
35004 /* ignore backtraces if failed to add a sample */
35005 if (state == sb_bt_start) {
35006 state = sb_bt_ignore;
35007 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
35008 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
35009 }
35010 }
35011 release_mm(mm);
35012 diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
35013 index c0cc4e7..44d4e54 100644
35014 --- a/drivers/oprofile/event_buffer.c
35015 +++ b/drivers/oprofile/event_buffer.c
35016 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
35017 }
35018
35019 if (buffer_pos == buffer_size) {
35020 - atomic_inc(&oprofile_stats.event_lost_overflow);
35021 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
35022 return;
35023 }
35024
35025 diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
35026 index ed2c3ec..deda85a 100644
35027 --- a/drivers/oprofile/oprof.c
35028 +++ b/drivers/oprofile/oprof.c
35029 @@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
35030 if (oprofile_ops.switch_events())
35031 return;
35032
35033 - atomic_inc(&oprofile_stats.multiplex_counter);
35034 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
35035 start_switch_worker();
35036 }
35037
35038 diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
35039 index 917d28e..d62d981 100644
35040 --- a/drivers/oprofile/oprofile_stats.c
35041 +++ b/drivers/oprofile/oprofile_stats.c
35042 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
35043 cpu_buf->sample_invalid_eip = 0;
35044 }
35045
35046 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
35047 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
35048 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
35049 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
35050 - atomic_set(&oprofile_stats.multiplex_counter, 0);
35051 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
35052 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
35053 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
35054 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
35055 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
35056 }
35057
35058
35059 diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
35060 index 38b6fc0..b5cbfce 100644
35061 --- a/drivers/oprofile/oprofile_stats.h
35062 +++ b/drivers/oprofile/oprofile_stats.h
35063 @@ -13,11 +13,11 @@
35064 #include <linux/atomic.h>
35065
35066 struct oprofile_stat_struct {
35067 - atomic_t sample_lost_no_mm;
35068 - atomic_t sample_lost_no_mapping;
35069 - atomic_t bt_lost_no_mapping;
35070 - atomic_t event_lost_overflow;
35071 - atomic_t multiplex_counter;
35072 + atomic_unchecked_t sample_lost_no_mm;
35073 + atomic_unchecked_t sample_lost_no_mapping;
35074 + atomic_unchecked_t bt_lost_no_mapping;
35075 + atomic_unchecked_t event_lost_overflow;
35076 + atomic_unchecked_t multiplex_counter;
35077 };
35078
35079 extern struct oprofile_stat_struct oprofile_stats;
35080 diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
35081 index 2f0aa0f..90fab02 100644
35082 --- a/drivers/oprofile/oprofilefs.c
35083 +++ b/drivers/oprofile/oprofilefs.c
35084 @@ -193,7 +193,7 @@ static const struct file_operations atomic_ro_fops = {
35085
35086
35087 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
35088 - char const *name, atomic_t *val)
35089 + char const *name, atomic_unchecked_t *val)
35090 {
35091 return __oprofilefs_create_file(sb, root, name,
35092 &atomic_ro_fops, 0444, val);
35093 diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
35094 index 3f56bc0..707d642 100644
35095 --- a/drivers/parport/procfs.c
35096 +++ b/drivers/parport/procfs.c
35097 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
35098
35099 *ppos += len;
35100
35101 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
35102 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
35103 }
35104
35105 #ifdef CONFIG_PARPORT_1284
35106 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
35107
35108 *ppos += len;
35109
35110 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
35111 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
35112 }
35113 #endif /* IEEE1284.3 support. */
35114
35115 diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
35116 index 9fff878..ad0ad53 100644
35117 --- a/drivers/pci/hotplug/cpci_hotplug.h
35118 +++ b/drivers/pci/hotplug/cpci_hotplug.h
35119 @@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
35120 int (*hardware_test) (struct slot* slot, u32 value);
35121 u8 (*get_power) (struct slot* slot);
35122 int (*set_power) (struct slot* slot, int value);
35123 -};
35124 +} __no_const;
35125
35126 struct cpci_hp_controller {
35127 unsigned int irq;
35128 diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
35129 index 76ba8a1..20ca857 100644
35130 --- a/drivers/pci/hotplug/cpqphp_nvram.c
35131 +++ b/drivers/pci/hotplug/cpqphp_nvram.c
35132 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
35133
35134 void compaq_nvram_init (void __iomem *rom_start)
35135 {
35136 +
35137 +#ifndef CONFIG_PAX_KERNEXEC
35138 if (rom_start) {
35139 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
35140 }
35141 +#endif
35142 +
35143 dbg("int15 entry = %p\n", compaq_int15_entry_point);
35144
35145 /* initialize our int15 lock */
35146 diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
35147 index 24f049e..051f66e 100644
35148 --- a/drivers/pci/pcie/aspm.c
35149 +++ b/drivers/pci/pcie/aspm.c
35150 @@ -27,9 +27,9 @@
35151 #define MODULE_PARAM_PREFIX "pcie_aspm."
35152
35153 /* Note: those are not register definitions */
35154 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
35155 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
35156 -#define ASPM_STATE_L1 (4) /* L1 state */
35157 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
35158 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
35159 +#define ASPM_STATE_L1 (4U) /* L1 state */
35160 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
35161 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
35162
35163 diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
35164 index 71eac9c..2de27ef 100644
35165 --- a/drivers/pci/probe.c
35166 +++ b/drivers/pci/probe.c
35167 @@ -136,7 +136,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
35168 u32 l, sz, mask;
35169 u16 orig_cmd;
35170
35171 - mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
35172 + mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
35173
35174 if (!dev->mmio_always_on) {
35175 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
35176 diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
35177 index 27911b5..5b6db88 100644
35178 --- a/drivers/pci/proc.c
35179 +++ b/drivers/pci/proc.c
35180 @@ -476,7 +476,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
35181 static int __init pci_proc_init(void)
35182 {
35183 struct pci_dev *dev = NULL;
35184 +
35185 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
35186 +#ifdef CONFIG_GRKERNSEC_PROC_USER
35187 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
35188 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
35189 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
35190 +#endif
35191 +#else
35192 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
35193 +#endif
35194 proc_create("devices", 0, proc_bus_pci_dir,
35195 &proc_bus_pci_dev_operations);
35196 proc_initialized = 1;
35197 diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
35198 index ea0c607..58c4628 100644
35199 --- a/drivers/platform/x86/thinkpad_acpi.c
35200 +++ b/drivers/platform/x86/thinkpad_acpi.c
35201 @@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
35202 return 0;
35203 }
35204
35205 -void static hotkey_mask_warn_incomplete_mask(void)
35206 +static void hotkey_mask_warn_incomplete_mask(void)
35207 {
35208 /* log only what the user can fix... */
35209 const u32 wantedmask = hotkey_driver_mask &
35210 @@ -2325,11 +2325,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
35211 }
35212 }
35213
35214 -static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35215 - struct tp_nvram_state *newn,
35216 - const u32 event_mask)
35217 -{
35218 -
35219 #define TPACPI_COMPARE_KEY(__scancode, __member) \
35220 do { \
35221 if ((event_mask & (1 << __scancode)) && \
35222 @@ -2343,36 +2338,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35223 tpacpi_hotkey_send_key(__scancode); \
35224 } while (0)
35225
35226 - void issue_volchange(const unsigned int oldvol,
35227 - const unsigned int newvol)
35228 - {
35229 - unsigned int i = oldvol;
35230 +static void issue_volchange(const unsigned int oldvol,
35231 + const unsigned int newvol,
35232 + const u32 event_mask)
35233 +{
35234 + unsigned int i = oldvol;
35235
35236 - while (i > newvol) {
35237 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
35238 - i--;
35239 - }
35240 - while (i < newvol) {
35241 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
35242 - i++;
35243 - }
35244 + while (i > newvol) {
35245 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
35246 + i--;
35247 }
35248 + while (i < newvol) {
35249 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
35250 + i++;
35251 + }
35252 +}
35253
35254 - void issue_brightnesschange(const unsigned int oldbrt,
35255 - const unsigned int newbrt)
35256 - {
35257 - unsigned int i = oldbrt;
35258 +static void issue_brightnesschange(const unsigned int oldbrt,
35259 + const unsigned int newbrt,
35260 + const u32 event_mask)
35261 +{
35262 + unsigned int i = oldbrt;
35263
35264 - while (i > newbrt) {
35265 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
35266 - i--;
35267 - }
35268 - while (i < newbrt) {
35269 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
35270 - i++;
35271 - }
35272 + while (i > newbrt) {
35273 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
35274 + i--;
35275 + }
35276 + while (i < newbrt) {
35277 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
35278 + i++;
35279 }
35280 +}
35281
35282 +static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35283 + struct tp_nvram_state *newn,
35284 + const u32 event_mask)
35285 +{
35286 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
35287 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
35288 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
35289 @@ -2406,7 +2407,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35290 oldn->volume_level != newn->volume_level) {
35291 /* recently muted, or repeated mute keypress, or
35292 * multiple presses ending in mute */
35293 - issue_volchange(oldn->volume_level, newn->volume_level);
35294 + issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
35295 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
35296 }
35297 } else {
35298 @@ -2416,7 +2417,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35299 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
35300 }
35301 if (oldn->volume_level != newn->volume_level) {
35302 - issue_volchange(oldn->volume_level, newn->volume_level);
35303 + issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
35304 } else if (oldn->volume_toggle != newn->volume_toggle) {
35305 /* repeated vol up/down keypress at end of scale ? */
35306 if (newn->volume_level == 0)
35307 @@ -2429,7 +2430,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35308 /* handle brightness */
35309 if (oldn->brightness_level != newn->brightness_level) {
35310 issue_brightnesschange(oldn->brightness_level,
35311 - newn->brightness_level);
35312 + newn->brightness_level,
35313 + event_mask);
35314 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
35315 /* repeated key presses that didn't change state */
35316 if (newn->brightness_level == 0)
35317 @@ -2438,10 +2440,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35318 && !tp_features.bright_unkfw)
35319 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
35320 }
35321 +}
35322
35323 #undef TPACPI_COMPARE_KEY
35324 #undef TPACPI_MAY_SEND_KEY
35325 -}
35326
35327 /*
35328 * Polling driver
35329 diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
35330 index b859d16..5cc6b1a 100644
35331 --- a/drivers/pnp/pnpbios/bioscalls.c
35332 +++ b/drivers/pnp/pnpbios/bioscalls.c
35333 @@ -59,7 +59,7 @@ do { \
35334 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
35335 } while(0)
35336
35337 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
35338 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
35339 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
35340
35341 /*
35342 @@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
35343
35344 cpu = get_cpu();
35345 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
35346 +
35347 + pax_open_kernel();
35348 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
35349 + pax_close_kernel();
35350
35351 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
35352 spin_lock_irqsave(&pnp_bios_lock, flags);
35353 @@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
35354 :"memory");
35355 spin_unlock_irqrestore(&pnp_bios_lock, flags);
35356
35357 + pax_open_kernel();
35358 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
35359 + pax_close_kernel();
35360 +
35361 put_cpu();
35362
35363 /* If we get here and this is set then the PnP BIOS faulted on us. */
35364 @@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
35365 return status;
35366 }
35367
35368 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
35369 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
35370 {
35371 int i;
35372
35373 @@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
35374 pnp_bios_callpoint.offset = header->fields.pm16offset;
35375 pnp_bios_callpoint.segment = PNP_CS16;
35376
35377 + pax_open_kernel();
35378 +
35379 for_each_possible_cpu(i) {
35380 struct desc_struct *gdt = get_cpu_gdt_table(i);
35381 if (!gdt)
35382 @@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
35383 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
35384 (unsigned long)__va(header->fields.pm16dseg));
35385 }
35386 +
35387 + pax_close_kernel();
35388 }
35389 diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
35390 index b0ecacb..7c9da2e 100644
35391 --- a/drivers/pnp/resource.c
35392 +++ b/drivers/pnp/resource.c
35393 @@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
35394 return 1;
35395
35396 /* check if the resource is valid */
35397 - if (*irq < 0 || *irq > 15)
35398 + if (*irq > 15)
35399 return 0;
35400
35401 /* check if the resource is reserved */
35402 @@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
35403 return 1;
35404
35405 /* check if the resource is valid */
35406 - if (*dma < 0 || *dma == 4 || *dma > 7)
35407 + if (*dma == 4 || *dma > 7)
35408 return 0;
35409
35410 /* check if the resource is reserved */
35411 diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
35412 index 1ed6ea0..77c0bd2 100644
35413 --- a/drivers/power/bq27x00_battery.c
35414 +++ b/drivers/power/bq27x00_battery.c
35415 @@ -72,7 +72,7 @@
35416 struct bq27x00_device_info;
35417 struct bq27x00_access_methods {
35418 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
35419 -};
35420 +} __no_const;
35421
35422 enum bq27x00_chip { BQ27000, BQ27500 };
35423
35424 diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
35425 index a838e66..a9e1665 100644
35426 --- a/drivers/regulator/max8660.c
35427 +++ b/drivers/regulator/max8660.c
35428 @@ -383,8 +383,10 @@ static int __devinit max8660_probe(struct i2c_client *client,
35429 max8660->shadow_regs[MAX8660_OVER1] = 5;
35430 } else {
35431 /* Otherwise devices can be toggled via software */
35432 - max8660_dcdc_ops.enable = max8660_dcdc_enable;
35433 - max8660_dcdc_ops.disable = max8660_dcdc_disable;
35434 + pax_open_kernel();
35435 + *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
35436 + *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
35437 + pax_close_kernel();
35438 }
35439
35440 /*
35441 diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
35442 index e8cfc99..072aee2 100644
35443 --- a/drivers/regulator/mc13892-regulator.c
35444 +++ b/drivers/regulator/mc13892-regulator.c
35445 @@ -574,10 +574,12 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
35446 }
35447 mc13xxx_unlock(mc13892);
35448
35449 - mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
35450 + pax_open_kernel();
35451 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
35452 = mc13892_vcam_set_mode;
35453 - mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
35454 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
35455 = mc13892_vcam_get_mode;
35456 + pax_close_kernel();
35457
35458 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
35459 ARRAY_SIZE(mc13892_regulators));
35460 diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
35461 index cace6d3..f623fda 100644
35462 --- a/drivers/rtc/rtc-dev.c
35463 +++ b/drivers/rtc/rtc-dev.c
35464 @@ -14,6 +14,7 @@
35465 #include <linux/module.h>
35466 #include <linux/rtc.h>
35467 #include <linux/sched.h>
35468 +#include <linux/grsecurity.h>
35469 #include "rtc-core.h"
35470
35471 static dev_t rtc_devt;
35472 @@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *file,
35473 if (copy_from_user(&tm, uarg, sizeof(tm)))
35474 return -EFAULT;
35475
35476 + gr_log_timechange();
35477 +
35478 return rtc_set_time(rtc, &tm);
35479
35480 case RTC_PIE_ON:
35481 diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
35482 index ffb5878..e6d785c 100644
35483 --- a/drivers/scsi/aacraid/aacraid.h
35484 +++ b/drivers/scsi/aacraid/aacraid.h
35485 @@ -492,7 +492,7 @@ struct adapter_ops
35486 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
35487 /* Administrative operations */
35488 int (*adapter_comm)(struct aac_dev * dev, int comm);
35489 -};
35490 +} __no_const;
35491
35492 /*
35493 * Define which interrupt handler needs to be installed
35494 diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
35495 index 705e13e..91c873c 100644
35496 --- a/drivers/scsi/aacraid/linit.c
35497 +++ b/drivers/scsi/aacraid/linit.c
35498 @@ -93,7 +93,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
35499 #elif defined(__devinitconst)
35500 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
35501 #else
35502 -static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
35503 +static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
35504 #endif
35505 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
35506 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
35507 diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
35508 index d5ff142..49c0ebb 100644
35509 --- a/drivers/scsi/aic94xx/aic94xx_init.c
35510 +++ b/drivers/scsi/aic94xx/aic94xx_init.c
35511 @@ -1012,7 +1012,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
35512 .lldd_control_phy = asd_control_phy,
35513 };
35514
35515 -static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
35516 +static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
35517 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
35518 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
35519 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
35520 diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
35521 index a796de9..1ef20e1 100644
35522 --- a/drivers/scsi/bfa/bfa.h
35523 +++ b/drivers/scsi/bfa/bfa.h
35524 @@ -196,7 +196,7 @@ struct bfa_hwif_s {
35525 u32 *end);
35526 int cpe_vec_q0;
35527 int rme_vec_q0;
35528 -};
35529 +} __no_const;
35530 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
35531
35532 struct bfa_faa_cbfn_s {
35533 diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
35534 index f0f80e2..8ec946b 100644
35535 --- a/drivers/scsi/bfa/bfa_fcpim.c
35536 +++ b/drivers/scsi/bfa/bfa_fcpim.c
35537 @@ -3715,7 +3715,7 @@ bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
35538
35539 bfa_iotag_attach(fcp);
35540
35541 - fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
35542 + fcp->itn_arr = (bfa_itn_s_no_const *) bfa_mem_kva_curp(fcp);
35543 bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
35544 (fcp->num_itns * sizeof(struct bfa_itn_s));
35545 memset(fcp->itn_arr, 0,
35546 @@ -3773,7 +3773,7 @@ bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
35547 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
35548 {
35549 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
35550 - struct bfa_itn_s *itn;
35551 + bfa_itn_s_no_const *itn;
35552
35553 itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
35554 itn->isr = isr;
35555 diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
35556 index 36f26da..38a34a8 100644
35557 --- a/drivers/scsi/bfa/bfa_fcpim.h
35558 +++ b/drivers/scsi/bfa/bfa_fcpim.h
35559 @@ -37,6 +37,7 @@ struct bfa_iotag_s {
35560 struct bfa_itn_s {
35561 bfa_isr_func_t isr;
35562 };
35563 +typedef struct bfa_itn_s __no_const bfa_itn_s_no_const;
35564
35565 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
35566 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
35567 @@ -147,7 +148,7 @@ struct bfa_fcp_mod_s {
35568 struct list_head iotag_tio_free_q; /* free IO resources */
35569 struct list_head iotag_unused_q; /* unused IO resources*/
35570 struct bfa_iotag_s *iotag_arr;
35571 - struct bfa_itn_s *itn_arr;
35572 + bfa_itn_s_no_const *itn_arr;
35573 int num_ioim_reqs;
35574 int num_fwtio_reqs;
35575 int num_itns;
35576 diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
35577 index 546d46b..642fa5b 100644
35578 --- a/drivers/scsi/bfa/bfa_ioc.h
35579 +++ b/drivers/scsi/bfa/bfa_ioc.h
35580 @@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
35581 bfa_ioc_disable_cbfn_t disable_cbfn;
35582 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
35583 bfa_ioc_reset_cbfn_t reset_cbfn;
35584 -};
35585 +} __no_const;
35586
35587 /*
35588 * IOC event notification mechanism.
35589 @@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
35590 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
35591 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
35592 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
35593 -};
35594 +} __no_const;
35595
35596 /*
35597 * Queue element to wait for room in request queue. FIFO order is
35598 diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
35599 index 351dc0b..951dc32 100644
35600 --- a/drivers/scsi/hosts.c
35601 +++ b/drivers/scsi/hosts.c
35602 @@ -42,7 +42,7 @@
35603 #include "scsi_logging.h"
35604
35605
35606 -static atomic_t scsi_host_next_hn; /* host_no for next new host */
35607 +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
35608
35609
35610 static void scsi_host_cls_release(struct device *dev)
35611 @@ -357,7 +357,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
35612 * subtract one because we increment first then return, but we need to
35613 * know what the next host number was before increment
35614 */
35615 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
35616 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
35617 shost->dma_channel = 0xff;
35618
35619 /* These three are default values which can be overridden */
35620 diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
35621 index b96962c..0c82ec2 100644
35622 --- a/drivers/scsi/hpsa.c
35623 +++ b/drivers/scsi/hpsa.c
35624 @@ -507,7 +507,7 @@ static inline u32 next_command(struct ctlr_info *h)
35625 u32 a;
35626
35627 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
35628 - return h->access.command_completed(h);
35629 + return h->access->command_completed(h);
35630
35631 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
35632 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
35633 @@ -2991,7 +2991,7 @@ static void start_io(struct ctlr_info *h)
35634 while (!list_empty(&h->reqQ)) {
35635 c = list_entry(h->reqQ.next, struct CommandList, list);
35636 /* can't do anything if fifo is full */
35637 - if ((h->access.fifo_full(h))) {
35638 + if ((h->access->fifo_full(h))) {
35639 dev_warn(&h->pdev->dev, "fifo full\n");
35640 break;
35641 }
35642 @@ -3001,7 +3001,7 @@ static void start_io(struct ctlr_info *h)
35643 h->Qdepth--;
35644
35645 /* Tell the controller execute command */
35646 - h->access.submit_command(h, c);
35647 + h->access->submit_command(h, c);
35648
35649 /* Put job onto the completed Q */
35650 addQ(&h->cmpQ, c);
35651 @@ -3010,17 +3010,17 @@ static void start_io(struct ctlr_info *h)
35652
35653 static inline unsigned long get_next_completion(struct ctlr_info *h)
35654 {
35655 - return h->access.command_completed(h);
35656 + return h->access->command_completed(h);
35657 }
35658
35659 static inline bool interrupt_pending(struct ctlr_info *h)
35660 {
35661 - return h->access.intr_pending(h);
35662 + return h->access->intr_pending(h);
35663 }
35664
35665 static inline long interrupt_not_for_us(struct ctlr_info *h)
35666 {
35667 - return (h->access.intr_pending(h) == 0) ||
35668 + return (h->access->intr_pending(h) == 0) ||
35669 (h->interrupts_enabled == 0);
35670 }
35671
35672 @@ -3919,7 +3919,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
35673 if (prod_index < 0)
35674 return -ENODEV;
35675 h->product_name = products[prod_index].product_name;
35676 - h->access = *(products[prod_index].access);
35677 + h->access = products[prod_index].access;
35678
35679 if (hpsa_board_disabled(h->pdev)) {
35680 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
35681 @@ -4164,7 +4164,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
35682
35683 assert_spin_locked(&lockup_detector_lock);
35684 remove_ctlr_from_lockup_detector_list(h);
35685 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
35686 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
35687 spin_lock_irqsave(&h->lock, flags);
35688 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
35689 spin_unlock_irqrestore(&h->lock, flags);
35690 @@ -4344,7 +4344,7 @@ reinit_after_soft_reset:
35691 }
35692
35693 /* make sure the board interrupts are off */
35694 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
35695 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
35696
35697 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
35698 goto clean2;
35699 @@ -4378,7 +4378,7 @@ reinit_after_soft_reset:
35700 * fake ones to scoop up any residual completions.
35701 */
35702 spin_lock_irqsave(&h->lock, flags);
35703 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
35704 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
35705 spin_unlock_irqrestore(&h->lock, flags);
35706 free_irq(h->intr[h->intr_mode], h);
35707 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
35708 @@ -4397,9 +4397,9 @@ reinit_after_soft_reset:
35709 dev_info(&h->pdev->dev, "Board READY.\n");
35710 dev_info(&h->pdev->dev,
35711 "Waiting for stale completions to drain.\n");
35712 - h->access.set_intr_mask(h, HPSA_INTR_ON);
35713 + h->access->set_intr_mask(h, HPSA_INTR_ON);
35714 msleep(10000);
35715 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
35716 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
35717
35718 rc = controller_reset_failed(h->cfgtable);
35719 if (rc)
35720 @@ -4420,7 +4420,7 @@ reinit_after_soft_reset:
35721 }
35722
35723 /* Turn the interrupts on so we can service requests */
35724 - h->access.set_intr_mask(h, HPSA_INTR_ON);
35725 + h->access->set_intr_mask(h, HPSA_INTR_ON);
35726
35727 hpsa_hba_inquiry(h);
35728 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
35729 @@ -4472,7 +4472,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
35730 * To write all data in the battery backed cache to disks
35731 */
35732 hpsa_flush_cache(h);
35733 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
35734 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
35735 free_irq(h->intr[h->intr_mode], h);
35736 #ifdef CONFIG_PCI_MSI
35737 if (h->msix_vector)
35738 @@ -4636,7 +4636,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
35739 return;
35740 }
35741 /* Change the access methods to the performant access methods */
35742 - h->access = SA5_performant_access;
35743 + h->access = &SA5_performant_access;
35744 h->transMethod = CFGTBL_Trans_Performant;
35745 }
35746
35747 diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
35748 index 91edafb..a9b88ec 100644
35749 --- a/drivers/scsi/hpsa.h
35750 +++ b/drivers/scsi/hpsa.h
35751 @@ -73,7 +73,7 @@ struct ctlr_info {
35752 unsigned int msix_vector;
35753 unsigned int msi_vector;
35754 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
35755 - struct access_method access;
35756 + struct access_method *access;
35757
35758 /* queue and queue Info */
35759 struct list_head reqQ;
35760 diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
35761 index f2df059..a3a9930 100644
35762 --- a/drivers/scsi/ips.h
35763 +++ b/drivers/scsi/ips.h
35764 @@ -1027,7 +1027,7 @@ typedef struct {
35765 int (*intr)(struct ips_ha *);
35766 void (*enableint)(struct ips_ha *);
35767 uint32_t (*statupd)(struct ips_ha *);
35768 -} ips_hw_func_t;
35769 +} __no_const ips_hw_func_t;
35770
35771 typedef struct ips_ha {
35772 uint8_t ha_id[IPS_MAX_CHANNELS+1];
35773 diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
35774 index 4d70d96..84d0573 100644
35775 --- a/drivers/scsi/libfc/fc_exch.c
35776 +++ b/drivers/scsi/libfc/fc_exch.c
35777 @@ -105,12 +105,12 @@ struct fc_exch_mgr {
35778 * all together if not used XXX
35779 */
35780 struct {
35781 - atomic_t no_free_exch;
35782 - atomic_t no_free_exch_xid;
35783 - atomic_t xid_not_found;
35784 - atomic_t xid_busy;
35785 - atomic_t seq_not_found;
35786 - atomic_t non_bls_resp;
35787 + atomic_unchecked_t no_free_exch;
35788 + atomic_unchecked_t no_free_exch_xid;
35789 + atomic_unchecked_t xid_not_found;
35790 + atomic_unchecked_t xid_busy;
35791 + atomic_unchecked_t seq_not_found;
35792 + atomic_unchecked_t non_bls_resp;
35793 } stats;
35794 };
35795
35796 @@ -719,7 +719,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
35797 /* allocate memory for exchange */
35798 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
35799 if (!ep) {
35800 - atomic_inc(&mp->stats.no_free_exch);
35801 + atomic_inc_unchecked(&mp->stats.no_free_exch);
35802 goto out;
35803 }
35804 memset(ep, 0, sizeof(*ep));
35805 @@ -780,7 +780,7 @@ out:
35806 return ep;
35807 err:
35808 spin_unlock_bh(&pool->lock);
35809 - atomic_inc(&mp->stats.no_free_exch_xid);
35810 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
35811 mempool_free(ep, mp->ep_pool);
35812 return NULL;
35813 }
35814 @@ -923,7 +923,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
35815 xid = ntohs(fh->fh_ox_id); /* we originated exch */
35816 ep = fc_exch_find(mp, xid);
35817 if (!ep) {
35818 - atomic_inc(&mp->stats.xid_not_found);
35819 + atomic_inc_unchecked(&mp->stats.xid_not_found);
35820 reject = FC_RJT_OX_ID;
35821 goto out;
35822 }
35823 @@ -953,7 +953,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
35824 ep = fc_exch_find(mp, xid);
35825 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
35826 if (ep) {
35827 - atomic_inc(&mp->stats.xid_busy);
35828 + atomic_inc_unchecked(&mp->stats.xid_busy);
35829 reject = FC_RJT_RX_ID;
35830 goto rel;
35831 }
35832 @@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
35833 }
35834 xid = ep->xid; /* get our XID */
35835 } else if (!ep) {
35836 - atomic_inc(&mp->stats.xid_not_found);
35837 + atomic_inc_unchecked(&mp->stats.xid_not_found);
35838 reject = FC_RJT_RX_ID; /* XID not found */
35839 goto out;
35840 }
35841 @@ -981,7 +981,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
35842 } else {
35843 sp = &ep->seq;
35844 if (sp->id != fh->fh_seq_id) {
35845 - atomic_inc(&mp->stats.seq_not_found);
35846 + atomic_inc_unchecked(&mp->stats.seq_not_found);
35847 if (f_ctl & FC_FC_END_SEQ) {
35848 /*
35849 * Update sequence_id based on incoming last
35850 @@ -1431,22 +1431,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
35851
35852 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
35853 if (!ep) {
35854 - atomic_inc(&mp->stats.xid_not_found);
35855 + atomic_inc_unchecked(&mp->stats.xid_not_found);
35856 goto out;
35857 }
35858 if (ep->esb_stat & ESB_ST_COMPLETE) {
35859 - atomic_inc(&mp->stats.xid_not_found);
35860 + atomic_inc_unchecked(&mp->stats.xid_not_found);
35861 goto rel;
35862 }
35863 if (ep->rxid == FC_XID_UNKNOWN)
35864 ep->rxid = ntohs(fh->fh_rx_id);
35865 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
35866 - atomic_inc(&mp->stats.xid_not_found);
35867 + atomic_inc_unchecked(&mp->stats.xid_not_found);
35868 goto rel;
35869 }
35870 if (ep->did != ntoh24(fh->fh_s_id) &&
35871 ep->did != FC_FID_FLOGI) {
35872 - atomic_inc(&mp->stats.xid_not_found);
35873 + atomic_inc_unchecked(&mp->stats.xid_not_found);
35874 goto rel;
35875 }
35876 sof = fr_sof(fp);
35877 @@ -1455,7 +1455,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
35878 sp->ssb_stat |= SSB_ST_RESP;
35879 sp->id = fh->fh_seq_id;
35880 } else if (sp->id != fh->fh_seq_id) {
35881 - atomic_inc(&mp->stats.seq_not_found);
35882 + atomic_inc_unchecked(&mp->stats.seq_not_found);
35883 goto rel;
35884 }
35885
35886 @@ -1519,9 +1519,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
35887 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
35888
35889 if (!sp)
35890 - atomic_inc(&mp->stats.xid_not_found);
35891 + atomic_inc_unchecked(&mp->stats.xid_not_found);
35892 else
35893 - atomic_inc(&mp->stats.non_bls_resp);
35894 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
35895
35896 fc_frame_free(fp);
35897 }
35898 diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
35899 index db9238f..4378ed2 100644
35900 --- a/drivers/scsi/libsas/sas_ata.c
35901 +++ b/drivers/scsi/libsas/sas_ata.c
35902 @@ -368,7 +368,7 @@ static struct ata_port_operations sas_sata_ops = {
35903 .postreset = ata_std_postreset,
35904 .error_handler = ata_std_error_handler,
35905 .post_internal_cmd = sas_ata_post_internal,
35906 - .qc_defer = ata_std_qc_defer,
35907 + .qc_defer = ata_std_qc_defer,
35908 .qc_prep = ata_noop_qc_prep,
35909 .qc_issue = sas_ata_qc_issue,
35910 .qc_fill_rtf = sas_ata_qc_fill_rtf,
35911 diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
35912 index 825f930..ce42672 100644
35913 --- a/drivers/scsi/lpfc/lpfc.h
35914 +++ b/drivers/scsi/lpfc/lpfc.h
35915 @@ -413,7 +413,7 @@ struct lpfc_vport {
35916 struct dentry *debug_nodelist;
35917 struct dentry *vport_debugfs_root;
35918 struct lpfc_debugfs_trc *disc_trc;
35919 - atomic_t disc_trc_cnt;
35920 + atomic_unchecked_t disc_trc_cnt;
35921 #endif
35922 uint8_t stat_data_enabled;
35923 uint8_t stat_data_blocked;
35924 @@ -821,8 +821,8 @@ struct lpfc_hba {
35925 struct timer_list fabric_block_timer;
35926 unsigned long bit_flags;
35927 #define FABRIC_COMANDS_BLOCKED 0
35928 - atomic_t num_rsrc_err;
35929 - atomic_t num_cmd_success;
35930 + atomic_unchecked_t num_rsrc_err;
35931 + atomic_unchecked_t num_cmd_success;
35932 unsigned long last_rsrc_error_time;
35933 unsigned long last_ramp_down_time;
35934 unsigned long last_ramp_up_time;
35935 @@ -852,7 +852,7 @@ struct lpfc_hba {
35936
35937 struct dentry *debug_slow_ring_trc;
35938 struct lpfc_debugfs_trc *slow_ring_trc;
35939 - atomic_t slow_ring_trc_cnt;
35940 + atomic_unchecked_t slow_ring_trc_cnt;
35941 /* iDiag debugfs sub-directory */
35942 struct dentry *idiag_root;
35943 struct dentry *idiag_pci_cfg;
35944 diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
35945 index 3587a3f..d45b81b 100644
35946 --- a/drivers/scsi/lpfc/lpfc_debugfs.c
35947 +++ b/drivers/scsi/lpfc/lpfc_debugfs.c
35948 @@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
35949
35950 #include <linux/debugfs.h>
35951
35952 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
35953 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
35954 static unsigned long lpfc_debugfs_start_time = 0L;
35955
35956 /* iDiag */
35957 @@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
35958 lpfc_debugfs_enable = 0;
35959
35960 len = 0;
35961 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
35962 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
35963 (lpfc_debugfs_max_disc_trc - 1);
35964 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
35965 dtp = vport->disc_trc + i;
35966 @@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
35967 lpfc_debugfs_enable = 0;
35968
35969 len = 0;
35970 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
35971 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
35972 (lpfc_debugfs_max_slow_ring_trc - 1);
35973 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
35974 dtp = phba->slow_ring_trc + i;
35975 @@ -636,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
35976 !vport || !vport->disc_trc)
35977 return;
35978
35979 - index = atomic_inc_return(&vport->disc_trc_cnt) &
35980 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
35981 (lpfc_debugfs_max_disc_trc - 1);
35982 dtp = vport->disc_trc + index;
35983 dtp->fmt = fmt;
35984 dtp->data1 = data1;
35985 dtp->data2 = data2;
35986 dtp->data3 = data3;
35987 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
35988 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
35989 dtp->jif = jiffies;
35990 #endif
35991 return;
35992 @@ -674,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
35993 !phba || !phba->slow_ring_trc)
35994 return;
35995
35996 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
35997 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
35998 (lpfc_debugfs_max_slow_ring_trc - 1);
35999 dtp = phba->slow_ring_trc + index;
36000 dtp->fmt = fmt;
36001 dtp->data1 = data1;
36002 dtp->data2 = data2;
36003 dtp->data3 = data3;
36004 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
36005 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
36006 dtp->jif = jiffies;
36007 #endif
36008 return;
36009 @@ -4040,7 +4040,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
36010 "slow_ring buffer\n");
36011 goto debug_failed;
36012 }
36013 - atomic_set(&phba->slow_ring_trc_cnt, 0);
36014 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
36015 memset(phba->slow_ring_trc, 0,
36016 (sizeof(struct lpfc_debugfs_trc) *
36017 lpfc_debugfs_max_slow_ring_trc));
36018 @@ -4086,7 +4086,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
36019 "buffer\n");
36020 goto debug_failed;
36021 }
36022 - atomic_set(&vport->disc_trc_cnt, 0);
36023 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
36024
36025 snprintf(name, sizeof(name), "discovery_trace");
36026 vport->debug_disc_trc =
36027 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
36028 index dfea2da..8e17227 100644
36029 --- a/drivers/scsi/lpfc/lpfc_init.c
36030 +++ b/drivers/scsi/lpfc/lpfc_init.c
36031 @@ -10145,8 +10145,10 @@ lpfc_init(void)
36032 printk(LPFC_COPYRIGHT "\n");
36033
36034 if (lpfc_enable_npiv) {
36035 - lpfc_transport_functions.vport_create = lpfc_vport_create;
36036 - lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36037 + pax_open_kernel();
36038 + *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
36039 + *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36040 + pax_close_kernel();
36041 }
36042 lpfc_transport_template =
36043 fc_attach_transport(&lpfc_transport_functions);
36044 diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
36045 index c60f5d0..751535c 100644
36046 --- a/drivers/scsi/lpfc/lpfc_scsi.c
36047 +++ b/drivers/scsi/lpfc/lpfc_scsi.c
36048 @@ -305,7 +305,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
36049 uint32_t evt_posted;
36050
36051 spin_lock_irqsave(&phba->hbalock, flags);
36052 - atomic_inc(&phba->num_rsrc_err);
36053 + atomic_inc_unchecked(&phba->num_rsrc_err);
36054 phba->last_rsrc_error_time = jiffies;
36055
36056 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
36057 @@ -346,7 +346,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
36058 unsigned long flags;
36059 struct lpfc_hba *phba = vport->phba;
36060 uint32_t evt_posted;
36061 - atomic_inc(&phba->num_cmd_success);
36062 + atomic_inc_unchecked(&phba->num_cmd_success);
36063
36064 if (vport->cfg_lun_queue_depth <= queue_depth)
36065 return;
36066 @@ -390,8 +390,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
36067 unsigned long num_rsrc_err, num_cmd_success;
36068 int i;
36069
36070 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
36071 - num_cmd_success = atomic_read(&phba->num_cmd_success);
36072 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
36073 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
36074
36075 vports = lpfc_create_vport_work_array(phba);
36076 if (vports != NULL)
36077 @@ -411,8 +411,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
36078 }
36079 }
36080 lpfc_destroy_vport_work_array(phba, vports);
36081 - atomic_set(&phba->num_rsrc_err, 0);
36082 - atomic_set(&phba->num_cmd_success, 0);
36083 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
36084 + atomic_set_unchecked(&phba->num_cmd_success, 0);
36085 }
36086
36087 /**
36088 @@ -446,8 +446,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
36089 }
36090 }
36091 lpfc_destroy_vport_work_array(phba, vports);
36092 - atomic_set(&phba->num_rsrc_err, 0);
36093 - atomic_set(&phba->num_cmd_success, 0);
36094 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
36095 + atomic_set_unchecked(&phba->num_cmd_success, 0);
36096 }
36097
36098 /**
36099 diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
36100 index ea8a0b4..812a124 100644
36101 --- a/drivers/scsi/pmcraid.c
36102 +++ b/drivers/scsi/pmcraid.c
36103 @@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
36104 res->scsi_dev = scsi_dev;
36105 scsi_dev->hostdata = res;
36106 res->change_detected = 0;
36107 - atomic_set(&res->read_failures, 0);
36108 - atomic_set(&res->write_failures, 0);
36109 + atomic_set_unchecked(&res->read_failures, 0);
36110 + atomic_set_unchecked(&res->write_failures, 0);
36111 rc = 0;
36112 }
36113 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
36114 @@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
36115
36116 /* If this was a SCSI read/write command keep count of errors */
36117 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
36118 - atomic_inc(&res->read_failures);
36119 + atomic_inc_unchecked(&res->read_failures);
36120 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
36121 - atomic_inc(&res->write_failures);
36122 + atomic_inc_unchecked(&res->write_failures);
36123
36124 if (!RES_IS_GSCSI(res->cfg_entry) &&
36125 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
36126 @@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
36127 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
36128 * hrrq_id assigned here in queuecommand
36129 */
36130 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
36131 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
36132 pinstance->num_hrrq;
36133 cmd->cmd_done = pmcraid_io_done;
36134
36135 @@ -3859,7 +3859,7 @@ static long pmcraid_ioctl_passthrough(
36136 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
36137 * hrrq_id assigned here in queuecommand
36138 */
36139 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
36140 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
36141 pinstance->num_hrrq;
36142
36143 if (request_size) {
36144 @@ -4497,7 +4497,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
36145
36146 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
36147 /* add resources only after host is added into system */
36148 - if (!atomic_read(&pinstance->expose_resources))
36149 + if (!atomic_read_unchecked(&pinstance->expose_resources))
36150 return;
36151
36152 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
36153 @@ -5331,8 +5331,8 @@ static int __devinit pmcraid_init_instance(
36154 init_waitqueue_head(&pinstance->reset_wait_q);
36155
36156 atomic_set(&pinstance->outstanding_cmds, 0);
36157 - atomic_set(&pinstance->last_message_id, 0);
36158 - atomic_set(&pinstance->expose_resources, 0);
36159 + atomic_set_unchecked(&pinstance->last_message_id, 0);
36160 + atomic_set_unchecked(&pinstance->expose_resources, 0);
36161
36162 INIT_LIST_HEAD(&pinstance->free_res_q);
36163 INIT_LIST_HEAD(&pinstance->used_res_q);
36164 @@ -6047,7 +6047,7 @@ static int __devinit pmcraid_probe(
36165 /* Schedule worker thread to handle CCN and take care of adding and
36166 * removing devices to OS
36167 */
36168 - atomic_set(&pinstance->expose_resources, 1);
36169 + atomic_set_unchecked(&pinstance->expose_resources, 1);
36170 schedule_work(&pinstance->worker_q);
36171 return rc;
36172
36173 diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
36174 index ca496c7..9c791d5 100644
36175 --- a/drivers/scsi/pmcraid.h
36176 +++ b/drivers/scsi/pmcraid.h
36177 @@ -748,7 +748,7 @@ struct pmcraid_instance {
36178 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
36179
36180 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
36181 - atomic_t last_message_id;
36182 + atomic_unchecked_t last_message_id;
36183
36184 /* configuration table */
36185 struct pmcraid_config_table *cfg_table;
36186 @@ -777,7 +777,7 @@ struct pmcraid_instance {
36187 atomic_t outstanding_cmds;
36188
36189 /* should add/delete resources to mid-layer now ?*/
36190 - atomic_t expose_resources;
36191 + atomic_unchecked_t expose_resources;
36192
36193
36194
36195 @@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
36196 struct pmcraid_config_table_entry_ext cfg_entry_ext;
36197 };
36198 struct scsi_device *scsi_dev; /* Link scsi_device structure */
36199 - atomic_t read_failures; /* count of failed READ commands */
36200 - atomic_t write_failures; /* count of failed WRITE commands */
36201 + atomic_unchecked_t read_failures; /* count of failed READ commands */
36202 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
36203
36204 /* To indicate add/delete/modify during CCN */
36205 u8 change_detected;
36206 diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
36207 index af1003f..be55a75 100644
36208 --- a/drivers/scsi/qla2xxx/qla_def.h
36209 +++ b/drivers/scsi/qla2xxx/qla_def.h
36210 @@ -2247,7 +2247,7 @@ struct isp_operations {
36211 int (*start_scsi) (srb_t *);
36212 int (*abort_isp) (struct scsi_qla_host *);
36213 int (*iospace_config)(struct qla_hw_data*);
36214 -};
36215 +} __no_const;
36216
36217 /* MSI-X Support *************************************************************/
36218
36219 diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
36220 index bfe6854..ceac088 100644
36221 --- a/drivers/scsi/qla4xxx/ql4_def.h
36222 +++ b/drivers/scsi/qla4xxx/ql4_def.h
36223 @@ -261,7 +261,7 @@ struct ddb_entry {
36224 * (4000 only) */
36225 atomic_t relogin_timer; /* Max Time to wait for
36226 * relogin to complete */
36227 - atomic_t relogin_retry_count; /* Num of times relogin has been
36228 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
36229 * retried */
36230 uint32_t default_time2wait; /* Default Min time between
36231 * relogins (+aens) */
36232 diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
36233 index ce6d3b7..73fac54 100644
36234 --- a/drivers/scsi/qla4xxx/ql4_os.c
36235 +++ b/drivers/scsi/qla4xxx/ql4_os.c
36236 @@ -2178,12 +2178,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
36237 */
36238 if (!iscsi_is_session_online(cls_sess)) {
36239 /* Reset retry relogin timer */
36240 - atomic_inc(&ddb_entry->relogin_retry_count);
36241 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
36242 DEBUG2(ql4_printk(KERN_INFO, ha,
36243 "%s: index[%d] relogin timed out-retrying"
36244 " relogin (%d), retry (%d)\n", __func__,
36245 ddb_entry->fw_ddb_index,
36246 - atomic_read(&ddb_entry->relogin_retry_count),
36247 + atomic_read_unchecked(&ddb_entry->relogin_retry_count),
36248 ddb_entry->default_time2wait + 4));
36249 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
36250 atomic_set(&ddb_entry->retry_relogin_timer,
36251 @@ -3953,7 +3953,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
36252
36253 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
36254 atomic_set(&ddb_entry->relogin_timer, 0);
36255 - atomic_set(&ddb_entry->relogin_retry_count, 0);
36256 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
36257 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
36258 ddb_entry->default_relogin_timeout =
36259 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
36260 diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
36261 index 2aeb2e9..46e3925 100644
36262 --- a/drivers/scsi/scsi.c
36263 +++ b/drivers/scsi/scsi.c
36264 @@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
36265 unsigned long timeout;
36266 int rtn = 0;
36267
36268 - atomic_inc(&cmd->device->iorequest_cnt);
36269 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
36270
36271 /* check if the device is still usable */
36272 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
36273 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
36274 index b2c95db..227d74e 100644
36275 --- a/drivers/scsi/scsi_lib.c
36276 +++ b/drivers/scsi/scsi_lib.c
36277 @@ -1411,7 +1411,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
36278 shost = sdev->host;
36279 scsi_init_cmd_errh(cmd);
36280 cmd->result = DID_NO_CONNECT << 16;
36281 - atomic_inc(&cmd->device->iorequest_cnt);
36282 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
36283
36284 /*
36285 * SCSI request completion path will do scsi_device_unbusy(),
36286 @@ -1437,9 +1437,9 @@ static void scsi_softirq_done(struct request *rq)
36287
36288 INIT_LIST_HEAD(&cmd->eh_entry);
36289
36290 - atomic_inc(&cmd->device->iodone_cnt);
36291 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
36292 if (cmd->result)
36293 - atomic_inc(&cmd->device->ioerr_cnt);
36294 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
36295
36296 disposition = scsi_decide_disposition(cmd);
36297 if (disposition != SUCCESS &&
36298 diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
36299 index 04c2a27..9d8bd66 100644
36300 --- a/drivers/scsi/scsi_sysfs.c
36301 +++ b/drivers/scsi/scsi_sysfs.c
36302 @@ -660,7 +660,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
36303 char *buf) \
36304 { \
36305 struct scsi_device *sdev = to_scsi_device(dev); \
36306 - unsigned long long count = atomic_read(&sdev->field); \
36307 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
36308 return snprintf(buf, 20, "0x%llx\n", count); \
36309 } \
36310 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
36311 diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
36312 index 84a1fdf..693b0d6 100644
36313 --- a/drivers/scsi/scsi_tgt_lib.c
36314 +++ b/drivers/scsi/scsi_tgt_lib.c
36315 @@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
36316 int err;
36317
36318 dprintk("%lx %u\n", uaddr, len);
36319 - err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
36320 + err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
36321 if (err) {
36322 /*
36323 * TODO: need to fixup sg_tablesize, max_segment_size,
36324 diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
36325 index f59d4a0..1d89407 100644
36326 --- a/drivers/scsi/scsi_transport_fc.c
36327 +++ b/drivers/scsi/scsi_transport_fc.c
36328 @@ -484,7 +484,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
36329 * Netlink Infrastructure
36330 */
36331
36332 -static atomic_t fc_event_seq;
36333 +static atomic_unchecked_t fc_event_seq;
36334
36335 /**
36336 * fc_get_event_number - Obtain the next sequential FC event number
36337 @@ -497,7 +497,7 @@ static atomic_t fc_event_seq;
36338 u32
36339 fc_get_event_number(void)
36340 {
36341 - return atomic_add_return(1, &fc_event_seq);
36342 + return atomic_add_return_unchecked(1, &fc_event_seq);
36343 }
36344 EXPORT_SYMBOL(fc_get_event_number);
36345
36346 @@ -645,7 +645,7 @@ static __init int fc_transport_init(void)
36347 {
36348 int error;
36349
36350 - atomic_set(&fc_event_seq, 0);
36351 + atomic_set_unchecked(&fc_event_seq, 0);
36352
36353 error = transport_class_register(&fc_host_class);
36354 if (error)
36355 @@ -835,7 +835,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
36356 char *cp;
36357
36358 *val = simple_strtoul(buf, &cp, 0);
36359 - if ((*cp && (*cp != '\n')) || (*val < 0))
36360 + if (*cp && (*cp != '\n'))
36361 return -EINVAL;
36362 /*
36363 * Check for overflow; dev_loss_tmo is u32
36364 diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
36365 index cfd4914..ddd7129 100644
36366 --- a/drivers/scsi/scsi_transport_iscsi.c
36367 +++ b/drivers/scsi/scsi_transport_iscsi.c
36368 @@ -79,7 +79,7 @@ struct iscsi_internal {
36369 struct transport_container session_cont;
36370 };
36371
36372 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
36373 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
36374 static struct workqueue_struct *iscsi_eh_timer_workq;
36375
36376 static DEFINE_IDA(iscsi_sess_ida);
36377 @@ -1063,7 +1063,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
36378 int err;
36379
36380 ihost = shost->shost_data;
36381 - session->sid = atomic_add_return(1, &iscsi_session_nr);
36382 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
36383
36384 if (target_id == ISCSI_MAX_TARGET) {
36385 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
36386 @@ -2680,7 +2680,7 @@ static __init int iscsi_transport_init(void)
36387 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
36388 ISCSI_TRANSPORT_VERSION);
36389
36390 - atomic_set(&iscsi_session_nr, 0);
36391 + atomic_set_unchecked(&iscsi_session_nr, 0);
36392
36393 err = class_register(&iscsi_transport_class);
36394 if (err)
36395 diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
36396 index 21a045e..ec89e03 100644
36397 --- a/drivers/scsi/scsi_transport_srp.c
36398 +++ b/drivers/scsi/scsi_transport_srp.c
36399 @@ -33,7 +33,7 @@
36400 #include "scsi_transport_srp_internal.h"
36401
36402 struct srp_host_attrs {
36403 - atomic_t next_port_id;
36404 + atomic_unchecked_t next_port_id;
36405 };
36406 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
36407
36408 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
36409 struct Scsi_Host *shost = dev_to_shost(dev);
36410 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
36411
36412 - atomic_set(&srp_host->next_port_id, 0);
36413 + atomic_set_unchecked(&srp_host->next_port_id, 0);
36414 return 0;
36415 }
36416
36417 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
36418 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
36419 rport->roles = ids->roles;
36420
36421 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
36422 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
36423 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
36424
36425 transport_setup_device(&rport->dev);
36426 diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
36427 index eacd46b..e3f4d62 100644
36428 --- a/drivers/scsi/sg.c
36429 +++ b/drivers/scsi/sg.c
36430 @@ -1077,7 +1077,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
36431 sdp->disk->disk_name,
36432 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
36433 NULL,
36434 - (char *)arg);
36435 + (char __user *)arg);
36436 case BLKTRACESTART:
36437 return blk_trace_startstop(sdp->device->request_queue, 1);
36438 case BLKTRACESTOP:
36439 @@ -2312,7 +2312,7 @@ struct sg_proc_leaf {
36440 const struct file_operations * fops;
36441 };
36442
36443 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
36444 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
36445 {"allow_dio", &adio_fops},
36446 {"debug", &debug_fops},
36447 {"def_reserved_size", &dressz_fops},
36448 @@ -2332,7 +2332,7 @@ sg_proc_init(void)
36449 if (!sg_proc_sgp)
36450 return 1;
36451 for (k = 0; k < num_leaves; ++k) {
36452 - struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
36453 + const struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
36454 umode_t mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
36455 proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops);
36456 }
36457 diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
36458 index f64250e..1ee3049 100644
36459 --- a/drivers/spi/spi-dw-pci.c
36460 +++ b/drivers/spi/spi-dw-pci.c
36461 @@ -149,7 +149,7 @@ static int spi_resume(struct pci_dev *pdev)
36462 #define spi_resume NULL
36463 #endif
36464
36465 -static const struct pci_device_id pci_ids[] __devinitdata = {
36466 +static const struct pci_device_id pci_ids[] __devinitconst = {
36467 /* Intel MID platform SPI controller 0 */
36468 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
36469 {},
36470 diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
36471 index b2ccdea..84cde75 100644
36472 --- a/drivers/spi/spi.c
36473 +++ b/drivers/spi/spi.c
36474 @@ -1024,7 +1024,7 @@ int spi_bus_unlock(struct spi_master *master)
36475 EXPORT_SYMBOL_GPL(spi_bus_unlock);
36476
36477 /* portable code must never pass more than 32 bytes */
36478 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
36479 +#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
36480
36481 static u8 *buf;
36482
36483 diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
36484 index 400df8c..065d4f4 100644
36485 --- a/drivers/staging/octeon/ethernet-rx.c
36486 +++ b/drivers/staging/octeon/ethernet-rx.c
36487 @@ -420,11 +420,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
36488 /* Increment RX stats for virtual ports */
36489 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
36490 #ifdef CONFIG_64BIT
36491 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
36492 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
36493 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
36494 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
36495 #else
36496 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
36497 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
36498 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
36499 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
36500 #endif
36501 }
36502 netif_receive_skb(skb);
36503 @@ -436,9 +436,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
36504 dev->name);
36505 */
36506 #ifdef CONFIG_64BIT
36507 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
36508 + atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
36509 #else
36510 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
36511 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
36512 #endif
36513 dev_kfree_skb_irq(skb);
36514 }
36515 diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
36516 index 9112cd8..92f8d51 100644
36517 --- a/drivers/staging/octeon/ethernet.c
36518 +++ b/drivers/staging/octeon/ethernet.c
36519 @@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
36520 * since the RX tasklet also increments it.
36521 */
36522 #ifdef CONFIG_64BIT
36523 - atomic64_add(rx_status.dropped_packets,
36524 - (atomic64_t *)&priv->stats.rx_dropped);
36525 + atomic64_add_unchecked(rx_status.dropped_packets,
36526 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
36527 #else
36528 - atomic_add(rx_status.dropped_packets,
36529 - (atomic_t *)&priv->stats.rx_dropped);
36530 + atomic_add_unchecked(rx_status.dropped_packets,
36531 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
36532 #endif
36533 }
36534
36535 diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
36536 index 86308a0..feaa925 100644
36537 --- a/drivers/staging/rtl8712/rtl871x_io.h
36538 +++ b/drivers/staging/rtl8712/rtl871x_io.h
36539 @@ -108,7 +108,7 @@ struct _io_ops {
36540 u8 *pmem);
36541 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
36542 u8 *pmem);
36543 -};
36544 +} __no_const;
36545
36546 struct io_req {
36547 struct list_head list;
36548 diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
36549 index c7b5e8b..783d6cb 100644
36550 --- a/drivers/staging/sbe-2t3e3/netdev.c
36551 +++ b/drivers/staging/sbe-2t3e3/netdev.c
36552 @@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
36553 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
36554
36555 if (rlen)
36556 - if (copy_to_user(data, &resp, rlen))
36557 + if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
36558 return -EFAULT;
36559
36560 return 0;
36561 diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
36562 index 42cdafe..2769103 100644
36563 --- a/drivers/staging/speakup/speakup_soft.c
36564 +++ b/drivers/staging/speakup/speakup_soft.c
36565 @@ -241,11 +241,11 @@ static ssize_t softsynth_read(struct file *fp, char *buf, size_t count,
36566 break;
36567 } else if (!initialized) {
36568 if (*init) {
36569 - ch = *init;
36570 init++;
36571 } else {
36572 initialized = 1;
36573 }
36574 + ch = *init;
36575 } else {
36576 ch = synth_buffer_getc();
36577 }
36578 diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
36579 index b8f8c48..1fc5025 100644
36580 --- a/drivers/staging/usbip/usbip_common.h
36581 +++ b/drivers/staging/usbip/usbip_common.h
36582 @@ -289,7 +289,7 @@ struct usbip_device {
36583 void (*shutdown)(struct usbip_device *);
36584 void (*reset)(struct usbip_device *);
36585 void (*unusable)(struct usbip_device *);
36586 - } eh_ops;
36587 + } __no_const eh_ops;
36588 };
36589
36590 /* usbip_common.c */
36591 diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
36592 index 88b3298..3783eee 100644
36593 --- a/drivers/staging/usbip/vhci.h
36594 +++ b/drivers/staging/usbip/vhci.h
36595 @@ -88,7 +88,7 @@ struct vhci_hcd {
36596 unsigned resuming:1;
36597 unsigned long re_timeout;
36598
36599 - atomic_t seqnum;
36600 + atomic_unchecked_t seqnum;
36601
36602 /*
36603 * NOTE:
36604 diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
36605 index 2ee97e2..0420b86 100644
36606 --- a/drivers/staging/usbip/vhci_hcd.c
36607 +++ b/drivers/staging/usbip/vhci_hcd.c
36608 @@ -527,7 +527,7 @@ static void vhci_tx_urb(struct urb *urb)
36609 return;
36610 }
36611
36612 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
36613 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
36614 if (priv->seqnum == 0xffff)
36615 dev_info(&urb->dev->dev, "seqnum max\n");
36616
36617 @@ -779,7 +779,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
36618 return -ENOMEM;
36619 }
36620
36621 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
36622 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
36623 if (unlink->seqnum == 0xffff)
36624 pr_info("seqnum max\n");
36625
36626 @@ -969,7 +969,7 @@ static int vhci_start(struct usb_hcd *hcd)
36627 vdev->rhport = rhport;
36628 }
36629
36630 - atomic_set(&vhci->seqnum, 0);
36631 + atomic_set_unchecked(&vhci->seqnum, 0);
36632 spin_lock_init(&vhci->lock);
36633
36634 hcd->power_budget = 0; /* no limit */
36635 diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
36636 index 3f511b4..d3dbc1e 100644
36637 --- a/drivers/staging/usbip/vhci_rx.c
36638 +++ b/drivers/staging/usbip/vhci_rx.c
36639 @@ -77,7 +77,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
36640 if (!urb) {
36641 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
36642 pr_info("max seqnum %d\n",
36643 - atomic_read(&the_controller->seqnum));
36644 + atomic_read_unchecked(&the_controller->seqnum));
36645 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
36646 return;
36647 }
36648 diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
36649 index 7735027..30eed13 100644
36650 --- a/drivers/staging/vt6655/hostap.c
36651 +++ b/drivers/staging/vt6655/hostap.c
36652 @@ -79,14 +79,13 @@ static int msglevel =MSG_LEVEL_INFO;
36653 *
36654 */
36655
36656 +static net_device_ops_no_const apdev_netdev_ops;
36657 +
36658 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
36659 {
36660 PSDevice apdev_priv;
36661 struct net_device *dev = pDevice->dev;
36662 int ret;
36663 - const struct net_device_ops apdev_netdev_ops = {
36664 - .ndo_start_xmit = pDevice->tx_80211,
36665 - };
36666
36667 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
36668
36669 @@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
36670 *apdev_priv = *pDevice;
36671 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
36672
36673 + /* only half broken now */
36674 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
36675 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
36676
36677 pDevice->apdev->type = ARPHRD_IEEE80211;
36678 diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
36679 index 51b5adf..098e320 100644
36680 --- a/drivers/staging/vt6656/hostap.c
36681 +++ b/drivers/staging/vt6656/hostap.c
36682 @@ -80,14 +80,13 @@ static int msglevel =MSG_LEVEL_INFO;
36683 *
36684 */
36685
36686 +static net_device_ops_no_const apdev_netdev_ops;
36687 +
36688 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
36689 {
36690 PSDevice apdev_priv;
36691 struct net_device *dev = pDevice->dev;
36692 int ret;
36693 - const struct net_device_ops apdev_netdev_ops = {
36694 - .ndo_start_xmit = pDevice->tx_80211,
36695 - };
36696
36697 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
36698
36699 @@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
36700 *apdev_priv = *pDevice;
36701 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
36702
36703 + /* only half broken now */
36704 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
36705 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
36706
36707 pDevice->apdev->type = ARPHRD_IEEE80211;
36708 diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
36709 index 7843dfd..3db105f 100644
36710 --- a/drivers/staging/wlan-ng/hfa384x_usb.c
36711 +++ b/drivers/staging/wlan-ng/hfa384x_usb.c
36712 @@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
36713
36714 struct usbctlx_completor {
36715 int (*complete) (struct usbctlx_completor *);
36716 -};
36717 +} __no_const;
36718
36719 static int
36720 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
36721 diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
36722 index 1ca66ea..76f1343 100644
36723 --- a/drivers/staging/zcache/tmem.c
36724 +++ b/drivers/staging/zcache/tmem.c
36725 @@ -39,7 +39,7 @@
36726 * A tmem host implementation must use this function to register callbacks
36727 * for memory allocation.
36728 */
36729 -static struct tmem_hostops tmem_hostops;
36730 +static tmem_hostops_no_const tmem_hostops;
36731
36732 static void tmem_objnode_tree_init(void);
36733
36734 @@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
36735 * A tmem host implementation must use this function to register
36736 * callbacks for a page-accessible memory (PAM) implementation
36737 */
36738 -static struct tmem_pamops tmem_pamops;
36739 +static tmem_pamops_no_const tmem_pamops;
36740
36741 void tmem_register_pamops(struct tmem_pamops *m)
36742 {
36743 diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
36744 index ed147c4..94fc3c6 100644
36745 --- a/drivers/staging/zcache/tmem.h
36746 +++ b/drivers/staging/zcache/tmem.h
36747 @@ -180,6 +180,7 @@ struct tmem_pamops {
36748 void (*new_obj)(struct tmem_obj *);
36749 int (*replace_in_obj)(void *, struct tmem_obj *);
36750 };
36751 +typedef struct tmem_pamops __no_const tmem_pamops_no_const;
36752 extern void tmem_register_pamops(struct tmem_pamops *m);
36753
36754 /* memory allocation methods provided by the host implementation */
36755 @@ -189,6 +190,7 @@ struct tmem_hostops {
36756 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
36757 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
36758 };
36759 +typedef struct tmem_hostops __no_const tmem_hostops_no_const;
36760 extern void tmem_register_hostops(struct tmem_hostops *m);
36761
36762 /* core tmem accessor functions */
36763 diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
36764 index 501b27c..39dc3d3 100644
36765 --- a/drivers/target/iscsi/iscsi_target.c
36766 +++ b/drivers/target/iscsi/iscsi_target.c
36767 @@ -1363,7 +1363,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
36768 * outstanding_r2ts reaches zero, go ahead and send the delayed
36769 * TASK_ABORTED status.
36770 */
36771 - if (atomic_read(&se_cmd->t_transport_aborted) != 0) {
36772 + if (atomic_read_unchecked(&se_cmd->t_transport_aborted) != 0) {
36773 if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
36774 if (--cmd->outstanding_r2ts < 1) {
36775 iscsit_stop_dataout_timer(cmd);
36776 diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
36777 index dcb0618..97e3d85 100644
36778 --- a/drivers/target/target_core_tmr.c
36779 +++ b/drivers/target/target_core_tmr.c
36780 @@ -260,7 +260,7 @@ static void core_tmr_drain_task_list(
36781 cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
36782 cmd->t_task_list_num,
36783 atomic_read(&cmd->t_task_cdbs_left),
36784 - atomic_read(&cmd->t_task_cdbs_sent),
36785 + atomic_read_unchecked(&cmd->t_task_cdbs_sent),
36786 atomic_read(&cmd->t_transport_active),
36787 atomic_read(&cmd->t_transport_stop),
36788 atomic_read(&cmd->t_transport_sent));
36789 @@ -291,7 +291,7 @@ static void core_tmr_drain_task_list(
36790 pr_debug("LUN_RESET: got t_transport_active = 1 for"
36791 " task: %p, t_fe_count: %d dev: %p\n", task,
36792 fe_count, dev);
36793 - atomic_set(&cmd->t_transport_aborted, 1);
36794 + atomic_set_unchecked(&cmd->t_transport_aborted, 1);
36795 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
36796
36797 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
36798 @@ -299,7 +299,7 @@ static void core_tmr_drain_task_list(
36799 }
36800 pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p,"
36801 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
36802 - atomic_set(&cmd->t_transport_aborted, 1);
36803 + atomic_set_unchecked(&cmd->t_transport_aborted, 1);
36804 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
36805
36806 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
36807 diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
36808 index cd5cd95..5249d30 100644
36809 --- a/drivers/target/target_core_transport.c
36810 +++ b/drivers/target/target_core_transport.c
36811 @@ -1330,7 +1330,7 @@ struct se_device *transport_add_device_to_core_hba(
36812 spin_lock_init(&dev->se_port_lock);
36813 spin_lock_init(&dev->se_tmr_lock);
36814 spin_lock_init(&dev->qf_cmd_lock);
36815 - atomic_set(&dev->dev_ordered_id, 0);
36816 + atomic_set_unchecked(&dev->dev_ordered_id, 0);
36817
36818 se_dev_set_default_attribs(dev, dev_limits);
36819
36820 @@ -1517,7 +1517,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
36821 * Used to determine when ORDERED commands should go from
36822 * Dormant to Active status.
36823 */
36824 - cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
36825 + cmd->se_ordered_id = atomic_inc_return_unchecked(&cmd->se_dev->dev_ordered_id);
36826 smp_mb__after_atomic_inc();
36827 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
36828 cmd->se_ordered_id, cmd->sam_task_attr,
36829 @@ -1862,7 +1862,7 @@ static void transport_generic_request_failure(struct se_cmd *cmd)
36830 " t_transport_active: %d t_transport_stop: %d"
36831 " t_transport_sent: %d\n", cmd->t_task_list_num,
36832 atomic_read(&cmd->t_task_cdbs_left),
36833 - atomic_read(&cmd->t_task_cdbs_sent),
36834 + atomic_read_unchecked(&cmd->t_task_cdbs_sent),
36835 atomic_read(&cmd->t_task_cdbs_ex_left),
36836 atomic_read(&cmd->t_transport_active),
36837 atomic_read(&cmd->t_transport_stop),
36838 @@ -2121,9 +2121,9 @@ check_depth:
36839 cmd = task->task_se_cmd;
36840 spin_lock_irqsave(&cmd->t_state_lock, flags);
36841 task->task_flags |= (TF_ACTIVE | TF_SENT);
36842 - atomic_inc(&cmd->t_task_cdbs_sent);
36843 + atomic_inc_unchecked(&cmd->t_task_cdbs_sent);
36844
36845 - if (atomic_read(&cmd->t_task_cdbs_sent) ==
36846 + if (atomic_read_unchecked(&cmd->t_task_cdbs_sent) ==
36847 cmd->t_task_list_num)
36848 atomic_set(&cmd->t_transport_sent, 1);
36849
36850 @@ -4348,7 +4348,7 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
36851 atomic_set(&cmd->transport_lun_stop, 0);
36852 }
36853 if (!atomic_read(&cmd->t_transport_active) ||
36854 - atomic_read(&cmd->t_transport_aborted)) {
36855 + atomic_read_unchecked(&cmd->t_transport_aborted)) {
36856 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
36857 return false;
36858 }
36859 @@ -4597,7 +4597,7 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
36860 {
36861 int ret = 0;
36862
36863 - if (atomic_read(&cmd->t_transport_aborted) != 0) {
36864 + if (atomic_read_unchecked(&cmd->t_transport_aborted) != 0) {
36865 if (!send_status ||
36866 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
36867 return 1;
36868 @@ -4634,7 +4634,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
36869 */
36870 if (cmd->data_direction == DMA_TO_DEVICE) {
36871 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
36872 - atomic_inc(&cmd->t_transport_aborted);
36873 + atomic_inc_unchecked(&cmd->t_transport_aborted);
36874 smp_mb__after_atomic_inc();
36875 }
36876 }
36877 diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
36878 index b9040be..e3f5aab 100644
36879 --- a/drivers/tty/hvc/hvcs.c
36880 +++ b/drivers/tty/hvc/hvcs.c
36881 @@ -83,6 +83,7 @@
36882 #include <asm/hvcserver.h>
36883 #include <asm/uaccess.h>
36884 #include <asm/vio.h>
36885 +#include <asm/local.h>
36886
36887 /*
36888 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
36889 @@ -270,7 +271,7 @@ struct hvcs_struct {
36890 unsigned int index;
36891
36892 struct tty_struct *tty;
36893 - int open_count;
36894 + local_t open_count;
36895
36896 /*
36897 * Used to tell the driver kernel_thread what operations need to take
36898 @@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
36899
36900 spin_lock_irqsave(&hvcsd->lock, flags);
36901
36902 - if (hvcsd->open_count > 0) {
36903 + if (local_read(&hvcsd->open_count) > 0) {
36904 spin_unlock_irqrestore(&hvcsd->lock, flags);
36905 printk(KERN_INFO "HVCS: vterm state unchanged. "
36906 "The hvcs device node is still in use.\n");
36907 @@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
36908 if ((retval = hvcs_partner_connect(hvcsd)))
36909 goto error_release;
36910
36911 - hvcsd->open_count = 1;
36912 + local_set(&hvcsd->open_count, 1);
36913 hvcsd->tty = tty;
36914 tty->driver_data = hvcsd;
36915
36916 @@ -1179,7 +1180,7 @@ fast_open:
36917
36918 spin_lock_irqsave(&hvcsd->lock, flags);
36919 kref_get(&hvcsd->kref);
36920 - hvcsd->open_count++;
36921 + local_inc(&hvcsd->open_count);
36922 hvcsd->todo_mask |= HVCS_SCHED_READ;
36923 spin_unlock_irqrestore(&hvcsd->lock, flags);
36924
36925 @@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
36926 hvcsd = tty->driver_data;
36927
36928 spin_lock_irqsave(&hvcsd->lock, flags);
36929 - if (--hvcsd->open_count == 0) {
36930 + if (local_dec_and_test(&hvcsd->open_count)) {
36931
36932 vio_disable_interrupts(hvcsd->vdev);
36933
36934 @@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
36935 free_irq(irq, hvcsd);
36936 kref_put(&hvcsd->kref, destroy_hvcs_struct);
36937 return;
36938 - } else if (hvcsd->open_count < 0) {
36939 + } else if (local_read(&hvcsd->open_count) < 0) {
36940 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
36941 " is missmanaged.\n",
36942 - hvcsd->vdev->unit_address, hvcsd->open_count);
36943 + hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
36944 }
36945
36946 spin_unlock_irqrestore(&hvcsd->lock, flags);
36947 @@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struct * tty)
36948
36949 spin_lock_irqsave(&hvcsd->lock, flags);
36950 /* Preserve this so that we know how many kref refs to put */
36951 - temp_open_count = hvcsd->open_count;
36952 + temp_open_count = local_read(&hvcsd->open_count);
36953
36954 /*
36955 * Don't kref put inside the spinlock because the destruction
36956 @@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struct * tty)
36957 hvcsd->tty->driver_data = NULL;
36958 hvcsd->tty = NULL;
36959
36960 - hvcsd->open_count = 0;
36961 + local_set(&hvcsd->open_count, 0);
36962
36963 /* This will drop any buffered data on the floor which is OK in a hangup
36964 * scenario. */
36965 @@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct *tty,
36966 * the middle of a write operation? This is a crummy place to do this
36967 * but we want to keep it all in the spinlock.
36968 */
36969 - if (hvcsd->open_count <= 0) {
36970 + if (local_read(&hvcsd->open_count) <= 0) {
36971 spin_unlock_irqrestore(&hvcsd->lock, flags);
36972 return -ENODEV;
36973 }
36974 @@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_struct *tty)
36975 {
36976 struct hvcs_struct *hvcsd = tty->driver_data;
36977
36978 - if (!hvcsd || hvcsd->open_count <= 0)
36979 + if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
36980 return 0;
36981
36982 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
36983 diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
36984 index ef92869..f4ebd88 100644
36985 --- a/drivers/tty/ipwireless/tty.c
36986 +++ b/drivers/tty/ipwireless/tty.c
36987 @@ -29,6 +29,7 @@
36988 #include <linux/tty_driver.h>
36989 #include <linux/tty_flip.h>
36990 #include <linux/uaccess.h>
36991 +#include <asm/local.h>
36992
36993 #include "tty.h"
36994 #include "network.h"
36995 @@ -51,7 +52,7 @@ struct ipw_tty {
36996 int tty_type;
36997 struct ipw_network *network;
36998 struct tty_struct *linux_tty;
36999 - int open_count;
37000 + local_t open_count;
37001 unsigned int control_lines;
37002 struct mutex ipw_tty_mutex;
37003 int tx_bytes_queued;
37004 @@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
37005 mutex_unlock(&tty->ipw_tty_mutex);
37006 return -ENODEV;
37007 }
37008 - if (tty->open_count == 0)
37009 + if (local_read(&tty->open_count) == 0)
37010 tty->tx_bytes_queued = 0;
37011
37012 - tty->open_count++;
37013 + local_inc(&tty->open_count);
37014
37015 tty->linux_tty = linux_tty;
37016 linux_tty->driver_data = tty;
37017 @@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
37018
37019 static void do_ipw_close(struct ipw_tty *tty)
37020 {
37021 - tty->open_count--;
37022 -
37023 - if (tty->open_count == 0) {
37024 + if (local_dec_return(&tty->open_count) == 0) {
37025 struct tty_struct *linux_tty = tty->linux_tty;
37026
37027 if (linux_tty != NULL) {
37028 @@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
37029 return;
37030
37031 mutex_lock(&tty->ipw_tty_mutex);
37032 - if (tty->open_count == 0) {
37033 + if (local_read(&tty->open_count) == 0) {
37034 mutex_unlock(&tty->ipw_tty_mutex);
37035 return;
37036 }
37037 @@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
37038 return;
37039 }
37040
37041 - if (!tty->open_count) {
37042 + if (!local_read(&tty->open_count)) {
37043 mutex_unlock(&tty->ipw_tty_mutex);
37044 return;
37045 }
37046 @@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *linux_tty,
37047 return -ENODEV;
37048
37049 mutex_lock(&tty->ipw_tty_mutex);
37050 - if (!tty->open_count) {
37051 + if (!local_read(&tty->open_count)) {
37052 mutex_unlock(&tty->ipw_tty_mutex);
37053 return -EINVAL;
37054 }
37055 @@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
37056 if (!tty)
37057 return -ENODEV;
37058
37059 - if (!tty->open_count)
37060 + if (!local_read(&tty->open_count))
37061 return -EINVAL;
37062
37063 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
37064 @@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
37065 if (!tty)
37066 return 0;
37067
37068 - if (!tty->open_count)
37069 + if (!local_read(&tty->open_count))
37070 return 0;
37071
37072 return tty->tx_bytes_queued;
37073 @@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
37074 if (!tty)
37075 return -ENODEV;
37076
37077 - if (!tty->open_count)
37078 + if (!local_read(&tty->open_count))
37079 return -EINVAL;
37080
37081 return get_control_lines(tty);
37082 @@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
37083 if (!tty)
37084 return -ENODEV;
37085
37086 - if (!tty->open_count)
37087 + if (!local_read(&tty->open_count))
37088 return -EINVAL;
37089
37090 return set_control_lines(tty, set, clear);
37091 @@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
37092 if (!tty)
37093 return -ENODEV;
37094
37095 - if (!tty->open_count)
37096 + if (!local_read(&tty->open_count))
37097 return -EINVAL;
37098
37099 /* FIXME: Exactly how is the tty object locked here .. */
37100 @@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
37101 against a parallel ioctl etc */
37102 mutex_lock(&ttyj->ipw_tty_mutex);
37103 }
37104 - while (ttyj->open_count)
37105 + while (local_read(&ttyj->open_count))
37106 do_ipw_close(ttyj);
37107 ipwireless_disassociate_network_ttys(network,
37108 ttyj->channel_idx);
37109 diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
37110 index fc7bbba..9527e93 100644
37111 --- a/drivers/tty/n_gsm.c
37112 +++ b/drivers/tty/n_gsm.c
37113 @@ -1629,7 +1629,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
37114 kref_init(&dlci->ref);
37115 mutex_init(&dlci->mutex);
37116 dlci->fifo = &dlci->_fifo;
37117 - if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
37118 + if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
37119 kfree(dlci);
37120 return NULL;
37121 }
37122 diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
37123 index d2256d0..97476fa 100644
37124 --- a/drivers/tty/n_tty.c
37125 +++ b/drivers/tty/n_tty.c
37126 @@ -2123,6 +2123,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
37127 {
37128 *ops = tty_ldisc_N_TTY;
37129 ops->owner = NULL;
37130 - ops->refcount = ops->flags = 0;
37131 + atomic_set(&ops->refcount, 0);
37132 + ops->flags = 0;
37133 }
37134 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
37135 diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
37136 index d8653ab..f8afd9d 100644
37137 --- a/drivers/tty/pty.c
37138 +++ b/drivers/tty/pty.c
37139 @@ -765,8 +765,10 @@ static void __init unix98_pty_init(void)
37140 register_sysctl_table(pty_root_table);
37141
37142 /* Now create the /dev/ptmx special device */
37143 + pax_open_kernel();
37144 tty_default_fops(&ptmx_fops);
37145 - ptmx_fops.open = ptmx_open;
37146 + *(void **)&ptmx_fops.open = ptmx_open;
37147 + pax_close_kernel();
37148
37149 cdev_init(&ptmx_cdev, &ptmx_fops);
37150 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
37151 diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
37152 index 2b42a01..32a2ed3 100644
37153 --- a/drivers/tty/serial/kgdboc.c
37154 +++ b/drivers/tty/serial/kgdboc.c
37155 @@ -24,8 +24,9 @@
37156 #define MAX_CONFIG_LEN 40
37157
37158 static struct kgdb_io kgdboc_io_ops;
37159 +static struct kgdb_io kgdboc_io_ops_console;
37160
37161 -/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
37162 +/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
37163 static int configured = -1;
37164
37165 static char config[MAX_CONFIG_LEN];
37166 @@ -148,6 +149,8 @@ static void cleanup_kgdboc(void)
37167 kgdboc_unregister_kbd();
37168 if (configured == 1)
37169 kgdb_unregister_io_module(&kgdboc_io_ops);
37170 + else if (configured == 2)
37171 + kgdb_unregister_io_module(&kgdboc_io_ops_console);
37172 }
37173
37174 static int configure_kgdboc(void)
37175 @@ -157,13 +160,13 @@ static int configure_kgdboc(void)
37176 int err;
37177 char *cptr = config;
37178 struct console *cons;
37179 + int is_console = 0;
37180
37181 err = kgdboc_option_setup(config);
37182 if (err || !strlen(config) || isspace(config[0]))
37183 goto noconfig;
37184
37185 err = -ENODEV;
37186 - kgdboc_io_ops.is_console = 0;
37187 kgdb_tty_driver = NULL;
37188
37189 kgdboc_use_kms = 0;
37190 @@ -184,7 +187,7 @@ static int configure_kgdboc(void)
37191 int idx;
37192 if (cons->device && cons->device(cons, &idx) == p &&
37193 idx == tty_line) {
37194 - kgdboc_io_ops.is_console = 1;
37195 + is_console = 1;
37196 break;
37197 }
37198 cons = cons->next;
37199 @@ -194,12 +197,16 @@ static int configure_kgdboc(void)
37200 kgdb_tty_line = tty_line;
37201
37202 do_register:
37203 - err = kgdb_register_io_module(&kgdboc_io_ops);
37204 + if (is_console) {
37205 + err = kgdb_register_io_module(&kgdboc_io_ops_console);
37206 + configured = 2;
37207 + } else {
37208 + err = kgdb_register_io_module(&kgdboc_io_ops);
37209 + configured = 1;
37210 + }
37211 if (err)
37212 goto noconfig;
37213
37214 - configured = 1;
37215 -
37216 return 0;
37217
37218 noconfig:
37219 @@ -213,7 +220,7 @@ noconfig:
37220 static int __init init_kgdboc(void)
37221 {
37222 /* Already configured? */
37223 - if (configured == 1)
37224 + if (configured >= 1)
37225 return 0;
37226
37227 return configure_kgdboc();
37228 @@ -262,7 +269,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
37229 if (config[len - 1] == '\n')
37230 config[len - 1] = '\0';
37231
37232 - if (configured == 1)
37233 + if (configured >= 1)
37234 cleanup_kgdboc();
37235
37236 /* Go and configure with the new params. */
37237 @@ -302,6 +309,15 @@ static struct kgdb_io kgdboc_io_ops = {
37238 .post_exception = kgdboc_post_exp_handler,
37239 };
37240
37241 +static struct kgdb_io kgdboc_io_ops_console = {
37242 + .name = "kgdboc",
37243 + .read_char = kgdboc_get_char,
37244 + .write_char = kgdboc_put_char,
37245 + .pre_exception = kgdboc_pre_exp_handler,
37246 + .post_exception = kgdboc_post_exp_handler,
37247 + .is_console = 1
37248 +};
37249 +
37250 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
37251 /* This is only available if kgdboc is a built in for early debugging */
37252 static int __init kgdboc_early_init(char *opt)
37253 diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
37254 index e41b9bb..84002fb 100644
37255 --- a/drivers/tty/tty_io.c
37256 +++ b/drivers/tty/tty_io.c
37257 @@ -3291,7 +3291,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
37258
37259 void tty_default_fops(struct file_operations *fops)
37260 {
37261 - *fops = tty_fops;
37262 + memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
37263 }
37264
37265 /*
37266 diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
37267 index 24b95db..9c078d0 100644
37268 --- a/drivers/tty/tty_ldisc.c
37269 +++ b/drivers/tty/tty_ldisc.c
37270 @@ -57,7 +57,7 @@ static void put_ldisc(struct tty_ldisc *ld)
37271 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
37272 struct tty_ldisc_ops *ldo = ld->ops;
37273
37274 - ldo->refcount--;
37275 + atomic_dec(&ldo->refcount);
37276 module_put(ldo->owner);
37277 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
37278
37279 @@ -92,7 +92,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
37280 spin_lock_irqsave(&tty_ldisc_lock, flags);
37281 tty_ldiscs[disc] = new_ldisc;
37282 new_ldisc->num = disc;
37283 - new_ldisc->refcount = 0;
37284 + atomic_set(&new_ldisc->refcount, 0);
37285 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
37286
37287 return ret;
37288 @@ -120,7 +120,7 @@ int tty_unregister_ldisc(int disc)
37289 return -EINVAL;
37290
37291 spin_lock_irqsave(&tty_ldisc_lock, flags);
37292 - if (tty_ldiscs[disc]->refcount)
37293 + if (atomic_read(&tty_ldiscs[disc]->refcount))
37294 ret = -EBUSY;
37295 else
37296 tty_ldiscs[disc] = NULL;
37297 @@ -141,7 +141,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
37298 if (ldops) {
37299 ret = ERR_PTR(-EAGAIN);
37300 if (try_module_get(ldops->owner)) {
37301 - ldops->refcount++;
37302 + atomic_inc(&ldops->refcount);
37303 ret = ldops;
37304 }
37305 }
37306 @@ -154,7 +154,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
37307 unsigned long flags;
37308
37309 spin_lock_irqsave(&tty_ldisc_lock, flags);
37310 - ldops->refcount--;
37311 + atomic_dec(&ldops->refcount);
37312 module_put(ldops->owner);
37313 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
37314 }
37315 diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
37316 index a605549..6bd3c96 100644
37317 --- a/drivers/tty/vt/keyboard.c
37318 +++ b/drivers/tty/vt/keyboard.c
37319 @@ -657,6 +657,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
37320 kbd->kbdmode == VC_OFF) &&
37321 value != KVAL(K_SAK))
37322 return; /* SAK is allowed even in raw mode */
37323 +
37324 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
37325 + {
37326 + void *func = fn_handler[value];
37327 + if (func == fn_show_state || func == fn_show_ptregs ||
37328 + func == fn_show_mem)
37329 + return;
37330 + }
37331 +#endif
37332 +
37333 fn_handler[value](vc);
37334 }
37335
37336 diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
37337 index 65447c5..0526f0a 100644
37338 --- a/drivers/tty/vt/vt_ioctl.c
37339 +++ b/drivers/tty/vt/vt_ioctl.c
37340 @@ -207,9 +207,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
37341 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
37342 return -EFAULT;
37343
37344 - if (!capable(CAP_SYS_TTY_CONFIG))
37345 - perm = 0;
37346 -
37347 switch (cmd) {
37348 case KDGKBENT:
37349 key_map = key_maps[s];
37350 @@ -221,6 +218,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
37351 val = (i ? K_HOLE : K_NOSUCHMAP);
37352 return put_user(val, &user_kbe->kb_value);
37353 case KDSKBENT:
37354 + if (!capable(CAP_SYS_TTY_CONFIG))
37355 + perm = 0;
37356 +
37357 if (!perm)
37358 return -EPERM;
37359 if (!i && v == K_NOSUCHMAP) {
37360 @@ -322,9 +322,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
37361 int i, j, k;
37362 int ret;
37363
37364 - if (!capable(CAP_SYS_TTY_CONFIG))
37365 - perm = 0;
37366 -
37367 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
37368 if (!kbs) {
37369 ret = -ENOMEM;
37370 @@ -358,6 +355,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
37371 kfree(kbs);
37372 return ((p && *p) ? -EOVERFLOW : 0);
37373 case KDSKBSENT:
37374 + if (!capable(CAP_SYS_TTY_CONFIG))
37375 + perm = 0;
37376 +
37377 if (!perm) {
37378 ret = -EPERM;
37379 goto reterr;
37380 diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
37381 index a783d53..cb30d94 100644
37382 --- a/drivers/uio/uio.c
37383 +++ b/drivers/uio/uio.c
37384 @@ -25,6 +25,7 @@
37385 #include <linux/kobject.h>
37386 #include <linux/cdev.h>
37387 #include <linux/uio_driver.h>
37388 +#include <asm/local.h>
37389
37390 #define UIO_MAX_DEVICES (1U << MINORBITS)
37391
37392 @@ -32,10 +33,10 @@ struct uio_device {
37393 struct module *owner;
37394 struct device *dev;
37395 int minor;
37396 - atomic_t event;
37397 + atomic_unchecked_t event;
37398 struct fasync_struct *async_queue;
37399 wait_queue_head_t wait;
37400 - int vma_count;
37401 + local_t vma_count;
37402 struct uio_info *info;
37403 struct kobject *map_dir;
37404 struct kobject *portio_dir;
37405 @@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
37406 struct device_attribute *attr, char *buf)
37407 {
37408 struct uio_device *idev = dev_get_drvdata(dev);
37409 - return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
37410 + return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
37411 }
37412
37413 static struct device_attribute uio_class_attributes[] = {
37414 @@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *info)
37415 {
37416 struct uio_device *idev = info->uio_dev;
37417
37418 - atomic_inc(&idev->event);
37419 + atomic_inc_unchecked(&idev->event);
37420 wake_up_interruptible(&idev->wait);
37421 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
37422 }
37423 @@ -461,7 +462,7 @@ static int uio_open(struct inode *inode, struct file *filep)
37424 }
37425
37426 listener->dev = idev;
37427 - listener->event_count = atomic_read(&idev->event);
37428 + listener->event_count = atomic_read_unchecked(&idev->event);
37429 filep->private_data = listener;
37430
37431 if (idev->info->open) {
37432 @@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
37433 return -EIO;
37434
37435 poll_wait(filep, &idev->wait, wait);
37436 - if (listener->event_count != atomic_read(&idev->event))
37437 + if (listener->event_count != atomic_read_unchecked(&idev->event))
37438 return POLLIN | POLLRDNORM;
37439 return 0;
37440 }
37441 @@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
37442 do {
37443 set_current_state(TASK_INTERRUPTIBLE);
37444
37445 - event_count = atomic_read(&idev->event);
37446 + event_count = atomic_read_unchecked(&idev->event);
37447 if (event_count != listener->event_count) {
37448 if (copy_to_user(buf, &event_count, count))
37449 retval = -EFAULT;
37450 @@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
37451 static void uio_vma_open(struct vm_area_struct *vma)
37452 {
37453 struct uio_device *idev = vma->vm_private_data;
37454 - idev->vma_count++;
37455 + local_inc(&idev->vma_count);
37456 }
37457
37458 static void uio_vma_close(struct vm_area_struct *vma)
37459 {
37460 struct uio_device *idev = vma->vm_private_data;
37461 - idev->vma_count--;
37462 + local_dec(&idev->vma_count);
37463 }
37464
37465 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
37466 @@ -821,7 +822,7 @@ int __uio_register_device(struct module *owner,
37467 idev->owner = owner;
37468 idev->info = info;
37469 init_waitqueue_head(&idev->wait);
37470 - atomic_set(&idev->event, 0);
37471 + atomic_set_unchecked(&idev->event, 0);
37472
37473 ret = uio_get_minor(idev);
37474 if (ret)
37475 diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
37476 index 98b89fe..aff824e 100644
37477 --- a/drivers/usb/atm/cxacru.c
37478 +++ b/drivers/usb/atm/cxacru.c
37479 @@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
37480 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
37481 if (ret < 2)
37482 return -EINVAL;
37483 - if (index < 0 || index > 0x7f)
37484 + if (index > 0x7f)
37485 return -EINVAL;
37486 pos += tmp;
37487
37488 diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
37489 index d3448ca..d2864ca 100644
37490 --- a/drivers/usb/atm/usbatm.c
37491 +++ b/drivers/usb/atm/usbatm.c
37492 @@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
37493 if (printk_ratelimit())
37494 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
37495 __func__, vpi, vci);
37496 - atomic_inc(&vcc->stats->rx_err);
37497 + atomic_inc_unchecked(&vcc->stats->rx_err);
37498 return;
37499 }
37500
37501 @@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
37502 if (length > ATM_MAX_AAL5_PDU) {
37503 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
37504 __func__, length, vcc);
37505 - atomic_inc(&vcc->stats->rx_err);
37506 + atomic_inc_unchecked(&vcc->stats->rx_err);
37507 goto out;
37508 }
37509
37510 @@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
37511 if (sarb->len < pdu_length) {
37512 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
37513 __func__, pdu_length, sarb->len, vcc);
37514 - atomic_inc(&vcc->stats->rx_err);
37515 + atomic_inc_unchecked(&vcc->stats->rx_err);
37516 goto out;
37517 }
37518
37519 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
37520 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
37521 __func__, vcc);
37522 - atomic_inc(&vcc->stats->rx_err);
37523 + atomic_inc_unchecked(&vcc->stats->rx_err);
37524 goto out;
37525 }
37526
37527 @@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
37528 if (printk_ratelimit())
37529 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
37530 __func__, length);
37531 - atomic_inc(&vcc->stats->rx_drop);
37532 + atomic_inc_unchecked(&vcc->stats->rx_drop);
37533 goto out;
37534 }
37535
37536 @@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
37537
37538 vcc->push(vcc, skb);
37539
37540 - atomic_inc(&vcc->stats->rx);
37541 + atomic_inc_unchecked(&vcc->stats->rx);
37542 out:
37543 skb_trim(sarb, 0);
37544 }
37545 @@ -615,7 +615,7 @@ static void usbatm_tx_process(unsigned long data)
37546 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
37547
37548 usbatm_pop(vcc, skb);
37549 - atomic_inc(&vcc->stats->tx);
37550 + atomic_inc_unchecked(&vcc->stats->tx);
37551
37552 skb = skb_dequeue(&instance->sndqueue);
37553 }
37554 @@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
37555 if (!left--)
37556 return sprintf(page,
37557 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
37558 - atomic_read(&atm_dev->stats.aal5.tx),
37559 - atomic_read(&atm_dev->stats.aal5.tx_err),
37560 - atomic_read(&atm_dev->stats.aal5.rx),
37561 - atomic_read(&atm_dev->stats.aal5.rx_err),
37562 - atomic_read(&atm_dev->stats.aal5.rx_drop));
37563 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
37564 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
37565 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
37566 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
37567 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
37568
37569 if (!left--) {
37570 if (instance->disconnected)
37571 diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
37572 index d956965..4179a77 100644
37573 --- a/drivers/usb/core/devices.c
37574 +++ b/drivers/usb/core/devices.c
37575 @@ -126,7 +126,7 @@ static const char format_endpt[] =
37576 * time it gets called.
37577 */
37578 static struct device_connect_event {
37579 - atomic_t count;
37580 + atomic_unchecked_t count;
37581 wait_queue_head_t wait;
37582 } device_event = {
37583 .count = ATOMIC_INIT(1),
37584 @@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
37585
37586 void usbfs_conn_disc_event(void)
37587 {
37588 - atomic_add(2, &device_event.count);
37589 + atomic_add_unchecked(2, &device_event.count);
37590 wake_up(&device_event.wait);
37591 }
37592
37593 @@ -648,7 +648,7 @@ static unsigned int usb_device_poll(struct file *file,
37594
37595 poll_wait(file, &device_event.wait, wait);
37596
37597 - event_count = atomic_read(&device_event.count);
37598 + event_count = atomic_read_unchecked(&device_event.count);
37599 if (file->f_version != event_count) {
37600 file->f_version = event_count;
37601 return POLLIN | POLLRDNORM;
37602 diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
37603 index b3bdfed..a9460e0 100644
37604 --- a/drivers/usb/core/message.c
37605 +++ b/drivers/usb/core/message.c
37606 @@ -869,8 +869,8 @@ char *usb_cache_string(struct usb_device *udev, int index)
37607 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
37608 if (buf) {
37609 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
37610 - if (len > 0) {
37611 - smallbuf = kmalloc(++len, GFP_NOIO);
37612 + if (len++ > 0) {
37613 + smallbuf = kmalloc(len, GFP_NOIO);
37614 if (!smallbuf)
37615 return buf;
37616 memcpy(smallbuf, buf, len);
37617 diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
37618 index 1fc8f12..20647c1 100644
37619 --- a/drivers/usb/early/ehci-dbgp.c
37620 +++ b/drivers/usb/early/ehci-dbgp.c
37621 @@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
37622
37623 #ifdef CONFIG_KGDB
37624 static struct kgdb_io kgdbdbgp_io_ops;
37625 -#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
37626 +static struct kgdb_io kgdbdbgp_io_ops_console;
37627 +#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
37628 #else
37629 #define dbgp_kgdb_mode (0)
37630 #endif
37631 @@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
37632 .write_char = kgdbdbgp_write_char,
37633 };
37634
37635 +static struct kgdb_io kgdbdbgp_io_ops_console = {
37636 + .name = "kgdbdbgp",
37637 + .read_char = kgdbdbgp_read_char,
37638 + .write_char = kgdbdbgp_write_char,
37639 + .is_console = 1
37640 +};
37641 +
37642 static int kgdbdbgp_wait_time;
37643
37644 static int __init kgdbdbgp_parse_config(char *str)
37645 @@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(char *str)
37646 ptr++;
37647 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
37648 }
37649 - kgdb_register_io_module(&kgdbdbgp_io_ops);
37650 - kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
37651 + if (early_dbgp_console.index != -1)
37652 + kgdb_register_io_module(&kgdbdbgp_io_ops_console);
37653 + else
37654 + kgdb_register_io_module(&kgdbdbgp_io_ops);
37655
37656 return 0;
37657 }
37658 diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
37659 index d6bea3e..60b250e 100644
37660 --- a/drivers/usb/wusbcore/wa-hc.h
37661 +++ b/drivers/usb/wusbcore/wa-hc.h
37662 @@ -192,7 +192,7 @@ struct wahc {
37663 struct list_head xfer_delayed_list;
37664 spinlock_t xfer_list_lock;
37665 struct work_struct xfer_work;
37666 - atomic_t xfer_id_count;
37667 + atomic_unchecked_t xfer_id_count;
37668 };
37669
37670
37671 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
37672 INIT_LIST_HEAD(&wa->xfer_delayed_list);
37673 spin_lock_init(&wa->xfer_list_lock);
37674 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
37675 - atomic_set(&wa->xfer_id_count, 1);
37676 + atomic_set_unchecked(&wa->xfer_id_count, 1);
37677 }
37678
37679 /**
37680 diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
37681 index 57c01ab..8a05959 100644
37682 --- a/drivers/usb/wusbcore/wa-xfer.c
37683 +++ b/drivers/usb/wusbcore/wa-xfer.c
37684 @@ -296,7 +296,7 @@ out:
37685 */
37686 static void wa_xfer_id_init(struct wa_xfer *xfer)
37687 {
37688 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
37689 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
37690 }
37691
37692 /*
37693 diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
37694 index c14c42b..f955cc2 100644
37695 --- a/drivers/vhost/vhost.c
37696 +++ b/drivers/vhost/vhost.c
37697 @@ -629,7 +629,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
37698 return 0;
37699 }
37700
37701 -static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
37702 +static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
37703 {
37704 struct file *eventfp, *filep = NULL,
37705 *pollstart = NULL, *pollstop = NULL;
37706 diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
37707 index b0b2ac3..89a4399 100644
37708 --- a/drivers/video/aty/aty128fb.c
37709 +++ b/drivers/video/aty/aty128fb.c
37710 @@ -148,7 +148,7 @@ enum {
37711 };
37712
37713 /* Must match above enum */
37714 -static const char *r128_family[] __devinitdata = {
37715 +static const char *r128_family[] __devinitconst = {
37716 "AGP",
37717 "PCI",
37718 "PRO AGP",
37719 diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
37720 index 5c3960d..15cf8fc 100644
37721 --- a/drivers/video/fbcmap.c
37722 +++ b/drivers/video/fbcmap.c
37723 @@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
37724 rc = -ENODEV;
37725 goto out;
37726 }
37727 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
37728 - !info->fbops->fb_setcmap)) {
37729 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
37730 rc = -EINVAL;
37731 goto out1;
37732 }
37733 diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
37734 index ac9141b..9f07583 100644
37735 --- a/drivers/video/fbmem.c
37736 +++ b/drivers/video/fbmem.c
37737 @@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
37738 image->dx += image->width + 8;
37739 }
37740 } else if (rotate == FB_ROTATE_UD) {
37741 - for (x = 0; x < num && image->dx >= 0; x++) {
37742 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
37743 info->fbops->fb_imageblit(info, image);
37744 image->dx -= image->width + 8;
37745 }
37746 @@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
37747 image->dy += image->height + 8;
37748 }
37749 } else if (rotate == FB_ROTATE_CCW) {
37750 - for (x = 0; x < num && image->dy >= 0; x++) {
37751 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
37752 info->fbops->fb_imageblit(info, image);
37753 image->dy -= image->height + 8;
37754 }
37755 @@ -1157,7 +1157,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
37756 return -EFAULT;
37757 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
37758 return -EINVAL;
37759 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
37760 + if (con2fb.framebuffer >= FB_MAX)
37761 return -EINVAL;
37762 if (!registered_fb[con2fb.framebuffer])
37763 request_module("fb%d", con2fb.framebuffer);
37764 diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
37765 index 5a5d092..265c5ed 100644
37766 --- a/drivers/video/geode/gx1fb_core.c
37767 +++ b/drivers/video/geode/gx1fb_core.c
37768 @@ -29,7 +29,7 @@ static int crt_option = 1;
37769 static char panel_option[32] = "";
37770
37771 /* Modes relevant to the GX1 (taken from modedb.c) */
37772 -static const struct fb_videomode __devinitdata gx1_modedb[] = {
37773 +static const struct fb_videomode __devinitconst gx1_modedb[] = {
37774 /* 640x480-60 VESA */
37775 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
37776 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
37777 diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
37778 index 0fad23f..0e9afa4 100644
37779 --- a/drivers/video/gxt4500.c
37780 +++ b/drivers/video/gxt4500.c
37781 @@ -156,7 +156,7 @@ struct gxt4500_par {
37782 static char *mode_option;
37783
37784 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
37785 -static const struct fb_videomode defaultmode __devinitdata = {
37786 +static const struct fb_videomode defaultmode __devinitconst = {
37787 .refresh = 60,
37788 .xres = 1280,
37789 .yres = 1024,
37790 @@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
37791 return 0;
37792 }
37793
37794 -static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
37795 +static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
37796 .id = "IBM GXT4500P",
37797 .type = FB_TYPE_PACKED_PIXELS,
37798 .visual = FB_VISUAL_PSEUDOCOLOR,
37799 diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
37800 index 7672d2e..b56437f 100644
37801 --- a/drivers/video/i810/i810_accel.c
37802 +++ b/drivers/video/i810/i810_accel.c
37803 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
37804 }
37805 }
37806 printk("ringbuffer lockup!!!\n");
37807 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
37808 i810_report_error(mmio);
37809 par->dev_flags |= LOCKUP;
37810 info->pixmap.scan_align = 1;
37811 diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
37812 index b83f361..2b05a91 100644
37813 --- a/drivers/video/i810/i810_main.c
37814 +++ b/drivers/video/i810/i810_main.c
37815 @@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
37816 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
37817
37818 /* PCI */
37819 -static const char *i810_pci_list[] __devinitdata = {
37820 +static const char *i810_pci_list[] __devinitconst = {
37821 "Intel(R) 810 Framebuffer Device" ,
37822 "Intel(R) 810-DC100 Framebuffer Device" ,
37823 "Intel(R) 810E Framebuffer Device" ,
37824 diff --git a/drivers/video/jz4740_fb.c b/drivers/video/jz4740_fb.c
37825 index de36693..3c63fc2 100644
37826 --- a/drivers/video/jz4740_fb.c
37827 +++ b/drivers/video/jz4740_fb.c
37828 @@ -136,7 +136,7 @@ struct jzfb {
37829 uint32_t pseudo_palette[16];
37830 };
37831
37832 -static const struct fb_fix_screeninfo jzfb_fix __devinitdata = {
37833 +static const struct fb_fix_screeninfo jzfb_fix __devinitconst = {
37834 .id = "JZ4740 FB",
37835 .type = FB_TYPE_PACKED_PIXELS,
37836 .visual = FB_VISUAL_TRUECOLOR,
37837 diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
37838 index 3c14e43..eafa544 100644
37839 --- a/drivers/video/logo/logo_linux_clut224.ppm
37840 +++ b/drivers/video/logo/logo_linux_clut224.ppm
37841 @@ -1,1604 +1,1123 @@
37842 P3
37843 -# Standard 224-color Linux logo
37844 80 80
37845 255
37846 - 0 0 0 0 0 0 0 0 0 0 0 0
37847 - 0 0 0 0 0 0 0 0 0 0 0 0
37848 - 0 0 0 0 0 0 0 0 0 0 0 0
37849 - 0 0 0 0 0 0 0 0 0 0 0 0
37850 - 0 0 0 0 0 0 0 0 0 0 0 0
37851 - 0 0 0 0 0 0 0 0 0 0 0 0
37852 - 0 0 0 0 0 0 0 0 0 0 0 0
37853 - 0 0 0 0 0 0 0 0 0 0 0 0
37854 - 0 0 0 0 0 0 0 0 0 0 0 0
37855 - 6 6 6 6 6 6 10 10 10 10 10 10
37856 - 10 10 10 6 6 6 6 6 6 6 6 6
37857 - 0 0 0 0 0 0 0 0 0 0 0 0
37858 - 0 0 0 0 0 0 0 0 0 0 0 0
37859 - 0 0 0 0 0 0 0 0 0 0 0 0
37860 - 0 0 0 0 0 0 0 0 0 0 0 0
37861 - 0 0 0 0 0 0 0 0 0 0 0 0
37862 - 0 0 0 0 0 0 0 0 0 0 0 0
37863 - 0 0 0 0 0 0 0 0 0 0 0 0
37864 - 0 0 0 0 0 0 0 0 0 0 0 0
37865 - 0 0 0 0 0 0 0 0 0 0 0 0
37866 - 0 0 0 0 0 0 0 0 0 0 0 0
37867 - 0 0 0 0 0 0 0 0 0 0 0 0
37868 - 0 0 0 0 0 0 0 0 0 0 0 0
37869 - 0 0 0 0 0 0 0 0 0 0 0 0
37870 - 0 0 0 0 0 0 0 0 0 0 0 0
37871 - 0 0 0 0 0 0 0 0 0 0 0 0
37872 - 0 0 0 0 0 0 0 0 0 0 0 0
37873 - 0 0 0 0 0 0 0 0 0 0 0 0
37874 - 0 0 0 6 6 6 10 10 10 14 14 14
37875 - 22 22 22 26 26 26 30 30 30 34 34 34
37876 - 30 30 30 30 30 30 26 26 26 18 18 18
37877 - 14 14 14 10 10 10 6 6 6 0 0 0
37878 - 0 0 0 0 0 0 0 0 0 0 0 0
37879 - 0 0 0 0 0 0 0 0 0 0 0 0
37880 - 0 0 0 0 0 0 0 0 0 0 0 0
37881 - 0 0 0 0 0 0 0 0 0 0 0 0
37882 - 0 0 0 0 0 0 0 0 0 0 0 0
37883 - 0 0 0 0 0 0 0 0 0 0 0 0
37884 - 0 0 0 0 0 0 0 0 0 0 0 0
37885 - 0 0 0 0 0 0 0 0 0 0 0 0
37886 - 0 0 0 0 0 0 0 0 0 0 0 0
37887 - 0 0 0 0 0 1 0 0 1 0 0 0
37888 - 0 0 0 0 0 0 0 0 0 0 0 0
37889 - 0 0 0 0 0 0 0 0 0 0 0 0
37890 - 0 0 0 0 0 0 0 0 0 0 0 0
37891 - 0 0 0 0 0 0 0 0 0 0 0 0
37892 - 0 0 0 0 0 0 0 0 0 0 0 0
37893 - 0 0 0 0 0 0 0 0 0 0 0 0
37894 - 6 6 6 14 14 14 26 26 26 42 42 42
37895 - 54 54 54 66 66 66 78 78 78 78 78 78
37896 - 78 78 78 74 74 74 66 66 66 54 54 54
37897 - 42 42 42 26 26 26 18 18 18 10 10 10
37898 - 6 6 6 0 0 0 0 0 0 0 0 0
37899 - 0 0 0 0 0 0 0 0 0 0 0 0
37900 - 0 0 0 0 0 0 0 0 0 0 0 0
37901 - 0 0 0 0 0 0 0 0 0 0 0 0
37902 - 0 0 0 0 0 0 0 0 0 0 0 0
37903 - 0 0 0 0 0 0 0 0 0 0 0 0
37904 - 0 0 0 0 0 0 0 0 0 0 0 0
37905 - 0 0 0 0 0 0 0 0 0 0 0 0
37906 - 0 0 0 0 0 0 0 0 0 0 0 0
37907 - 0 0 1 0 0 0 0 0 0 0 0 0
37908 - 0 0 0 0 0 0 0 0 0 0 0 0
37909 - 0 0 0 0 0 0 0 0 0 0 0 0
37910 - 0 0 0 0 0 0 0 0 0 0 0 0
37911 - 0 0 0 0 0 0 0 0 0 0 0 0
37912 - 0 0 0 0 0 0 0 0 0 0 0 0
37913 - 0 0 0 0 0 0 0 0 0 10 10 10
37914 - 22 22 22 42 42 42 66 66 66 86 86 86
37915 - 66 66 66 38 38 38 38 38 38 22 22 22
37916 - 26 26 26 34 34 34 54 54 54 66 66 66
37917 - 86 86 86 70 70 70 46 46 46 26 26 26
37918 - 14 14 14 6 6 6 0 0 0 0 0 0
37919 - 0 0 0 0 0 0 0 0 0 0 0 0
37920 - 0 0 0 0 0 0 0 0 0 0 0 0
37921 - 0 0 0 0 0 0 0 0 0 0 0 0
37922 - 0 0 0 0 0 0 0 0 0 0 0 0
37923 - 0 0 0 0 0 0 0 0 0 0 0 0
37924 - 0 0 0 0 0 0 0 0 0 0 0 0
37925 - 0 0 0 0 0 0 0 0 0 0 0 0
37926 - 0 0 0 0 0 0 0 0 0 0 0 0
37927 - 0 0 1 0 0 1 0 0 1 0 0 0
37928 - 0 0 0 0 0 0 0 0 0 0 0 0
37929 - 0 0 0 0 0 0 0 0 0 0 0 0
37930 - 0 0 0 0 0 0 0 0 0 0 0 0
37931 - 0 0 0 0 0 0 0 0 0 0 0 0
37932 - 0 0 0 0 0 0 0 0 0 0 0 0
37933 - 0 0 0 0 0 0 10 10 10 26 26 26
37934 - 50 50 50 82 82 82 58 58 58 6 6 6
37935 - 2 2 6 2 2 6 2 2 6 2 2 6
37936 - 2 2 6 2 2 6 2 2 6 2 2 6
37937 - 6 6 6 54 54 54 86 86 86 66 66 66
37938 - 38 38 38 18 18 18 6 6 6 0 0 0
37939 - 0 0 0 0 0 0 0 0 0 0 0 0
37940 - 0 0 0 0 0 0 0 0 0 0 0 0
37941 - 0 0 0 0 0 0 0 0 0 0 0 0
37942 - 0 0 0 0 0 0 0 0 0 0 0 0
37943 - 0 0 0 0 0 0 0 0 0 0 0 0
37944 - 0 0 0 0 0 0 0 0 0 0 0 0
37945 - 0 0 0 0 0 0 0 0 0 0 0 0
37946 - 0 0 0 0 0 0 0 0 0 0 0 0
37947 - 0 0 0 0 0 0 0 0 0 0 0 0
37948 - 0 0 0 0 0 0 0 0 0 0 0 0
37949 - 0 0 0 0 0 0 0 0 0 0 0 0
37950 - 0 0 0 0 0 0 0 0 0 0 0 0
37951 - 0 0 0 0 0 0 0 0 0 0 0 0
37952 - 0 0 0 0 0 0 0 0 0 0 0 0
37953 - 0 0 0 6 6 6 22 22 22 50 50 50
37954 - 78 78 78 34 34 34 2 2 6 2 2 6
37955 - 2 2 6 2 2 6 2 2 6 2 2 6
37956 - 2 2 6 2 2 6 2 2 6 2 2 6
37957 - 2 2 6 2 2 6 6 6 6 70 70 70
37958 - 78 78 78 46 46 46 22 22 22 6 6 6
37959 - 0 0 0 0 0 0 0 0 0 0 0 0
37960 - 0 0 0 0 0 0 0 0 0 0 0 0
37961 - 0 0 0 0 0 0 0 0 0 0 0 0
37962 - 0 0 0 0 0 0 0 0 0 0 0 0
37963 - 0 0 0 0 0 0 0 0 0 0 0 0
37964 - 0 0 0 0 0 0 0 0 0 0 0 0
37965 - 0 0 0 0 0 0 0 0 0 0 0 0
37966 - 0 0 0 0 0 0 0 0 0 0 0 0
37967 - 0 0 1 0 0 1 0 0 1 0 0 0
37968 - 0 0 0 0 0 0 0 0 0 0 0 0
37969 - 0 0 0 0 0 0 0 0 0 0 0 0
37970 - 0 0 0 0 0 0 0 0 0 0 0 0
37971 - 0 0 0 0 0 0 0 0 0 0 0 0
37972 - 0 0 0 0 0 0 0 0 0 0 0 0
37973 - 6 6 6 18 18 18 42 42 42 82 82 82
37974 - 26 26 26 2 2 6 2 2 6 2 2 6
37975 - 2 2 6 2 2 6 2 2 6 2 2 6
37976 - 2 2 6 2 2 6 2 2 6 14 14 14
37977 - 46 46 46 34 34 34 6 6 6 2 2 6
37978 - 42 42 42 78 78 78 42 42 42 18 18 18
37979 - 6 6 6 0 0 0 0 0 0 0 0 0
37980 - 0 0 0 0 0 0 0 0 0 0 0 0
37981 - 0 0 0 0 0 0 0 0 0 0 0 0
37982 - 0 0 0 0 0 0 0 0 0 0 0 0
37983 - 0 0 0 0 0 0 0 0 0 0 0 0
37984 - 0 0 0 0 0 0 0 0 0 0 0 0
37985 - 0 0 0 0 0 0 0 0 0 0 0 0
37986 - 0 0 0 0 0 0 0 0 0 0 0 0
37987 - 0 0 1 0 0 0 0 0 1 0 0 0
37988 - 0 0 0 0 0 0 0 0 0 0 0 0
37989 - 0 0 0 0 0 0 0 0 0 0 0 0
37990 - 0 0 0 0 0 0 0 0 0 0 0 0
37991 - 0 0 0 0 0 0 0 0 0 0 0 0
37992 - 0 0 0 0 0 0 0 0 0 0 0 0
37993 - 10 10 10 30 30 30 66 66 66 58 58 58
37994 - 2 2 6 2 2 6 2 2 6 2 2 6
37995 - 2 2 6 2 2 6 2 2 6 2 2 6
37996 - 2 2 6 2 2 6 2 2 6 26 26 26
37997 - 86 86 86 101 101 101 46 46 46 10 10 10
37998 - 2 2 6 58 58 58 70 70 70 34 34 34
37999 - 10 10 10 0 0 0 0 0 0 0 0 0
38000 - 0 0 0 0 0 0 0 0 0 0 0 0
38001 - 0 0 0 0 0 0 0 0 0 0 0 0
38002 - 0 0 0 0 0 0 0 0 0 0 0 0
38003 - 0 0 0 0 0 0 0 0 0 0 0 0
38004 - 0 0 0 0 0 0 0 0 0 0 0 0
38005 - 0 0 0 0 0 0 0 0 0 0 0 0
38006 - 0 0 0 0 0 0 0 0 0 0 0 0
38007 - 0 0 1 0 0 1 0 0 1 0 0 0
38008 - 0 0 0 0 0 0 0 0 0 0 0 0
38009 - 0 0 0 0 0 0 0 0 0 0 0 0
38010 - 0 0 0 0 0 0 0 0 0 0 0 0
38011 - 0 0 0 0 0 0 0 0 0 0 0 0
38012 - 0 0 0 0 0 0 0 0 0 0 0 0
38013 - 14 14 14 42 42 42 86 86 86 10 10 10
38014 - 2 2 6 2 2 6 2 2 6 2 2 6
38015 - 2 2 6 2 2 6 2 2 6 2 2 6
38016 - 2 2 6 2 2 6 2 2 6 30 30 30
38017 - 94 94 94 94 94 94 58 58 58 26 26 26
38018 - 2 2 6 6 6 6 78 78 78 54 54 54
38019 - 22 22 22 6 6 6 0 0 0 0 0 0
38020 - 0 0 0 0 0 0 0 0 0 0 0 0
38021 - 0 0 0 0 0 0 0 0 0 0 0 0
38022 - 0 0 0 0 0 0 0 0 0 0 0 0
38023 - 0 0 0 0 0 0 0 0 0 0 0 0
38024 - 0 0 0 0 0 0 0 0 0 0 0 0
38025 - 0 0 0 0 0 0 0 0 0 0 0 0
38026 - 0 0 0 0 0 0 0 0 0 0 0 0
38027 - 0 0 0 0 0 0 0 0 0 0 0 0
38028 - 0 0 0 0 0 0 0 0 0 0 0 0
38029 - 0 0 0 0 0 0 0 0 0 0 0 0
38030 - 0 0 0 0 0 0 0 0 0 0 0 0
38031 - 0 0 0 0 0 0 0 0 0 0 0 0
38032 - 0 0 0 0 0 0 0 0 0 6 6 6
38033 - 22 22 22 62 62 62 62 62 62 2 2 6
38034 - 2 2 6 2 2 6 2 2 6 2 2 6
38035 - 2 2 6 2 2 6 2 2 6 2 2 6
38036 - 2 2 6 2 2 6 2 2 6 26 26 26
38037 - 54 54 54 38 38 38 18 18 18 10 10 10
38038 - 2 2 6 2 2 6 34 34 34 82 82 82
38039 - 38 38 38 14 14 14 0 0 0 0 0 0
38040 - 0 0 0 0 0 0 0 0 0 0 0 0
38041 - 0 0 0 0 0 0 0 0 0 0 0 0
38042 - 0 0 0 0 0 0 0 0 0 0 0 0
38043 - 0 0 0 0 0 0 0 0 0 0 0 0
38044 - 0 0 0 0 0 0 0 0 0 0 0 0
38045 - 0 0 0 0 0 0 0 0 0 0 0 0
38046 - 0 0 0 0 0 0 0 0 0 0 0 0
38047 - 0 0 0 0 0 1 0 0 1 0 0 0
38048 - 0 0 0 0 0 0 0 0 0 0 0 0
38049 - 0 0 0 0 0 0 0 0 0 0 0 0
38050 - 0 0 0 0 0 0 0 0 0 0 0 0
38051 - 0 0 0 0 0 0 0 0 0 0 0 0
38052 - 0 0 0 0 0 0 0 0 0 6 6 6
38053 - 30 30 30 78 78 78 30 30 30 2 2 6
38054 - 2 2 6 2 2 6 2 2 6 2 2 6
38055 - 2 2 6 2 2 6 2 2 6 2 2 6
38056 - 2 2 6 2 2 6 2 2 6 10 10 10
38057 - 10 10 10 2 2 6 2 2 6 2 2 6
38058 - 2 2 6 2 2 6 2 2 6 78 78 78
38059 - 50 50 50 18 18 18 6 6 6 0 0 0
38060 - 0 0 0 0 0 0 0 0 0 0 0 0
38061 - 0 0 0 0 0 0 0 0 0 0 0 0
38062 - 0 0 0 0 0 0 0 0 0 0 0 0
38063 - 0 0 0 0 0 0 0 0 0 0 0 0
38064 - 0 0 0 0 0 0 0 0 0 0 0 0
38065 - 0 0 0 0 0 0 0 0 0 0 0 0
38066 - 0 0 0 0 0 0 0 0 0 0 0 0
38067 - 0 0 1 0 0 0 0 0 0 0 0 0
38068 - 0 0 0 0 0 0 0 0 0 0 0 0
38069 - 0 0 0 0 0 0 0 0 0 0 0 0
38070 - 0 0 0 0 0 0 0 0 0 0 0 0
38071 - 0 0 0 0 0 0 0 0 0 0 0 0
38072 - 0 0 0 0 0 0 0 0 0 10 10 10
38073 - 38 38 38 86 86 86 14 14 14 2 2 6
38074 - 2 2 6 2 2 6 2 2 6 2 2 6
38075 - 2 2 6 2 2 6 2 2 6 2 2 6
38076 - 2 2 6 2 2 6 2 2 6 2 2 6
38077 - 2 2 6 2 2 6 2 2 6 2 2 6
38078 - 2 2 6 2 2 6 2 2 6 54 54 54
38079 - 66 66 66 26 26 26 6 6 6 0 0 0
38080 - 0 0 0 0 0 0 0 0 0 0 0 0
38081 - 0 0 0 0 0 0 0 0 0 0 0 0
38082 - 0 0 0 0 0 0 0 0 0 0 0 0
38083 - 0 0 0 0 0 0 0 0 0 0 0 0
38084 - 0 0 0 0 0 0 0 0 0 0 0 0
38085 - 0 0 0 0 0 0 0 0 0 0 0 0
38086 - 0 0 0 0 0 0 0 0 0 0 0 0
38087 - 0 0 0 0 0 1 0 0 1 0 0 0
38088 - 0 0 0 0 0 0 0 0 0 0 0 0
38089 - 0 0 0 0 0 0 0 0 0 0 0 0
38090 - 0 0 0 0 0 0 0 0 0 0 0 0
38091 - 0 0 0 0 0 0 0 0 0 0 0 0
38092 - 0 0 0 0 0 0 0 0 0 14 14 14
38093 - 42 42 42 82 82 82 2 2 6 2 2 6
38094 - 2 2 6 6 6 6 10 10 10 2 2 6
38095 - 2 2 6 2 2 6 2 2 6 2 2 6
38096 - 2 2 6 2 2 6 2 2 6 6 6 6
38097 - 14 14 14 10 10 10 2 2 6 2 2 6
38098 - 2 2 6 2 2 6 2 2 6 18 18 18
38099 - 82 82 82 34 34 34 10 10 10 0 0 0
38100 - 0 0 0 0 0 0 0 0 0 0 0 0
38101 - 0 0 0 0 0 0 0 0 0 0 0 0
38102 - 0 0 0 0 0 0 0 0 0 0 0 0
38103 - 0 0 0 0 0 0 0 0 0 0 0 0
38104 - 0 0 0 0 0 0 0 0 0 0 0 0
38105 - 0 0 0 0 0 0 0 0 0 0 0 0
38106 - 0 0 0 0 0 0 0 0 0 0 0 0
38107 - 0 0 1 0 0 0 0 0 0 0 0 0
38108 - 0 0 0 0 0 0 0 0 0 0 0 0
38109 - 0 0 0 0 0 0 0 0 0 0 0 0
38110 - 0 0 0 0 0 0 0 0 0 0 0 0
38111 - 0 0 0 0 0 0 0 0 0 0 0 0
38112 - 0 0 0 0 0 0 0 0 0 14 14 14
38113 - 46 46 46 86 86 86 2 2 6 2 2 6
38114 - 6 6 6 6 6 6 22 22 22 34 34 34
38115 - 6 6 6 2 2 6 2 2 6 2 2 6
38116 - 2 2 6 2 2 6 18 18 18 34 34 34
38117 - 10 10 10 50 50 50 22 22 22 2 2 6
38118 - 2 2 6 2 2 6 2 2 6 10 10 10
38119 - 86 86 86 42 42 42 14 14 14 0 0 0
38120 - 0 0 0 0 0 0 0 0 0 0 0 0
38121 - 0 0 0 0 0 0 0 0 0 0 0 0
38122 - 0 0 0 0 0 0 0 0 0 0 0 0
38123 - 0 0 0 0 0 0 0 0 0 0 0 0
38124 - 0 0 0 0 0 0 0 0 0 0 0 0
38125 - 0 0 0 0 0 0 0 0 0 0 0 0
38126 - 0 0 0 0 0 0 0 0 0 0 0 0
38127 - 0 0 1 0 0 1 0 0 1 0 0 0
38128 - 0 0 0 0 0 0 0 0 0 0 0 0
38129 - 0 0 0 0 0 0 0 0 0 0 0 0
38130 - 0 0 0 0 0 0 0 0 0 0 0 0
38131 - 0 0 0 0 0 0 0 0 0 0 0 0
38132 - 0 0 0 0 0 0 0 0 0 14 14 14
38133 - 46 46 46 86 86 86 2 2 6 2 2 6
38134 - 38 38 38 116 116 116 94 94 94 22 22 22
38135 - 22 22 22 2 2 6 2 2 6 2 2 6
38136 - 14 14 14 86 86 86 138 138 138 162 162 162
38137 -154 154 154 38 38 38 26 26 26 6 6 6
38138 - 2 2 6 2 2 6 2 2 6 2 2 6
38139 - 86 86 86 46 46 46 14 14 14 0 0 0
38140 - 0 0 0 0 0 0 0 0 0 0 0 0
38141 - 0 0 0 0 0 0 0 0 0 0 0 0
38142 - 0 0 0 0 0 0 0 0 0 0 0 0
38143 - 0 0 0 0 0 0 0 0 0 0 0 0
38144 - 0 0 0 0 0 0 0 0 0 0 0 0
38145 - 0 0 0 0 0 0 0 0 0 0 0 0
38146 - 0 0 0 0 0 0 0 0 0 0 0 0
38147 - 0 0 0 0 0 0 0 0 0 0 0 0
38148 - 0 0 0 0 0 0 0 0 0 0 0 0
38149 - 0 0 0 0 0 0 0 0 0 0 0 0
38150 - 0 0 0 0 0 0 0 0 0 0 0 0
38151 - 0 0 0 0 0 0 0 0 0 0 0 0
38152 - 0 0 0 0 0 0 0 0 0 14 14 14
38153 - 46 46 46 86 86 86 2 2 6 14 14 14
38154 -134 134 134 198 198 198 195 195 195 116 116 116
38155 - 10 10 10 2 2 6 2 2 6 6 6 6
38156 -101 98 89 187 187 187 210 210 210 218 218 218
38157 -214 214 214 134 134 134 14 14 14 6 6 6
38158 - 2 2 6 2 2 6 2 2 6 2 2 6
38159 - 86 86 86 50 50 50 18 18 18 6 6 6
38160 - 0 0 0 0 0 0 0 0 0 0 0 0
38161 - 0 0 0 0 0 0 0 0 0 0 0 0
38162 - 0 0 0 0 0 0 0 0 0 0 0 0
38163 - 0 0 0 0 0 0 0 0 0 0 0 0
38164 - 0 0 0 0 0 0 0 0 0 0 0 0
38165 - 0 0 0 0 0 0 0 0 0 0 0 0
38166 - 0 0 0 0 0 0 0 0 1 0 0 0
38167 - 0 0 1 0 0 1 0 0 1 0 0 0
38168 - 0 0 0 0 0 0 0 0 0 0 0 0
38169 - 0 0 0 0 0 0 0 0 0 0 0 0
38170 - 0 0 0 0 0 0 0 0 0 0 0 0
38171 - 0 0 0 0 0 0 0 0 0 0 0 0
38172 - 0 0 0 0 0 0 0 0 0 14 14 14
38173 - 46 46 46 86 86 86 2 2 6 54 54 54
38174 -218 218 218 195 195 195 226 226 226 246 246 246
38175 - 58 58 58 2 2 6 2 2 6 30 30 30
38176 -210 210 210 253 253 253 174 174 174 123 123 123
38177 -221 221 221 234 234 234 74 74 74 2 2 6
38178 - 2 2 6 2 2 6 2 2 6 2 2 6
38179 - 70 70 70 58 58 58 22 22 22 6 6 6
38180 - 0 0 0 0 0 0 0 0 0 0 0 0
38181 - 0 0 0 0 0 0 0 0 0 0 0 0
38182 - 0 0 0 0 0 0 0 0 0 0 0 0
38183 - 0 0 0 0 0 0 0 0 0 0 0 0
38184 - 0 0 0 0 0 0 0 0 0 0 0 0
38185 - 0 0 0 0 0 0 0 0 0 0 0 0
38186 - 0 0 0 0 0 0 0 0 0 0 0 0
38187 - 0 0 0 0 0 0 0 0 0 0 0 0
38188 - 0 0 0 0 0 0 0 0 0 0 0 0
38189 - 0 0 0 0 0 0 0 0 0 0 0 0
38190 - 0 0 0 0 0 0 0 0 0 0 0 0
38191 - 0 0 0 0 0 0 0 0 0 0 0 0
38192 - 0 0 0 0 0 0 0 0 0 14 14 14
38193 - 46 46 46 82 82 82 2 2 6 106 106 106
38194 -170 170 170 26 26 26 86 86 86 226 226 226
38195 -123 123 123 10 10 10 14 14 14 46 46 46
38196 -231 231 231 190 190 190 6 6 6 70 70 70
38197 - 90 90 90 238 238 238 158 158 158 2 2 6
38198 - 2 2 6 2 2 6 2 2 6 2 2 6
38199 - 70 70 70 58 58 58 22 22 22 6 6 6
38200 - 0 0 0 0 0 0 0 0 0 0 0 0
38201 - 0 0 0 0 0 0 0 0 0 0 0 0
38202 - 0 0 0 0 0 0 0 0 0 0 0 0
38203 - 0 0 0 0 0 0 0 0 0 0 0 0
38204 - 0 0 0 0 0 0 0 0 0 0 0 0
38205 - 0 0 0 0 0 0 0 0 0 0 0 0
38206 - 0 0 0 0 0 0 0 0 1 0 0 0
38207 - 0 0 1 0 0 1 0 0 1 0 0 0
38208 - 0 0 0 0 0 0 0 0 0 0 0 0
38209 - 0 0 0 0 0 0 0 0 0 0 0 0
38210 - 0 0 0 0 0 0 0 0 0 0 0 0
38211 - 0 0 0 0 0 0 0 0 0 0 0 0
38212 - 0 0 0 0 0 0 0 0 0 14 14 14
38213 - 42 42 42 86 86 86 6 6 6 116 116 116
38214 -106 106 106 6 6 6 70 70 70 149 149 149
38215 -128 128 128 18 18 18 38 38 38 54 54 54
38216 -221 221 221 106 106 106 2 2 6 14 14 14
38217 - 46 46 46 190 190 190 198 198 198 2 2 6
38218 - 2 2 6 2 2 6 2 2 6 2 2 6
38219 - 74 74 74 62 62 62 22 22 22 6 6 6
38220 - 0 0 0 0 0 0 0 0 0 0 0 0
38221 - 0 0 0 0 0 0 0 0 0 0 0 0
38222 - 0 0 0 0 0 0 0 0 0 0 0 0
38223 - 0 0 0 0 0 0 0 0 0 0 0 0
38224 - 0 0 0 0 0 0 0 0 0 0 0 0
38225 - 0 0 0 0 0 0 0 0 0 0 0 0
38226 - 0 0 0 0 0 0 0 0 1 0 0 0
38227 - 0 0 1 0 0 0 0 0 1 0 0 0
38228 - 0 0 0 0 0 0 0 0 0 0 0 0
38229 - 0 0 0 0 0 0 0 0 0 0 0 0
38230 - 0 0 0 0 0 0 0 0 0 0 0 0
38231 - 0 0 0 0 0 0 0 0 0 0 0 0
38232 - 0 0 0 0 0 0 0 0 0 14 14 14
38233 - 42 42 42 94 94 94 14 14 14 101 101 101
38234 -128 128 128 2 2 6 18 18 18 116 116 116
38235 -118 98 46 121 92 8 121 92 8 98 78 10
38236 -162 162 162 106 106 106 2 2 6 2 2 6
38237 - 2 2 6 195 195 195 195 195 195 6 6 6
38238 - 2 2 6 2 2 6 2 2 6 2 2 6
38239 - 74 74 74 62 62 62 22 22 22 6 6 6
38240 - 0 0 0 0 0 0 0 0 0 0 0 0
38241 - 0 0 0 0 0 0 0 0 0 0 0 0
38242 - 0 0 0 0 0 0 0 0 0 0 0 0
38243 - 0 0 0 0 0 0 0 0 0 0 0 0
38244 - 0 0 0 0 0 0 0 0 0 0 0 0
38245 - 0 0 0 0 0 0 0 0 0 0 0 0
38246 - 0 0 0 0 0 0 0 0 1 0 0 1
38247 - 0 0 1 0 0 0 0 0 1 0 0 0
38248 - 0 0 0 0 0 0 0 0 0 0 0 0
38249 - 0 0 0 0 0 0 0 0 0 0 0 0
38250 - 0 0 0 0 0 0 0 0 0 0 0 0
38251 - 0 0 0 0 0 0 0 0 0 0 0 0
38252 - 0 0 0 0 0 0 0 0 0 10 10 10
38253 - 38 38 38 90 90 90 14 14 14 58 58 58
38254 -210 210 210 26 26 26 54 38 6 154 114 10
38255 -226 170 11 236 186 11 225 175 15 184 144 12
38256 -215 174 15 175 146 61 37 26 9 2 2 6
38257 - 70 70 70 246 246 246 138 138 138 2 2 6
38258 - 2 2 6 2 2 6 2 2 6 2 2 6
38259 - 70 70 70 66 66 66 26 26 26 6 6 6
38260 - 0 0 0 0 0 0 0 0 0 0 0 0
38261 - 0 0 0 0 0 0 0 0 0 0 0 0
38262 - 0 0 0 0 0 0 0 0 0 0 0 0
38263 - 0 0 0 0 0 0 0 0 0 0 0 0
38264 - 0 0 0 0 0 0 0 0 0 0 0 0
38265 - 0 0 0 0 0 0 0 0 0 0 0 0
38266 - 0 0 0 0 0 0 0 0 0 0 0 0
38267 - 0 0 0 0 0 0 0 0 0 0 0 0
38268 - 0 0 0 0 0 0 0 0 0 0 0 0
38269 - 0 0 0 0 0 0 0 0 0 0 0 0
38270 - 0 0 0 0 0 0 0 0 0 0 0 0
38271 - 0 0 0 0 0 0 0 0 0 0 0 0
38272 - 0 0 0 0 0 0 0 0 0 10 10 10
38273 - 38 38 38 86 86 86 14 14 14 10 10 10
38274 -195 195 195 188 164 115 192 133 9 225 175 15
38275 -239 182 13 234 190 10 232 195 16 232 200 30
38276 -245 207 45 241 208 19 232 195 16 184 144 12
38277 -218 194 134 211 206 186 42 42 42 2 2 6
38278 - 2 2 6 2 2 6 2 2 6 2 2 6
38279 - 50 50 50 74 74 74 30 30 30 6 6 6
38280 - 0 0 0 0 0 0 0 0 0 0 0 0
38281 - 0 0 0 0 0 0 0 0 0 0 0 0
38282 - 0 0 0 0 0 0 0 0 0 0 0 0
38283 - 0 0 0 0 0 0 0 0 0 0 0 0
38284 - 0 0 0 0 0 0 0 0 0 0 0 0
38285 - 0 0 0 0 0 0 0 0 0 0 0 0
38286 - 0 0 0 0 0 0 0 0 0 0 0 0
38287 - 0 0 0 0 0 0 0 0 0 0 0 0
38288 - 0 0 0 0 0 0 0 0 0 0 0 0
38289 - 0 0 0 0 0 0 0 0 0 0 0 0
38290 - 0 0 0 0 0 0 0 0 0 0 0 0
38291 - 0 0 0 0 0 0 0 0 0 0 0 0
38292 - 0 0 0 0 0 0 0 0 0 10 10 10
38293 - 34 34 34 86 86 86 14 14 14 2 2 6
38294 -121 87 25 192 133 9 219 162 10 239 182 13
38295 -236 186 11 232 195 16 241 208 19 244 214 54
38296 -246 218 60 246 218 38 246 215 20 241 208 19
38297 -241 208 19 226 184 13 121 87 25 2 2 6
38298 - 2 2 6 2 2 6 2 2 6 2 2 6
38299 - 50 50 50 82 82 82 34 34 34 10 10 10
38300 - 0 0 0 0 0 0 0 0 0 0 0 0
38301 - 0 0 0 0 0 0 0 0 0 0 0 0
38302 - 0 0 0 0 0 0 0 0 0 0 0 0
38303 - 0 0 0 0 0 0 0 0 0 0 0 0
38304 - 0 0 0 0 0 0 0 0 0 0 0 0
38305 - 0 0 0 0 0 0 0 0 0 0 0 0
38306 - 0 0 0 0 0 0 0 0 0 0 0 0
38307 - 0 0 0 0 0 0 0 0 0 0 0 0
38308 - 0 0 0 0 0 0 0 0 0 0 0 0
38309 - 0 0 0 0 0 0 0 0 0 0 0 0
38310 - 0 0 0 0 0 0 0 0 0 0 0 0
38311 - 0 0 0 0 0 0 0 0 0 0 0 0
38312 - 0 0 0 0 0 0 0 0 0 10 10 10
38313 - 34 34 34 82 82 82 30 30 30 61 42 6
38314 -180 123 7 206 145 10 230 174 11 239 182 13
38315 -234 190 10 238 202 15 241 208 19 246 218 74
38316 -246 218 38 246 215 20 246 215 20 246 215 20
38317 -226 184 13 215 174 15 184 144 12 6 6 6
38318 - 2 2 6 2 2 6 2 2 6 2 2 6
38319 - 26 26 26 94 94 94 42 42 42 14 14 14
38320 - 0 0 0 0 0 0 0 0 0 0 0 0
38321 - 0 0 0 0 0 0 0 0 0 0 0 0
38322 - 0 0 0 0 0 0 0 0 0 0 0 0
38323 - 0 0 0 0 0 0 0 0 0 0 0 0
38324 - 0 0 0 0 0 0 0 0 0 0 0 0
38325 - 0 0 0 0 0 0 0 0 0 0 0 0
38326 - 0 0 0 0 0 0 0 0 0 0 0 0
38327 - 0 0 0 0 0 0 0 0 0 0 0 0
38328 - 0 0 0 0 0 0 0 0 0 0 0 0
38329 - 0 0 0 0 0 0 0 0 0 0 0 0
38330 - 0 0 0 0 0 0 0 0 0 0 0 0
38331 - 0 0 0 0 0 0 0 0 0 0 0 0
38332 - 0 0 0 0 0 0 0 0 0 10 10 10
38333 - 30 30 30 78 78 78 50 50 50 104 69 6
38334 -192 133 9 216 158 10 236 178 12 236 186 11
38335 -232 195 16 241 208 19 244 214 54 245 215 43
38336 -246 215 20 246 215 20 241 208 19 198 155 10
38337 -200 144 11 216 158 10 156 118 10 2 2 6
38338 - 2 2 6 2 2 6 2 2 6 2 2 6
38339 - 6 6 6 90 90 90 54 54 54 18 18 18
38340 - 6 6 6 0 0 0 0 0 0 0 0 0
38341 - 0 0 0 0 0 0 0 0 0 0 0 0
38342 - 0 0 0 0 0 0 0 0 0 0 0 0
38343 - 0 0 0 0 0 0 0 0 0 0 0 0
38344 - 0 0 0 0 0 0 0 0 0 0 0 0
38345 - 0 0 0 0 0 0 0 0 0 0 0 0
38346 - 0 0 0 0 0 0 0 0 0 0 0 0
38347 - 0 0 0 0 0 0 0 0 0 0 0 0
38348 - 0 0 0 0 0 0 0 0 0 0 0 0
38349 - 0 0 0 0 0 0 0 0 0 0 0 0
38350 - 0 0 0 0 0 0 0 0 0 0 0 0
38351 - 0 0 0 0 0 0 0 0 0 0 0 0
38352 - 0 0 0 0 0 0 0 0 0 10 10 10
38353 - 30 30 30 78 78 78 46 46 46 22 22 22
38354 -137 92 6 210 162 10 239 182 13 238 190 10
38355 -238 202 15 241 208 19 246 215 20 246 215 20
38356 -241 208 19 203 166 17 185 133 11 210 150 10
38357 -216 158 10 210 150 10 102 78 10 2 2 6
38358 - 6 6 6 54 54 54 14 14 14 2 2 6
38359 - 2 2 6 62 62 62 74 74 74 30 30 30
38360 - 10 10 10 0 0 0 0 0 0 0 0 0
38361 - 0 0 0 0 0 0 0 0 0 0 0 0
38362 - 0 0 0 0 0 0 0 0 0 0 0 0
38363 - 0 0 0 0 0 0 0 0 0 0 0 0
38364 - 0 0 0 0 0 0 0 0 0 0 0 0
38365 - 0 0 0 0 0 0 0 0 0 0 0 0
38366 - 0 0 0 0 0 0 0 0 0 0 0 0
38367 - 0 0 0 0 0 0 0 0 0 0 0 0
38368 - 0 0 0 0 0 0 0 0 0 0 0 0
38369 - 0 0 0 0 0 0 0 0 0 0 0 0
38370 - 0 0 0 0 0 0 0 0 0 0 0 0
38371 - 0 0 0 0 0 0 0 0 0 0 0 0
38372 - 0 0 0 0 0 0 0 0 0 10 10 10
38373 - 34 34 34 78 78 78 50 50 50 6 6 6
38374 - 94 70 30 139 102 15 190 146 13 226 184 13
38375 -232 200 30 232 195 16 215 174 15 190 146 13
38376 -168 122 10 192 133 9 210 150 10 213 154 11
38377 -202 150 34 182 157 106 101 98 89 2 2 6
38378 - 2 2 6 78 78 78 116 116 116 58 58 58
38379 - 2 2 6 22 22 22 90 90 90 46 46 46
38380 - 18 18 18 6 6 6 0 0 0 0 0 0
38381 - 0 0 0 0 0 0 0 0 0 0 0 0
38382 - 0 0 0 0 0 0 0 0 0 0 0 0
38383 - 0 0 0 0 0 0 0 0 0 0 0 0
38384 - 0 0 0 0 0 0 0 0 0 0 0 0
38385 - 0 0 0 0 0 0 0 0 0 0 0 0
38386 - 0 0 0 0 0 0 0 0 0 0 0 0
38387 - 0 0 0 0 0 0 0 0 0 0 0 0
38388 - 0 0 0 0 0 0 0 0 0 0 0 0
38389 - 0 0 0 0 0 0 0 0 0 0 0 0
38390 - 0 0 0 0 0 0 0 0 0 0 0 0
38391 - 0 0 0 0 0 0 0 0 0 0 0 0
38392 - 0 0 0 0 0 0 0 0 0 10 10 10
38393 - 38 38 38 86 86 86 50 50 50 6 6 6
38394 -128 128 128 174 154 114 156 107 11 168 122 10
38395 -198 155 10 184 144 12 197 138 11 200 144 11
38396 -206 145 10 206 145 10 197 138 11 188 164 115
38397 -195 195 195 198 198 198 174 174 174 14 14 14
38398 - 2 2 6 22 22 22 116 116 116 116 116 116
38399 - 22 22 22 2 2 6 74 74 74 70 70 70
38400 - 30 30 30 10 10 10 0 0 0 0 0 0
38401 - 0 0 0 0 0 0 0 0 0 0 0 0
38402 - 0 0 0 0 0 0 0 0 0 0 0 0
38403 - 0 0 0 0 0 0 0 0 0 0 0 0
38404 - 0 0 0 0 0 0 0 0 0 0 0 0
38405 - 0 0 0 0 0 0 0 0 0 0 0 0
38406 - 0 0 0 0 0 0 0 0 0 0 0 0
38407 - 0 0 0 0 0 0 0 0 0 0 0 0
38408 - 0 0 0 0 0 0 0 0 0 0 0 0
38409 - 0 0 0 0 0 0 0 0 0 0 0 0
38410 - 0 0 0 0 0 0 0 0 0 0 0 0
38411 - 0 0 0 0 0 0 0 0 0 0 0 0
38412 - 0 0 0 0 0 0 6 6 6 18 18 18
38413 - 50 50 50 101 101 101 26 26 26 10 10 10
38414 -138 138 138 190 190 190 174 154 114 156 107 11
38415 -197 138 11 200 144 11 197 138 11 192 133 9
38416 -180 123 7 190 142 34 190 178 144 187 187 187
38417 -202 202 202 221 221 221 214 214 214 66 66 66
38418 - 2 2 6 2 2 6 50 50 50 62 62 62
38419 - 6 6 6 2 2 6 10 10 10 90 90 90
38420 - 50 50 50 18 18 18 6 6 6 0 0 0
38421 - 0 0 0 0 0 0 0 0 0 0 0 0
38422 - 0 0 0 0 0 0 0 0 0 0 0 0
38423 - 0 0 0 0 0 0 0 0 0 0 0 0
38424 - 0 0 0 0 0 0 0 0 0 0 0 0
38425 - 0 0 0 0 0 0 0 0 0 0 0 0
38426 - 0 0 0 0 0 0 0 0 0 0 0 0
38427 - 0 0 0 0 0 0 0 0 0 0 0 0
38428 - 0 0 0 0 0 0 0 0 0 0 0 0
38429 - 0 0 0 0 0 0 0 0 0 0 0 0
38430 - 0 0 0 0 0 0 0 0 0 0 0 0
38431 - 0 0 0 0 0 0 0 0 0 0 0 0
38432 - 0 0 0 0 0 0 10 10 10 34 34 34
38433 - 74 74 74 74 74 74 2 2 6 6 6 6
38434 -144 144 144 198 198 198 190 190 190 178 166 146
38435 -154 121 60 156 107 11 156 107 11 168 124 44
38436 -174 154 114 187 187 187 190 190 190 210 210 210
38437 -246 246 246 253 253 253 253 253 253 182 182 182
38438 - 6 6 6 2 2 6 2 2 6 2 2 6
38439 - 2 2 6 2 2 6 2 2 6 62 62 62
38440 - 74 74 74 34 34 34 14 14 14 0 0 0
38441 - 0 0 0 0 0 0 0 0 0 0 0 0
38442 - 0 0 0 0 0 0 0 0 0 0 0 0
38443 - 0 0 0 0 0 0 0 0 0 0 0 0
38444 - 0 0 0 0 0 0 0 0 0 0 0 0
38445 - 0 0 0 0 0 0 0 0 0 0 0 0
38446 - 0 0 0 0 0 0 0 0 0 0 0 0
38447 - 0 0 0 0 0 0 0 0 0 0 0 0
38448 - 0 0 0 0 0 0 0 0 0 0 0 0
38449 - 0 0 0 0 0 0 0 0 0 0 0 0
38450 - 0 0 0 0 0 0 0 0 0 0 0 0
38451 - 0 0 0 0 0 0 0 0 0 0 0 0
38452 - 0 0 0 10 10 10 22 22 22 54 54 54
38453 - 94 94 94 18 18 18 2 2 6 46 46 46
38454 -234 234 234 221 221 221 190 190 190 190 190 190
38455 -190 190 190 187 187 187 187 187 187 190 190 190
38456 -190 190 190 195 195 195 214 214 214 242 242 242
38457 -253 253 253 253 253 253 253 253 253 253 253 253
38458 - 82 82 82 2 2 6 2 2 6 2 2 6
38459 - 2 2 6 2 2 6 2 2 6 14 14 14
38460 - 86 86 86 54 54 54 22 22 22 6 6 6
38461 - 0 0 0 0 0 0 0 0 0 0 0 0
38462 - 0 0 0 0 0 0 0 0 0 0 0 0
38463 - 0 0 0 0 0 0 0 0 0 0 0 0
38464 - 0 0 0 0 0 0 0 0 0 0 0 0
38465 - 0 0 0 0 0 0 0 0 0 0 0 0
38466 - 0 0 0 0 0 0 0 0 0 0 0 0
38467 - 0 0 0 0 0 0 0 0 0 0 0 0
38468 - 0 0 0 0 0 0 0 0 0 0 0 0
38469 - 0 0 0 0 0 0 0 0 0 0 0 0
38470 - 0 0 0 0 0 0 0 0 0 0 0 0
38471 - 0 0 0 0 0 0 0 0 0 0 0 0
38472 - 6 6 6 18 18 18 46 46 46 90 90 90
38473 - 46 46 46 18 18 18 6 6 6 182 182 182
38474 -253 253 253 246 246 246 206 206 206 190 190 190
38475 -190 190 190 190 190 190 190 190 190 190 190 190
38476 -206 206 206 231 231 231 250 250 250 253 253 253
38477 -253 253 253 253 253 253 253 253 253 253 253 253
38478 -202 202 202 14 14 14 2 2 6 2 2 6
38479 - 2 2 6 2 2 6 2 2 6 2 2 6
38480 - 42 42 42 86 86 86 42 42 42 18 18 18
38481 - 6 6 6 0 0 0 0 0 0 0 0 0
38482 - 0 0 0 0 0 0 0 0 0 0 0 0
38483 - 0 0 0 0 0 0 0 0 0 0 0 0
38484 - 0 0 0 0 0 0 0 0 0 0 0 0
38485 - 0 0 0 0 0 0 0 0 0 0 0 0
38486 - 0 0 0 0 0 0 0 0 0 0 0 0
38487 - 0 0 0 0 0 0 0 0 0 0 0 0
38488 - 0 0 0 0 0 0 0 0 0 0 0 0
38489 - 0 0 0 0 0 0 0 0 0 0 0 0
38490 - 0 0 0 0 0 0 0 0 0 0 0 0
38491 - 0 0 0 0 0 0 0 0 0 6 6 6
38492 - 14 14 14 38 38 38 74 74 74 66 66 66
38493 - 2 2 6 6 6 6 90 90 90 250 250 250
38494 -253 253 253 253 253 253 238 238 238 198 198 198
38495 -190 190 190 190 190 190 195 195 195 221 221 221
38496 -246 246 246 253 253 253 253 253 253 253 253 253
38497 -253 253 253 253 253 253 253 253 253 253 253 253
38498 -253 253 253 82 82 82 2 2 6 2 2 6
38499 - 2 2 6 2 2 6 2 2 6 2 2 6
38500 - 2 2 6 78 78 78 70 70 70 34 34 34
38501 - 14 14 14 6 6 6 0 0 0 0 0 0
38502 - 0 0 0 0 0 0 0 0 0 0 0 0
38503 - 0 0 0 0 0 0 0 0 0 0 0 0
38504 - 0 0 0 0 0 0 0 0 0 0 0 0
38505 - 0 0 0 0 0 0 0 0 0 0 0 0
38506 - 0 0 0 0 0 0 0 0 0 0 0 0
38507 - 0 0 0 0 0 0 0 0 0 0 0 0
38508 - 0 0 0 0 0 0 0 0 0 0 0 0
38509 - 0 0 0 0 0 0 0 0 0 0 0 0
38510 - 0 0 0 0 0 0 0 0 0 0 0 0
38511 - 0 0 0 0 0 0 0 0 0 14 14 14
38512 - 34 34 34 66 66 66 78 78 78 6 6 6
38513 - 2 2 6 18 18 18 218 218 218 253 253 253
38514 -253 253 253 253 253 253 253 253 253 246 246 246
38515 -226 226 226 231 231 231 246 246 246 253 253 253
38516 -253 253 253 253 253 253 253 253 253 253 253 253
38517 -253 253 253 253 253 253 253 253 253 253 253 253
38518 -253 253 253 178 178 178 2 2 6 2 2 6
38519 - 2 2 6 2 2 6 2 2 6 2 2 6
38520 - 2 2 6 18 18 18 90 90 90 62 62 62
38521 - 30 30 30 10 10 10 0 0 0 0 0 0
38522 - 0 0 0 0 0 0 0 0 0 0 0 0
38523 - 0 0 0 0 0 0 0 0 0 0 0 0
38524 - 0 0 0 0 0 0 0 0 0 0 0 0
38525 - 0 0 0 0 0 0 0 0 0 0 0 0
38526 - 0 0 0 0 0 0 0 0 0 0 0 0
38527 - 0 0 0 0 0 0 0 0 0 0 0 0
38528 - 0 0 0 0 0 0 0 0 0 0 0 0
38529 - 0 0 0 0 0 0 0 0 0 0 0 0
38530 - 0 0 0 0 0 0 0 0 0 0 0 0
38531 - 0 0 0 0 0 0 10 10 10 26 26 26
38532 - 58 58 58 90 90 90 18 18 18 2 2 6
38533 - 2 2 6 110 110 110 253 253 253 253 253 253
38534 -253 253 253 253 253 253 253 253 253 253 253 253
38535 -250 250 250 253 253 253 253 253 253 253 253 253
38536 -253 253 253 253 253 253 253 253 253 253 253 253
38537 -253 253 253 253 253 253 253 253 253 253 253 253
38538 -253 253 253 231 231 231 18 18 18 2 2 6
38539 - 2 2 6 2 2 6 2 2 6 2 2 6
38540 - 2 2 6 2 2 6 18 18 18 94 94 94
38541 - 54 54 54 26 26 26 10 10 10 0 0 0
38542 - 0 0 0 0 0 0 0 0 0 0 0 0
38543 - 0 0 0 0 0 0 0 0 0 0 0 0
38544 - 0 0 0 0 0 0 0 0 0 0 0 0
38545 - 0 0 0 0 0 0 0 0 0 0 0 0
38546 - 0 0 0 0 0 0 0 0 0 0 0 0
38547 - 0 0 0 0 0 0 0 0 0 0 0 0
38548 - 0 0 0 0 0 0 0 0 0 0 0 0
38549 - 0 0 0 0 0 0 0 0 0 0 0 0
38550 - 0 0 0 0 0 0 0 0 0 0 0 0
38551 - 0 0 0 6 6 6 22 22 22 50 50 50
38552 - 90 90 90 26 26 26 2 2 6 2 2 6
38553 - 14 14 14 195 195 195 250 250 250 253 253 253
38554 -253 253 253 253 253 253 253 253 253 253 253 253
38555 -253 253 253 253 253 253 253 253 253 253 253 253
38556 -253 253 253 253 253 253 253 253 253 253 253 253
38557 -253 253 253 253 253 253 253 253 253 253 253 253
38558 -250 250 250 242 242 242 54 54 54 2 2 6
38559 - 2 2 6 2 2 6 2 2 6 2 2 6
38560 - 2 2 6 2 2 6 2 2 6 38 38 38
38561 - 86 86 86 50 50 50 22 22 22 6 6 6
38562 - 0 0 0 0 0 0 0 0 0 0 0 0
38563 - 0 0 0 0 0 0 0 0 0 0 0 0
38564 - 0 0 0 0 0 0 0 0 0 0 0 0
38565 - 0 0 0 0 0 0 0 0 0 0 0 0
38566 - 0 0 0 0 0 0 0 0 0 0 0 0
38567 - 0 0 0 0 0 0 0 0 0 0 0 0
38568 - 0 0 0 0 0 0 0 0 0 0 0 0
38569 - 0 0 0 0 0 0 0 0 0 0 0 0
38570 - 0 0 0 0 0 0 0 0 0 0 0 0
38571 - 6 6 6 14 14 14 38 38 38 82 82 82
38572 - 34 34 34 2 2 6 2 2 6 2 2 6
38573 - 42 42 42 195 195 195 246 246 246 253 253 253
38574 -253 253 253 253 253 253 253 253 253 250 250 250
38575 -242 242 242 242 242 242 250 250 250 253 253 253
38576 -253 253 253 253 253 253 253 253 253 253 253 253
38577 -253 253 253 250 250 250 246 246 246 238 238 238
38578 -226 226 226 231 231 231 101 101 101 6 6 6
38579 - 2 2 6 2 2 6 2 2 6 2 2 6
38580 - 2 2 6 2 2 6 2 2 6 2 2 6
38581 - 38 38 38 82 82 82 42 42 42 14 14 14
38582 - 6 6 6 0 0 0 0 0 0 0 0 0
38583 - 0 0 0 0 0 0 0 0 0 0 0 0
38584 - 0 0 0 0 0 0 0 0 0 0 0 0
38585 - 0 0 0 0 0 0 0 0 0 0 0 0
38586 - 0 0 0 0 0 0 0 0 0 0 0 0
38587 - 0 0 0 0 0 0 0 0 0 0 0 0
38588 - 0 0 0 0 0 0 0 0 0 0 0 0
38589 - 0 0 0 0 0 0 0 0 0 0 0 0
38590 - 0 0 0 0 0 0 0 0 0 0 0 0
38591 - 10 10 10 26 26 26 62 62 62 66 66 66
38592 - 2 2 6 2 2 6 2 2 6 6 6 6
38593 - 70 70 70 170 170 170 206 206 206 234 234 234
38594 -246 246 246 250 250 250 250 250 250 238 238 238
38595 -226 226 226 231 231 231 238 238 238 250 250 250
38596 -250 250 250 250 250 250 246 246 246 231 231 231
38597 -214 214 214 206 206 206 202 202 202 202 202 202
38598 -198 198 198 202 202 202 182 182 182 18 18 18
38599 - 2 2 6 2 2 6 2 2 6 2 2 6
38600 - 2 2 6 2 2 6 2 2 6 2 2 6
38601 - 2 2 6 62 62 62 66 66 66 30 30 30
38602 - 10 10 10 0 0 0 0 0 0 0 0 0
38603 - 0 0 0 0 0 0 0 0 0 0 0 0
38604 - 0 0 0 0 0 0 0 0 0 0 0 0
38605 - 0 0 0 0 0 0 0 0 0 0 0 0
38606 - 0 0 0 0 0 0 0 0 0 0 0 0
38607 - 0 0 0 0 0 0 0 0 0 0 0 0
38608 - 0 0 0 0 0 0 0 0 0 0 0 0
38609 - 0 0 0 0 0 0 0 0 0 0 0 0
38610 - 0 0 0 0 0 0 0 0 0 0 0 0
38611 - 14 14 14 42 42 42 82 82 82 18 18 18
38612 - 2 2 6 2 2 6 2 2 6 10 10 10
38613 - 94 94 94 182 182 182 218 218 218 242 242 242
38614 -250 250 250 253 253 253 253 253 253 250 250 250
38615 -234 234 234 253 253 253 253 253 253 253 253 253
38616 -253 253 253 253 253 253 253 253 253 246 246 246
38617 -238 238 238 226 226 226 210 210 210 202 202 202
38618 -195 195 195 195 195 195 210 210 210 158 158 158
38619 - 6 6 6 14 14 14 50 50 50 14 14 14
38620 - 2 2 6 2 2 6 2 2 6 2 2 6
38621 - 2 2 6 6 6 6 86 86 86 46 46 46
38622 - 18 18 18 6 6 6 0 0 0 0 0 0
38623 - 0 0 0 0 0 0 0 0 0 0 0 0
38624 - 0 0 0 0 0 0 0 0 0 0 0 0
38625 - 0 0 0 0 0 0 0 0 0 0 0 0
38626 - 0 0 0 0 0 0 0 0 0 0 0 0
38627 - 0 0 0 0 0 0 0 0 0 0 0 0
38628 - 0 0 0 0 0 0 0 0 0 0 0 0
38629 - 0 0 0 0 0 0 0 0 0 0 0 0
38630 - 0 0 0 0 0 0 0 0 0 6 6 6
38631 - 22 22 22 54 54 54 70 70 70 2 2 6
38632 - 2 2 6 10 10 10 2 2 6 22 22 22
38633 -166 166 166 231 231 231 250 250 250 253 253 253
38634 -253 253 253 253 253 253 253 253 253 250 250 250
38635 -242 242 242 253 253 253 253 253 253 253 253 253
38636 -253 253 253 253 253 253 253 253 253 253 253 253
38637 -253 253 253 253 253 253 253 253 253 246 246 246
38638 -231 231 231 206 206 206 198 198 198 226 226 226
38639 - 94 94 94 2 2 6 6 6 6 38 38 38
38640 - 30 30 30 2 2 6 2 2 6 2 2 6
38641 - 2 2 6 2 2 6 62 62 62 66 66 66
38642 - 26 26 26 10 10 10 0 0 0 0 0 0
38643 - 0 0 0 0 0 0 0 0 0 0 0 0
38644 - 0 0 0 0 0 0 0 0 0 0 0 0
38645 - 0 0 0 0 0 0 0 0 0 0 0 0
38646 - 0 0 0 0 0 0 0 0 0 0 0 0
38647 - 0 0 0 0 0 0 0 0 0 0 0 0
38648 - 0 0 0 0 0 0 0 0 0 0 0 0
38649 - 0 0 0 0 0 0 0 0 0 0 0 0
38650 - 0 0 0 0 0 0 0 0 0 10 10 10
38651 - 30 30 30 74 74 74 50 50 50 2 2 6
38652 - 26 26 26 26 26 26 2 2 6 106 106 106
38653 -238 238 238 253 253 253 253 253 253 253 253 253
38654 -253 253 253 253 253 253 253 253 253 253 253 253
38655 -253 253 253 253 253 253 253 253 253 253 253 253
38656 -253 253 253 253 253 253 253 253 253 253 253 253
38657 -253 253 253 253 253 253 253 253 253 253 253 253
38658 -253 253 253 246 246 246 218 218 218 202 202 202
38659 -210 210 210 14 14 14 2 2 6 2 2 6
38660 - 30 30 30 22 22 22 2 2 6 2 2 6
38661 - 2 2 6 2 2 6 18 18 18 86 86 86
38662 - 42 42 42 14 14 14 0 0 0 0 0 0
38663 - 0 0 0 0 0 0 0 0 0 0 0 0
38664 - 0 0 0 0 0 0 0 0 0 0 0 0
38665 - 0 0 0 0 0 0 0 0 0 0 0 0
38666 - 0 0 0 0 0 0 0 0 0 0 0 0
38667 - 0 0 0 0 0 0 0 0 0 0 0 0
38668 - 0 0 0 0 0 0 0 0 0 0 0 0
38669 - 0 0 0 0 0 0 0 0 0 0 0 0
38670 - 0 0 0 0 0 0 0 0 0 14 14 14
38671 - 42 42 42 90 90 90 22 22 22 2 2 6
38672 - 42 42 42 2 2 6 18 18 18 218 218 218
38673 -253 253 253 253 253 253 253 253 253 253 253 253
38674 -253 253 253 253 253 253 253 253 253 253 253 253
38675 -253 253 253 253 253 253 253 253 253 253 253 253
38676 -253 253 253 253 253 253 253 253 253 253 253 253
38677 -253 253 253 253 253 253 253 253 253 253 253 253
38678 -253 253 253 253 253 253 250 250 250 221 221 221
38679 -218 218 218 101 101 101 2 2 6 14 14 14
38680 - 18 18 18 38 38 38 10 10 10 2 2 6
38681 - 2 2 6 2 2 6 2 2 6 78 78 78
38682 - 58 58 58 22 22 22 6 6 6 0 0 0
38683 - 0 0 0 0 0 0 0 0 0 0 0 0
38684 - 0 0 0 0 0 0 0 0 0 0 0 0
38685 - 0 0 0 0 0 0 0 0 0 0 0 0
38686 - 0 0 0 0 0 0 0 0 0 0 0 0
38687 - 0 0 0 0 0 0 0 0 0 0 0 0
38688 - 0 0 0 0 0 0 0 0 0 0 0 0
38689 - 0 0 0 0 0 0 0 0 0 0 0 0
38690 - 0 0 0 0 0 0 6 6 6 18 18 18
38691 - 54 54 54 82 82 82 2 2 6 26 26 26
38692 - 22 22 22 2 2 6 123 123 123 253 253 253
38693 -253 253 253 253 253 253 253 253 253 253 253 253
38694 -253 253 253 253 253 253 253 253 253 253 253 253
38695 -253 253 253 253 253 253 253 253 253 253 253 253
38696 -253 253 253 253 253 253 253 253 253 253 253 253
38697 -253 253 253 253 253 253 253 253 253 253 253 253
38698 -253 253 253 253 253 253 253 253 253 250 250 250
38699 -238 238 238 198 198 198 6 6 6 38 38 38
38700 - 58 58 58 26 26 26 38 38 38 2 2 6
38701 - 2 2 6 2 2 6 2 2 6 46 46 46
38702 - 78 78 78 30 30 30 10 10 10 0 0 0
38703 - 0 0 0 0 0 0 0 0 0 0 0 0
38704 - 0 0 0 0 0 0 0 0 0 0 0 0
38705 - 0 0 0 0 0 0 0 0 0 0 0 0
38706 - 0 0 0 0 0 0 0 0 0 0 0 0
38707 - 0 0 0 0 0 0 0 0 0 0 0 0
38708 - 0 0 0 0 0 0 0 0 0 0 0 0
38709 - 0 0 0 0 0 0 0 0 0 0 0 0
38710 - 0 0 0 0 0 0 10 10 10 30 30 30
38711 - 74 74 74 58 58 58 2 2 6 42 42 42
38712 - 2 2 6 22 22 22 231 231 231 253 253 253
38713 -253 253 253 253 253 253 253 253 253 253 253 253
38714 -253 253 253 253 253 253 253 253 253 250 250 250
38715 -253 253 253 253 253 253 253 253 253 253 253 253
38716 -253 253 253 253 253 253 253 253 253 253 253 253
38717 -253 253 253 253 253 253 253 253 253 253 253 253
38718 -253 253 253 253 253 253 253 253 253 253 253 253
38719 -253 253 253 246 246 246 46 46 46 38 38 38
38720 - 42 42 42 14 14 14 38 38 38 14 14 14
38721 - 2 2 6 2 2 6 2 2 6 6 6 6
38722 - 86 86 86 46 46 46 14 14 14 0 0 0
38723 - 0 0 0 0 0 0 0 0 0 0 0 0
38724 - 0 0 0 0 0 0 0 0 0 0 0 0
38725 - 0 0 0 0 0 0 0 0 0 0 0 0
38726 - 0 0 0 0 0 0 0 0 0 0 0 0
38727 - 0 0 0 0 0 0 0 0 0 0 0 0
38728 - 0 0 0 0 0 0 0 0 0 0 0 0
38729 - 0 0 0 0 0 0 0 0 0 0 0 0
38730 - 0 0 0 6 6 6 14 14 14 42 42 42
38731 - 90 90 90 18 18 18 18 18 18 26 26 26
38732 - 2 2 6 116 116 116 253 253 253 253 253 253
38733 -253 253 253 253 253 253 253 253 253 253 253 253
38734 -253 253 253 253 253 253 250 250 250 238 238 238
38735 -253 253 253 253 253 253 253 253 253 253 253 253
38736 -253 253 253 253 253 253 253 253 253 253 253 253
38737 -253 253 253 253 253 253 253 253 253 253 253 253
38738 -253 253 253 253 253 253 253 253 253 253 253 253
38739 -253 253 253 253 253 253 94 94 94 6 6 6
38740 - 2 2 6 2 2 6 10 10 10 34 34 34
38741 - 2 2 6 2 2 6 2 2 6 2 2 6
38742 - 74 74 74 58 58 58 22 22 22 6 6 6
38743 - 0 0 0 0 0 0 0 0 0 0 0 0
38744 - 0 0 0 0 0 0 0 0 0 0 0 0
38745 - 0 0 0 0 0 0 0 0 0 0 0 0
38746 - 0 0 0 0 0 0 0 0 0 0 0 0
38747 - 0 0 0 0 0 0 0 0 0 0 0 0
38748 - 0 0 0 0 0 0 0 0 0 0 0 0
38749 - 0 0 0 0 0 0 0 0 0 0 0 0
38750 - 0 0 0 10 10 10 26 26 26 66 66 66
38751 - 82 82 82 2 2 6 38 38 38 6 6 6
38752 - 14 14 14 210 210 210 253 253 253 253 253 253
38753 -253 253 253 253 253 253 253 253 253 253 253 253
38754 -253 253 253 253 253 253 246 246 246 242 242 242
38755 -253 253 253 253 253 253 253 253 253 253 253 253
38756 -253 253 253 253 253 253 253 253 253 253 253 253
38757 -253 253 253 253 253 253 253 253 253 253 253 253
38758 -253 253 253 253 253 253 253 253 253 253 253 253
38759 -253 253 253 253 253 253 144 144 144 2 2 6
38760 - 2 2 6 2 2 6 2 2 6 46 46 46
38761 - 2 2 6 2 2 6 2 2 6 2 2 6
38762 - 42 42 42 74 74 74 30 30 30 10 10 10
38763 - 0 0 0 0 0 0 0 0 0 0 0 0
38764 - 0 0 0 0 0 0 0 0 0 0 0 0
38765 - 0 0 0 0 0 0 0 0 0 0 0 0
38766 - 0 0 0 0 0 0 0 0 0 0 0 0
38767 - 0 0 0 0 0 0 0 0 0 0 0 0
38768 - 0 0 0 0 0 0 0 0 0 0 0 0
38769 - 0 0 0 0 0 0 0 0 0 0 0 0
38770 - 6 6 6 14 14 14 42 42 42 90 90 90
38771 - 26 26 26 6 6 6 42 42 42 2 2 6
38772 - 74 74 74 250 250 250 253 253 253 253 253 253
38773 -253 253 253 253 253 253 253 253 253 253 253 253
38774 -253 253 253 253 253 253 242 242 242 242 242 242
38775 -253 253 253 253 253 253 253 253 253 253 253 253
38776 -253 253 253 253 253 253 253 253 253 253 253 253
38777 -253 253 253 253 253 253 253 253 253 253 253 253
38778 -253 253 253 253 253 253 253 253 253 253 253 253
38779 -253 253 253 253 253 253 182 182 182 2 2 6
38780 - 2 2 6 2 2 6 2 2 6 46 46 46
38781 - 2 2 6 2 2 6 2 2 6 2 2 6
38782 - 10 10 10 86 86 86 38 38 38 10 10 10
38783 - 0 0 0 0 0 0 0 0 0 0 0 0
38784 - 0 0 0 0 0 0 0 0 0 0 0 0
38785 - 0 0 0 0 0 0 0 0 0 0 0 0
38786 - 0 0 0 0 0 0 0 0 0 0 0 0
38787 - 0 0 0 0 0 0 0 0 0 0 0 0
38788 - 0 0 0 0 0 0 0 0 0 0 0 0
38789 - 0 0 0 0 0 0 0 0 0 0 0 0
38790 - 10 10 10 26 26 26 66 66 66 82 82 82
38791 - 2 2 6 22 22 22 18 18 18 2 2 6
38792 -149 149 149 253 253 253 253 253 253 253 253 253
38793 -253 253 253 253 253 253 253 253 253 253 253 253
38794 -253 253 253 253 253 253 234 234 234 242 242 242
38795 -253 253 253 253 253 253 253 253 253 253 253 253
38796 -253 253 253 253 253 253 253 253 253 253 253 253
38797 -253 253 253 253 253 253 253 253 253 253 253 253
38798 -253 253 253 253 253 253 253 253 253 253 253 253
38799 -253 253 253 253 253 253 206 206 206 2 2 6
38800 - 2 2 6 2 2 6 2 2 6 38 38 38
38801 - 2 2 6 2 2 6 2 2 6 2 2 6
38802 - 6 6 6 86 86 86 46 46 46 14 14 14
38803 - 0 0 0 0 0 0 0 0 0 0 0 0
38804 - 0 0 0 0 0 0 0 0 0 0 0 0
38805 - 0 0 0 0 0 0 0 0 0 0 0 0
38806 - 0 0 0 0 0 0 0 0 0 0 0 0
38807 - 0 0 0 0 0 0 0 0 0 0 0 0
38808 - 0 0 0 0 0 0 0 0 0 0 0 0
38809 - 0 0 0 0 0 0 0 0 0 6 6 6
38810 - 18 18 18 46 46 46 86 86 86 18 18 18
38811 - 2 2 6 34 34 34 10 10 10 6 6 6
38812 -210 210 210 253 253 253 253 253 253 253 253 253
38813 -253 253 253 253 253 253 253 253 253 253 253 253
38814 -253 253 253 253 253 253 234 234 234 242 242 242
38815 -253 253 253 253 253 253 253 253 253 253 253 253
38816 -253 253 253 253 253 253 253 253 253 253 253 253
38817 -253 253 253 253 253 253 253 253 253 253 253 253
38818 -253 253 253 253 253 253 253 253 253 253 253 253
38819 -253 253 253 253 253 253 221 221 221 6 6 6
38820 - 2 2 6 2 2 6 6 6 6 30 30 30
38821 - 2 2 6 2 2 6 2 2 6 2 2 6
38822 - 2 2 6 82 82 82 54 54 54 18 18 18
38823 - 6 6 6 0 0 0 0 0 0 0 0 0
38824 - 0 0 0 0 0 0 0 0 0 0 0 0
38825 - 0 0 0 0 0 0 0 0 0 0 0 0
38826 - 0 0 0 0 0 0 0 0 0 0 0 0
38827 - 0 0 0 0 0 0 0 0 0 0 0 0
38828 - 0 0 0 0 0 0 0 0 0 0 0 0
38829 - 0 0 0 0 0 0 0 0 0 10 10 10
38830 - 26 26 26 66 66 66 62 62 62 2 2 6
38831 - 2 2 6 38 38 38 10 10 10 26 26 26
38832 -238 238 238 253 253 253 253 253 253 253 253 253
38833 -253 253 253 253 253 253 253 253 253 253 253 253
38834 -253 253 253 253 253 253 231 231 231 238 238 238
38835 -253 253 253 253 253 253 253 253 253 253 253 253
38836 -253 253 253 253 253 253 253 253 253 253 253 253
38837 -253 253 253 253 253 253 253 253 253 253 253 253
38838 -253 253 253 253 253 253 253 253 253 253 253 253
38839 -253 253 253 253 253 253 231 231 231 6 6 6
38840 - 2 2 6 2 2 6 10 10 10 30 30 30
38841 - 2 2 6 2 2 6 2 2 6 2 2 6
38842 - 2 2 6 66 66 66 58 58 58 22 22 22
38843 - 6 6 6 0 0 0 0 0 0 0 0 0
38844 - 0 0 0 0 0 0 0 0 0 0 0 0
38845 - 0 0 0 0 0 0 0 0 0 0 0 0
38846 - 0 0 0 0 0 0 0 0 0 0 0 0
38847 - 0 0 0 0 0 0 0 0 0 0 0 0
38848 - 0 0 0 0 0 0 0 0 0 0 0 0
38849 - 0 0 0 0 0 0 0 0 0 10 10 10
38850 - 38 38 38 78 78 78 6 6 6 2 2 6
38851 - 2 2 6 46 46 46 14 14 14 42 42 42
38852 -246 246 246 253 253 253 253 253 253 253 253 253
38853 -253 253 253 253 253 253 253 253 253 253 253 253
38854 -253 253 253 253 253 253 231 231 231 242 242 242
38855 -253 253 253 253 253 253 253 253 253 253 253 253
38856 -253 253 253 253 253 253 253 253 253 253 253 253
38857 -253 253 253 253 253 253 253 253 253 253 253 253
38858 -253 253 253 253 253 253 253 253 253 253 253 253
38859 -253 253 253 253 253 253 234 234 234 10 10 10
38860 - 2 2 6 2 2 6 22 22 22 14 14 14
38861 - 2 2 6 2 2 6 2 2 6 2 2 6
38862 - 2 2 6 66 66 66 62 62 62 22 22 22
38863 - 6 6 6 0 0 0 0 0 0 0 0 0
38864 - 0 0 0 0 0 0 0 0 0 0 0 0
38865 - 0 0 0 0 0 0 0 0 0 0 0 0
38866 - 0 0 0 0 0 0 0 0 0 0 0 0
38867 - 0 0 0 0 0 0 0 0 0 0 0 0
38868 - 0 0 0 0 0 0 0 0 0 0 0 0
38869 - 0 0 0 0 0 0 6 6 6 18 18 18
38870 - 50 50 50 74 74 74 2 2 6 2 2 6
38871 - 14 14 14 70 70 70 34 34 34 62 62 62
38872 -250 250 250 253 253 253 253 253 253 253 253 253
38873 -253 253 253 253 253 253 253 253 253 253 253 253
38874 -253 253 253 253 253 253 231 231 231 246 246 246
38875 -253 253 253 253 253 253 253 253 253 253 253 253
38876 -253 253 253 253 253 253 253 253 253 253 253 253
38877 -253 253 253 253 253 253 253 253 253 253 253 253
38878 -253 253 253 253 253 253 253 253 253 253 253 253
38879 -253 253 253 253 253 253 234 234 234 14 14 14
38880 - 2 2 6 2 2 6 30 30 30 2 2 6
38881 - 2 2 6 2 2 6 2 2 6 2 2 6
38882 - 2 2 6 66 66 66 62 62 62 22 22 22
38883 - 6 6 6 0 0 0 0 0 0 0 0 0
38884 - 0 0 0 0 0 0 0 0 0 0 0 0
38885 - 0 0 0 0 0 0 0 0 0 0 0 0
38886 - 0 0 0 0 0 0 0 0 0 0 0 0
38887 - 0 0 0 0 0 0 0 0 0 0 0 0
38888 - 0 0 0 0 0 0 0 0 0 0 0 0
38889 - 0 0 0 0 0 0 6 6 6 18 18 18
38890 - 54 54 54 62 62 62 2 2 6 2 2 6
38891 - 2 2 6 30 30 30 46 46 46 70 70 70
38892 -250 250 250 253 253 253 253 253 253 253 253 253
38893 -253 253 253 253 253 253 253 253 253 253 253 253
38894 -253 253 253 253 253 253 231 231 231 246 246 246
38895 -253 253 253 253 253 253 253 253 253 253 253 253
38896 -253 253 253 253 253 253 253 253 253 253 253 253
38897 -253 253 253 253 253 253 253 253 253 253 253 253
38898 -253 253 253 253 253 253 253 253 253 253 253 253
38899 -253 253 253 253 253 253 226 226 226 10 10 10
38900 - 2 2 6 6 6 6 30 30 30 2 2 6
38901 - 2 2 6 2 2 6 2 2 6 2 2 6
38902 - 2 2 6 66 66 66 58 58 58 22 22 22
38903 - 6 6 6 0 0 0 0 0 0 0 0 0
38904 - 0 0 0 0 0 0 0 0 0 0 0 0
38905 - 0 0 0 0 0 0 0 0 0 0 0 0
38906 - 0 0 0 0 0 0 0 0 0 0 0 0
38907 - 0 0 0 0 0 0 0 0 0 0 0 0
38908 - 0 0 0 0 0 0 0 0 0 0 0 0
38909 - 0 0 0 0 0 0 6 6 6 22 22 22
38910 - 58 58 58 62 62 62 2 2 6 2 2 6
38911 - 2 2 6 2 2 6 30 30 30 78 78 78
38912 -250 250 250 253 253 253 253 253 253 253 253 253
38913 -253 253 253 253 253 253 253 253 253 253 253 253
38914 -253 253 253 253 253 253 231 231 231 246 246 246
38915 -253 253 253 253 253 253 253 253 253 253 253 253
38916 -253 253 253 253 253 253 253 253 253 253 253 253
38917 -253 253 253 253 253 253 253 253 253 253 253 253
38918 -253 253 253 253 253 253 253 253 253 253 253 253
38919 -253 253 253 253 253 253 206 206 206 2 2 6
38920 - 22 22 22 34 34 34 18 14 6 22 22 22
38921 - 26 26 26 18 18 18 6 6 6 2 2 6
38922 - 2 2 6 82 82 82 54 54 54 18 18 18
38923 - 6 6 6 0 0 0 0 0 0 0 0 0
38924 - 0 0 0 0 0 0 0 0 0 0 0 0
38925 - 0 0 0 0 0 0 0 0 0 0 0 0
38926 - 0 0 0 0 0 0 0 0 0 0 0 0
38927 - 0 0 0 0 0 0 0 0 0 0 0 0
38928 - 0 0 0 0 0 0 0 0 0 0 0 0
38929 - 0 0 0 0 0 0 6 6 6 26 26 26
38930 - 62 62 62 106 106 106 74 54 14 185 133 11
38931 -210 162 10 121 92 8 6 6 6 62 62 62
38932 -238 238 238 253 253 253 253 253 253 253 253 253
38933 -253 253 253 253 253 253 253 253 253 253 253 253
38934 -253 253 253 253 253 253 231 231 231 246 246 246
38935 -253 253 253 253 253 253 253 253 253 253 253 253
38936 -253 253 253 253 253 253 253 253 253 253 253 253
38937 -253 253 253 253 253 253 253 253 253 253 253 253
38938 -253 253 253 253 253 253 253 253 253 253 253 253
38939 -253 253 253 253 253 253 158 158 158 18 18 18
38940 - 14 14 14 2 2 6 2 2 6 2 2 6
38941 - 6 6 6 18 18 18 66 66 66 38 38 38
38942 - 6 6 6 94 94 94 50 50 50 18 18 18
38943 - 6 6 6 0 0 0 0 0 0 0 0 0
38944 - 0 0 0 0 0 0 0 0 0 0 0 0
38945 - 0 0 0 0 0 0 0 0 0 0 0 0
38946 - 0 0 0 0 0 0 0 0 0 0 0 0
38947 - 0 0 0 0 0 0 0 0 0 0 0 0
38948 - 0 0 0 0 0 0 0 0 0 6 6 6
38949 - 10 10 10 10 10 10 18 18 18 38 38 38
38950 - 78 78 78 142 134 106 216 158 10 242 186 14
38951 -246 190 14 246 190 14 156 118 10 10 10 10
38952 - 90 90 90 238 238 238 253 253 253 253 253 253
38953 -253 253 253 253 253 253 253 253 253 253 253 253
38954 -253 253 253 253 253 253 231 231 231 250 250 250
38955 -253 253 253 253 253 253 253 253 253 253 253 253
38956 -253 253 253 253 253 253 253 253 253 253 253 253
38957 -253 253 253 253 253 253 253 253 253 253 253 253
38958 -253 253 253 253 253 253 253 253 253 246 230 190
38959 -238 204 91 238 204 91 181 142 44 37 26 9
38960 - 2 2 6 2 2 6 2 2 6 2 2 6
38961 - 2 2 6 2 2 6 38 38 38 46 46 46
38962 - 26 26 26 106 106 106 54 54 54 18 18 18
38963 - 6 6 6 0 0 0 0 0 0 0 0 0
38964 - 0 0 0 0 0 0 0 0 0 0 0 0
38965 - 0 0 0 0 0 0 0 0 0 0 0 0
38966 - 0 0 0 0 0 0 0 0 0 0 0 0
38967 - 0 0 0 0 0 0 0 0 0 0 0 0
38968 - 0 0 0 6 6 6 14 14 14 22 22 22
38969 - 30 30 30 38 38 38 50 50 50 70 70 70
38970 -106 106 106 190 142 34 226 170 11 242 186 14
38971 -246 190 14 246 190 14 246 190 14 154 114 10
38972 - 6 6 6 74 74 74 226 226 226 253 253 253
38973 -253 253 253 253 253 253 253 253 253 253 253 253
38974 -253 253 253 253 253 253 231 231 231 250 250 250
38975 -253 253 253 253 253 253 253 253 253 253 253 253
38976 -253 253 253 253 253 253 253 253 253 253 253 253
38977 -253 253 253 253 253 253 253 253 253 253 253 253
38978 -253 253 253 253 253 253 253 253 253 228 184 62
38979 -241 196 14 241 208 19 232 195 16 38 30 10
38980 - 2 2 6 2 2 6 2 2 6 2 2 6
38981 - 2 2 6 6 6 6 30 30 30 26 26 26
38982 -203 166 17 154 142 90 66 66 66 26 26 26
38983 - 6 6 6 0 0 0 0 0 0 0 0 0
38984 - 0 0 0 0 0 0 0 0 0 0 0 0
38985 - 0 0 0 0 0 0 0 0 0 0 0 0
38986 - 0 0 0 0 0 0 0 0 0 0 0 0
38987 - 0 0 0 0 0 0 0 0 0 0 0 0
38988 - 6 6 6 18 18 18 38 38 38 58 58 58
38989 - 78 78 78 86 86 86 101 101 101 123 123 123
38990 -175 146 61 210 150 10 234 174 13 246 186 14
38991 -246 190 14 246 190 14 246 190 14 238 190 10
38992 -102 78 10 2 2 6 46 46 46 198 198 198
38993 -253 253 253 253 253 253 253 253 253 253 253 253
38994 -253 253 253 253 253 253 234 234 234 242 242 242
38995 -253 253 253 253 253 253 253 253 253 253 253 253
38996 -253 253 253 253 253 253 253 253 253 253 253 253
38997 -253 253 253 253 253 253 253 253 253 253 253 253
38998 -253 253 253 253 253 253 253 253 253 224 178 62
38999 -242 186 14 241 196 14 210 166 10 22 18 6
39000 - 2 2 6 2 2 6 2 2 6 2 2 6
39001 - 2 2 6 2 2 6 6 6 6 121 92 8
39002 -238 202 15 232 195 16 82 82 82 34 34 34
39003 - 10 10 10 0 0 0 0 0 0 0 0 0
39004 - 0 0 0 0 0 0 0 0 0 0 0 0
39005 - 0 0 0 0 0 0 0 0 0 0 0 0
39006 - 0 0 0 0 0 0 0 0 0 0 0 0
39007 - 0 0 0 0 0 0 0 0 0 0 0 0
39008 - 14 14 14 38 38 38 70 70 70 154 122 46
39009 -190 142 34 200 144 11 197 138 11 197 138 11
39010 -213 154 11 226 170 11 242 186 14 246 190 14
39011 -246 190 14 246 190 14 246 190 14 246 190 14
39012 -225 175 15 46 32 6 2 2 6 22 22 22
39013 -158 158 158 250 250 250 253 253 253 253 253 253
39014 -253 253 253 253 253 253 253 253 253 253 253 253
39015 -253 253 253 253 253 253 253 253 253 253 253 253
39016 -253 253 253 253 253 253 253 253 253 253 253 253
39017 -253 253 253 253 253 253 253 253 253 253 253 253
39018 -253 253 253 250 250 250 242 242 242 224 178 62
39019 -239 182 13 236 186 11 213 154 11 46 32 6
39020 - 2 2 6 2 2 6 2 2 6 2 2 6
39021 - 2 2 6 2 2 6 61 42 6 225 175 15
39022 -238 190 10 236 186 11 112 100 78 42 42 42
39023 - 14 14 14 0 0 0 0 0 0 0 0 0
39024 - 0 0 0 0 0 0 0 0 0 0 0 0
39025 - 0 0 0 0 0 0 0 0 0 0 0 0
39026 - 0 0 0 0 0 0 0 0 0 0 0 0
39027 - 0 0 0 0 0 0 0 0 0 6 6 6
39028 - 22 22 22 54 54 54 154 122 46 213 154 11
39029 -226 170 11 230 174 11 226 170 11 226 170 11
39030 -236 178 12 242 186 14 246 190 14 246 190 14
39031 -246 190 14 246 190 14 246 190 14 246 190 14
39032 -241 196 14 184 144 12 10 10 10 2 2 6
39033 - 6 6 6 116 116 116 242 242 242 253 253 253
39034 -253 253 253 253 253 253 253 253 253 253 253 253
39035 -253 253 253 253 253 253 253 253 253 253 253 253
39036 -253 253 253 253 253 253 253 253 253 253 253 253
39037 -253 253 253 253 253 253 253 253 253 253 253 253
39038 -253 253 253 231 231 231 198 198 198 214 170 54
39039 -236 178 12 236 178 12 210 150 10 137 92 6
39040 - 18 14 6 2 2 6 2 2 6 2 2 6
39041 - 6 6 6 70 47 6 200 144 11 236 178 12
39042 -239 182 13 239 182 13 124 112 88 58 58 58
39043 - 22 22 22 6 6 6 0 0 0 0 0 0
39044 - 0 0 0 0 0 0 0 0 0 0 0 0
39045 - 0 0 0 0 0 0 0 0 0 0 0 0
39046 - 0 0 0 0 0 0 0 0 0 0 0 0
39047 - 0 0 0 0 0 0 0 0 0 10 10 10
39048 - 30 30 30 70 70 70 180 133 36 226 170 11
39049 -239 182 13 242 186 14 242 186 14 246 186 14
39050 -246 190 14 246 190 14 246 190 14 246 190 14
39051 -246 190 14 246 190 14 246 190 14 246 190 14
39052 -246 190 14 232 195 16 98 70 6 2 2 6
39053 - 2 2 6 2 2 6 66 66 66 221 221 221
39054 -253 253 253 253 253 253 253 253 253 253 253 253
39055 -253 253 253 253 253 253 253 253 253 253 253 253
39056 -253 253 253 253 253 253 253 253 253 253 253 253
39057 -253 253 253 253 253 253 253 253 253 253 253 253
39058 -253 253 253 206 206 206 198 198 198 214 166 58
39059 -230 174 11 230 174 11 216 158 10 192 133 9
39060 -163 110 8 116 81 8 102 78 10 116 81 8
39061 -167 114 7 197 138 11 226 170 11 239 182 13
39062 -242 186 14 242 186 14 162 146 94 78 78 78
39063 - 34 34 34 14 14 14 6 6 6 0 0 0
39064 - 0 0 0 0 0 0 0 0 0 0 0 0
39065 - 0 0 0 0 0 0 0 0 0 0 0 0
39066 - 0 0 0 0 0 0 0 0 0 0 0 0
39067 - 0 0 0 0 0 0 0 0 0 6 6 6
39068 - 30 30 30 78 78 78 190 142 34 226 170 11
39069 -239 182 13 246 190 14 246 190 14 246 190 14
39070 -246 190 14 246 190 14 246 190 14 246 190 14
39071 -246 190 14 246 190 14 246 190 14 246 190 14
39072 -246 190 14 241 196 14 203 166 17 22 18 6
39073 - 2 2 6 2 2 6 2 2 6 38 38 38
39074 -218 218 218 253 253 253 253 253 253 253 253 253
39075 -253 253 253 253 253 253 253 253 253 253 253 253
39076 -253 253 253 253 253 253 253 253 253 253 253 253
39077 -253 253 253 253 253 253 253 253 253 253 253 253
39078 -250 250 250 206 206 206 198 198 198 202 162 69
39079 -226 170 11 236 178 12 224 166 10 210 150 10
39080 -200 144 11 197 138 11 192 133 9 197 138 11
39081 -210 150 10 226 170 11 242 186 14 246 190 14
39082 -246 190 14 246 186 14 225 175 15 124 112 88
39083 - 62 62 62 30 30 30 14 14 14 6 6 6
39084 - 0 0 0 0 0 0 0 0 0 0 0 0
39085 - 0 0 0 0 0 0 0 0 0 0 0 0
39086 - 0 0 0 0 0 0 0 0 0 0 0 0
39087 - 0 0 0 0 0 0 0 0 0 10 10 10
39088 - 30 30 30 78 78 78 174 135 50 224 166 10
39089 -239 182 13 246 190 14 246 190 14 246 190 14
39090 -246 190 14 246 190 14 246 190 14 246 190 14
39091 -246 190 14 246 190 14 246 190 14 246 190 14
39092 -246 190 14 246 190 14 241 196 14 139 102 15
39093 - 2 2 6 2 2 6 2 2 6 2 2 6
39094 - 78 78 78 250 250 250 253 253 253 253 253 253
39095 -253 253 253 253 253 253 253 253 253 253 253 253
39096 -253 253 253 253 253 253 253 253 253 253 253 253
39097 -253 253 253 253 253 253 253 253 253 253 253 253
39098 -250 250 250 214 214 214 198 198 198 190 150 46
39099 -219 162 10 236 178 12 234 174 13 224 166 10
39100 -216 158 10 213 154 11 213 154 11 216 158 10
39101 -226 170 11 239 182 13 246 190 14 246 190 14
39102 -246 190 14 246 190 14 242 186 14 206 162 42
39103 -101 101 101 58 58 58 30 30 30 14 14 14
39104 - 6 6 6 0 0 0 0 0 0 0 0 0
39105 - 0 0 0 0 0 0 0 0 0 0 0 0
39106 - 0 0 0 0 0 0 0 0 0 0 0 0
39107 - 0 0 0 0 0 0 0 0 0 10 10 10
39108 - 30 30 30 74 74 74 174 135 50 216 158 10
39109 -236 178 12 246 190 14 246 190 14 246 190 14
39110 -246 190 14 246 190 14 246 190 14 246 190 14
39111 -246 190 14 246 190 14 246 190 14 246 190 14
39112 -246 190 14 246 190 14 241 196 14 226 184 13
39113 - 61 42 6 2 2 6 2 2 6 2 2 6
39114 - 22 22 22 238 238 238 253 253 253 253 253 253
39115 -253 253 253 253 253 253 253 253 253 253 253 253
39116 -253 253 253 253 253 253 253 253 253 253 253 253
39117 -253 253 253 253 253 253 253 253 253 253 253 253
39118 -253 253 253 226 226 226 187 187 187 180 133 36
39119 -216 158 10 236 178 12 239 182 13 236 178 12
39120 -230 174 11 226 170 11 226 170 11 230 174 11
39121 -236 178 12 242 186 14 246 190 14 246 190 14
39122 -246 190 14 246 190 14 246 186 14 239 182 13
39123 -206 162 42 106 106 106 66 66 66 34 34 34
39124 - 14 14 14 6 6 6 0 0 0 0 0 0
39125 - 0 0 0 0 0 0 0 0 0 0 0 0
39126 - 0 0 0 0 0 0 0 0 0 0 0 0
39127 - 0 0 0 0 0 0 0 0 0 6 6 6
39128 - 26 26 26 70 70 70 163 133 67 213 154 11
39129 -236 178 12 246 190 14 246 190 14 246 190 14
39130 -246 190 14 246 190 14 246 190 14 246 190 14
39131 -246 190 14 246 190 14 246 190 14 246 190 14
39132 -246 190 14 246 190 14 246 190 14 241 196 14
39133 -190 146 13 18 14 6 2 2 6 2 2 6
39134 - 46 46 46 246 246 246 253 253 253 253 253 253
39135 -253 253 253 253 253 253 253 253 253 253 253 253
39136 -253 253 253 253 253 253 253 253 253 253 253 253
39137 -253 253 253 253 253 253 253 253 253 253 253 253
39138 -253 253 253 221 221 221 86 86 86 156 107 11
39139 -216 158 10 236 178 12 242 186 14 246 186 14
39140 -242 186 14 239 182 13 239 182 13 242 186 14
39141 -242 186 14 246 186 14 246 190 14 246 190 14
39142 -246 190 14 246 190 14 246 190 14 246 190 14
39143 -242 186 14 225 175 15 142 122 72 66 66 66
39144 - 30 30 30 10 10 10 0 0 0 0 0 0
39145 - 0 0 0 0 0 0 0 0 0 0 0 0
39146 - 0 0 0 0 0 0 0 0 0 0 0 0
39147 - 0 0 0 0 0 0 0 0 0 6 6 6
39148 - 26 26 26 70 70 70 163 133 67 210 150 10
39149 -236 178 12 246 190 14 246 190 14 246 190 14
39150 -246 190 14 246 190 14 246 190 14 246 190 14
39151 -246 190 14 246 190 14 246 190 14 246 190 14
39152 -246 190 14 246 190 14 246 190 14 246 190 14
39153 -232 195 16 121 92 8 34 34 34 106 106 106
39154 -221 221 221 253 253 253 253 253 253 253 253 253
39155 -253 253 253 253 253 253 253 253 253 253 253 253
39156 -253 253 253 253 253 253 253 253 253 253 253 253
39157 -253 253 253 253 253 253 253 253 253 253 253 253
39158 -242 242 242 82 82 82 18 14 6 163 110 8
39159 -216 158 10 236 178 12 242 186 14 246 190 14
39160 -246 190 14 246 190 14 246 190 14 246 190 14
39161 -246 190 14 246 190 14 246 190 14 246 190 14
39162 -246 190 14 246 190 14 246 190 14 246 190 14
39163 -246 190 14 246 190 14 242 186 14 163 133 67
39164 - 46 46 46 18 18 18 6 6 6 0 0 0
39165 - 0 0 0 0 0 0 0 0 0 0 0 0
39166 - 0 0 0 0 0 0 0 0 0 0 0 0
39167 - 0 0 0 0 0 0 0 0 0 10 10 10
39168 - 30 30 30 78 78 78 163 133 67 210 150 10
39169 -236 178 12 246 186 14 246 190 14 246 190 14
39170 -246 190 14 246 190 14 246 190 14 246 190 14
39171 -246 190 14 246 190 14 246 190 14 246 190 14
39172 -246 190 14 246 190 14 246 190 14 246 190 14
39173 -241 196 14 215 174 15 190 178 144 253 253 253
39174 -253 253 253 253 253 253 253 253 253 253 253 253
39175 -253 253 253 253 253 253 253 253 253 253 253 253
39176 -253 253 253 253 253 253 253 253 253 253 253 253
39177 -253 253 253 253 253 253 253 253 253 218 218 218
39178 - 58 58 58 2 2 6 22 18 6 167 114 7
39179 -216 158 10 236 178 12 246 186 14 246 190 14
39180 -246 190 14 246 190 14 246 190 14 246 190 14
39181 -246 190 14 246 190 14 246 190 14 246 190 14
39182 -246 190 14 246 190 14 246 190 14 246 190 14
39183 -246 190 14 246 186 14 242 186 14 190 150 46
39184 - 54 54 54 22 22 22 6 6 6 0 0 0
39185 - 0 0 0 0 0 0 0 0 0 0 0 0
39186 - 0 0 0 0 0 0 0 0 0 0 0 0
39187 - 0 0 0 0 0 0 0 0 0 14 14 14
39188 - 38 38 38 86 86 86 180 133 36 213 154 11
39189 -236 178 12 246 186 14 246 190 14 246 190 14
39190 -246 190 14 246 190 14 246 190 14 246 190 14
39191 -246 190 14 246 190 14 246 190 14 246 190 14
39192 -246 190 14 246 190 14 246 190 14 246 190 14
39193 -246 190 14 232 195 16 190 146 13 214 214 214
39194 -253 253 253 253 253 253 253 253 253 253 253 253
39195 -253 253 253 253 253 253 253 253 253 253 253 253
39196 -253 253 253 253 253 253 253 253 253 253 253 253
39197 -253 253 253 250 250 250 170 170 170 26 26 26
39198 - 2 2 6 2 2 6 37 26 9 163 110 8
39199 -219 162 10 239 182 13 246 186 14 246 190 14
39200 -246 190 14 246 190 14 246 190 14 246 190 14
39201 -246 190 14 246 190 14 246 190 14 246 190 14
39202 -246 190 14 246 190 14 246 190 14 246 190 14
39203 -246 186 14 236 178 12 224 166 10 142 122 72
39204 - 46 46 46 18 18 18 6 6 6 0 0 0
39205 - 0 0 0 0 0 0 0 0 0 0 0 0
39206 - 0 0 0 0 0 0 0 0 0 0 0 0
39207 - 0 0 0 0 0 0 6 6 6 18 18 18
39208 - 50 50 50 109 106 95 192 133 9 224 166 10
39209 -242 186 14 246 190 14 246 190 14 246 190 14
39210 -246 190 14 246 190 14 246 190 14 246 190 14
39211 -246 190 14 246 190 14 246 190 14 246 190 14
39212 -246 190 14 246 190 14 246 190 14 246 190 14
39213 -242 186 14 226 184 13 210 162 10 142 110 46
39214 -226 226 226 253 253 253 253 253 253 253 253 253
39215 -253 253 253 253 253 253 253 253 253 253 253 253
39216 -253 253 253 253 253 253 253 253 253 253 253 253
39217 -198 198 198 66 66 66 2 2 6 2 2 6
39218 - 2 2 6 2 2 6 50 34 6 156 107 11
39219 -219 162 10 239 182 13 246 186 14 246 190 14
39220 -246 190 14 246 190 14 246 190 14 246 190 14
39221 -246 190 14 246 190 14 246 190 14 246 190 14
39222 -246 190 14 246 190 14 246 190 14 242 186 14
39223 -234 174 13 213 154 11 154 122 46 66 66 66
39224 - 30 30 30 10 10 10 0 0 0 0 0 0
39225 - 0 0 0 0 0 0 0 0 0 0 0 0
39226 - 0 0 0 0 0 0 0 0 0 0 0 0
39227 - 0 0 0 0 0 0 6 6 6 22 22 22
39228 - 58 58 58 154 121 60 206 145 10 234 174 13
39229 -242 186 14 246 186 14 246 190 14 246 190 14
39230 -246 190 14 246 190 14 246 190 14 246 190 14
39231 -246 190 14 246 190 14 246 190 14 246 190 14
39232 -246 190 14 246 190 14 246 190 14 246 190 14
39233 -246 186 14 236 178 12 210 162 10 163 110 8
39234 - 61 42 6 138 138 138 218 218 218 250 250 250
39235 -253 253 253 253 253 253 253 253 253 250 250 250
39236 -242 242 242 210 210 210 144 144 144 66 66 66
39237 - 6 6 6 2 2 6 2 2 6 2 2 6
39238 - 2 2 6 2 2 6 61 42 6 163 110 8
39239 -216 158 10 236 178 12 246 190 14 246 190 14
39240 -246 190 14 246 190 14 246 190 14 246 190 14
39241 -246 190 14 246 190 14 246 190 14 246 190 14
39242 -246 190 14 239 182 13 230 174 11 216 158 10
39243 -190 142 34 124 112 88 70 70 70 38 38 38
39244 - 18 18 18 6 6 6 0 0 0 0 0 0
39245 - 0 0 0 0 0 0 0 0 0 0 0 0
39246 - 0 0 0 0 0 0 0 0 0 0 0 0
39247 - 0 0 0 0 0 0 6 6 6 22 22 22
39248 - 62 62 62 168 124 44 206 145 10 224 166 10
39249 -236 178 12 239 182 13 242 186 14 242 186 14
39250 -246 186 14 246 190 14 246 190 14 246 190 14
39251 -246 190 14 246 190 14 246 190 14 246 190 14
39252 -246 190 14 246 190 14 246 190 14 246 190 14
39253 -246 190 14 236 178 12 216 158 10 175 118 6
39254 - 80 54 7 2 2 6 6 6 6 30 30 30
39255 - 54 54 54 62 62 62 50 50 50 38 38 38
39256 - 14 14 14 2 2 6 2 2 6 2 2 6
39257 - 2 2 6 2 2 6 2 2 6 2 2 6
39258 - 2 2 6 6 6 6 80 54 7 167 114 7
39259 -213 154 11 236 178 12 246 190 14 246 190 14
39260 -246 190 14 246 190 14 246 190 14 246 190 14
39261 -246 190 14 242 186 14 239 182 13 239 182 13
39262 -230 174 11 210 150 10 174 135 50 124 112 88
39263 - 82 82 82 54 54 54 34 34 34 18 18 18
39264 - 6 6 6 0 0 0 0 0 0 0 0 0
39265 - 0 0 0 0 0 0 0 0 0 0 0 0
39266 - 0 0 0 0 0 0 0 0 0 0 0 0
39267 - 0 0 0 0 0 0 6 6 6 18 18 18
39268 - 50 50 50 158 118 36 192 133 9 200 144 11
39269 -216 158 10 219 162 10 224 166 10 226 170 11
39270 -230 174 11 236 178 12 239 182 13 239 182 13
39271 -242 186 14 246 186 14 246 190 14 246 190 14
39272 -246 190 14 246 190 14 246 190 14 246 190 14
39273 -246 186 14 230 174 11 210 150 10 163 110 8
39274 -104 69 6 10 10 10 2 2 6 2 2 6
39275 - 2 2 6 2 2 6 2 2 6 2 2 6
39276 - 2 2 6 2 2 6 2 2 6 2 2 6
39277 - 2 2 6 2 2 6 2 2 6 2 2 6
39278 - 2 2 6 6 6 6 91 60 6 167 114 7
39279 -206 145 10 230 174 11 242 186 14 246 190 14
39280 -246 190 14 246 190 14 246 186 14 242 186 14
39281 -239 182 13 230 174 11 224 166 10 213 154 11
39282 -180 133 36 124 112 88 86 86 86 58 58 58
39283 - 38 38 38 22 22 22 10 10 10 6 6 6
39284 - 0 0 0 0 0 0 0 0 0 0 0 0
39285 - 0 0 0 0 0 0 0 0 0 0 0 0
39286 - 0 0 0 0 0 0 0 0 0 0 0 0
39287 - 0 0 0 0 0 0 0 0 0 14 14 14
39288 - 34 34 34 70 70 70 138 110 50 158 118 36
39289 -167 114 7 180 123 7 192 133 9 197 138 11
39290 -200 144 11 206 145 10 213 154 11 219 162 10
39291 -224 166 10 230 174 11 239 182 13 242 186 14
39292 -246 186 14 246 186 14 246 186 14 246 186 14
39293 -239 182 13 216 158 10 185 133 11 152 99 6
39294 -104 69 6 18 14 6 2 2 6 2 2 6
39295 - 2 2 6 2 2 6 2 2 6 2 2 6
39296 - 2 2 6 2 2 6 2 2 6 2 2 6
39297 - 2 2 6 2 2 6 2 2 6 2 2 6
39298 - 2 2 6 6 6 6 80 54 7 152 99 6
39299 -192 133 9 219 162 10 236 178 12 239 182 13
39300 -246 186 14 242 186 14 239 182 13 236 178 12
39301 -224 166 10 206 145 10 192 133 9 154 121 60
39302 - 94 94 94 62 62 62 42 42 42 22 22 22
39303 - 14 14 14 6 6 6 0 0 0 0 0 0
39304 - 0 0 0 0 0 0 0 0 0 0 0 0
39305 - 0 0 0 0 0 0 0 0 0 0 0 0
39306 - 0 0 0 0 0 0 0 0 0 0 0 0
39307 - 0 0 0 0 0 0 0 0 0 6 6 6
39308 - 18 18 18 34 34 34 58 58 58 78 78 78
39309 -101 98 89 124 112 88 142 110 46 156 107 11
39310 -163 110 8 167 114 7 175 118 6 180 123 7
39311 -185 133 11 197 138 11 210 150 10 219 162 10
39312 -226 170 11 236 178 12 236 178 12 234 174 13
39313 -219 162 10 197 138 11 163 110 8 130 83 6
39314 - 91 60 6 10 10 10 2 2 6 2 2 6
39315 - 18 18 18 38 38 38 38 38 38 38 38 38
39316 - 38 38 38 38 38 38 38 38 38 38 38 38
39317 - 38 38 38 38 38 38 26 26 26 2 2 6
39318 - 2 2 6 6 6 6 70 47 6 137 92 6
39319 -175 118 6 200 144 11 219 162 10 230 174 11
39320 -234 174 13 230 174 11 219 162 10 210 150 10
39321 -192 133 9 163 110 8 124 112 88 82 82 82
39322 - 50 50 50 30 30 30 14 14 14 6 6 6
39323 - 0 0 0 0 0 0 0 0 0 0 0 0
39324 - 0 0 0 0 0 0 0 0 0 0 0 0
39325 - 0 0 0 0 0 0 0 0 0 0 0 0
39326 - 0 0 0 0 0 0 0 0 0 0 0 0
39327 - 0 0 0 0 0 0 0 0 0 0 0 0
39328 - 6 6 6 14 14 14 22 22 22 34 34 34
39329 - 42 42 42 58 58 58 74 74 74 86 86 86
39330 -101 98 89 122 102 70 130 98 46 121 87 25
39331 -137 92 6 152 99 6 163 110 8 180 123 7
39332 -185 133 11 197 138 11 206 145 10 200 144 11
39333 -180 123 7 156 107 11 130 83 6 104 69 6
39334 - 50 34 6 54 54 54 110 110 110 101 98 89
39335 - 86 86 86 82 82 82 78 78 78 78 78 78
39336 - 78 78 78 78 78 78 78 78 78 78 78 78
39337 - 78 78 78 82 82 82 86 86 86 94 94 94
39338 -106 106 106 101 101 101 86 66 34 124 80 6
39339 -156 107 11 180 123 7 192 133 9 200 144 11
39340 -206 145 10 200 144 11 192 133 9 175 118 6
39341 -139 102 15 109 106 95 70 70 70 42 42 42
39342 - 22 22 22 10 10 10 0 0 0 0 0 0
39343 - 0 0 0 0 0 0 0 0 0 0 0 0
39344 - 0 0 0 0 0 0 0 0 0 0 0 0
39345 - 0 0 0 0 0 0 0 0 0 0 0 0
39346 - 0 0 0 0 0 0 0 0 0 0 0 0
39347 - 0 0 0 0 0 0 0 0 0 0 0 0
39348 - 0 0 0 0 0 0 6 6 6 10 10 10
39349 - 14 14 14 22 22 22 30 30 30 38 38 38
39350 - 50 50 50 62 62 62 74 74 74 90 90 90
39351 -101 98 89 112 100 78 121 87 25 124 80 6
39352 -137 92 6 152 99 6 152 99 6 152 99 6
39353 -138 86 6 124 80 6 98 70 6 86 66 30
39354 -101 98 89 82 82 82 58 58 58 46 46 46
39355 - 38 38 38 34 34 34 34 34 34 34 34 34
39356 - 34 34 34 34 34 34 34 34 34 34 34 34
39357 - 34 34 34 34 34 34 38 38 38 42 42 42
39358 - 54 54 54 82 82 82 94 86 76 91 60 6
39359 -134 86 6 156 107 11 167 114 7 175 118 6
39360 -175 118 6 167 114 7 152 99 6 121 87 25
39361 -101 98 89 62 62 62 34 34 34 18 18 18
39362 - 6 6 6 0 0 0 0 0 0 0 0 0
39363 - 0 0 0 0 0 0 0 0 0 0 0 0
39364 - 0 0 0 0 0 0 0 0 0 0 0 0
39365 - 0 0 0 0 0 0 0 0 0 0 0 0
39366 - 0 0 0 0 0 0 0 0 0 0 0 0
39367 - 0 0 0 0 0 0 0 0 0 0 0 0
39368 - 0 0 0 0 0 0 0 0 0 0 0 0
39369 - 0 0 0 6 6 6 6 6 6 10 10 10
39370 - 18 18 18 22 22 22 30 30 30 42 42 42
39371 - 50 50 50 66 66 66 86 86 86 101 98 89
39372 -106 86 58 98 70 6 104 69 6 104 69 6
39373 -104 69 6 91 60 6 82 62 34 90 90 90
39374 - 62 62 62 38 38 38 22 22 22 14 14 14
39375 - 10 10 10 10 10 10 10 10 10 10 10 10
39376 - 10 10 10 10 10 10 6 6 6 10 10 10
39377 - 10 10 10 10 10 10 10 10 10 14 14 14
39378 - 22 22 22 42 42 42 70 70 70 89 81 66
39379 - 80 54 7 104 69 6 124 80 6 137 92 6
39380 -134 86 6 116 81 8 100 82 52 86 86 86
39381 - 58 58 58 30 30 30 14 14 14 6 6 6
39382 - 0 0 0 0 0 0 0 0 0 0 0 0
39383 - 0 0 0 0 0 0 0 0 0 0 0 0
39384 - 0 0 0 0 0 0 0 0 0 0 0 0
39385 - 0 0 0 0 0 0 0 0 0 0 0 0
39386 - 0 0 0 0 0 0 0 0 0 0 0 0
39387 - 0 0 0 0 0 0 0 0 0 0 0 0
39388 - 0 0 0 0 0 0 0 0 0 0 0 0
39389 - 0 0 0 0 0 0 0 0 0 0 0 0
39390 - 0 0 0 6 6 6 10 10 10 14 14 14
39391 - 18 18 18 26 26 26 38 38 38 54 54 54
39392 - 70 70 70 86 86 86 94 86 76 89 81 66
39393 - 89 81 66 86 86 86 74 74 74 50 50 50
39394 - 30 30 30 14 14 14 6 6 6 0 0 0
39395 - 0 0 0 0 0 0 0 0 0 0 0 0
39396 - 0 0 0 0 0 0 0 0 0 0 0 0
39397 - 0 0 0 0 0 0 0 0 0 0 0 0
39398 - 6 6 6 18 18 18 34 34 34 58 58 58
39399 - 82 82 82 89 81 66 89 81 66 89 81 66
39400 - 94 86 66 94 86 76 74 74 74 50 50 50
39401 - 26 26 26 14 14 14 6 6 6 0 0 0
39402 - 0 0 0 0 0 0 0 0 0 0 0 0
39403 - 0 0 0 0 0 0 0 0 0 0 0 0
39404 - 0 0 0 0 0 0 0 0 0 0 0 0
39405 - 0 0 0 0 0 0 0 0 0 0 0 0
39406 - 0 0 0 0 0 0 0 0 0 0 0 0
39407 - 0 0 0 0 0 0 0 0 0 0 0 0
39408 - 0 0 0 0 0 0 0 0 0 0 0 0
39409 - 0 0 0 0 0 0 0 0 0 0 0 0
39410 - 0 0 0 0 0 0 0 0 0 0 0 0
39411 - 6 6 6 6 6 6 14 14 14 18 18 18
39412 - 30 30 30 38 38 38 46 46 46 54 54 54
39413 - 50 50 50 42 42 42 30 30 30 18 18 18
39414 - 10 10 10 0 0 0 0 0 0 0 0 0
39415 - 0 0 0 0 0 0 0 0 0 0 0 0
39416 - 0 0 0 0 0 0 0 0 0 0 0 0
39417 - 0 0 0 0 0 0 0 0 0 0 0 0
39418 - 0 0 0 6 6 6 14 14 14 26 26 26
39419 - 38 38 38 50 50 50 58 58 58 58 58 58
39420 - 54 54 54 42 42 42 30 30 30 18 18 18
39421 - 10 10 10 0 0 0 0 0 0 0 0 0
39422 - 0 0 0 0 0 0 0 0 0 0 0 0
39423 - 0 0 0 0 0 0 0 0 0 0 0 0
39424 - 0 0 0 0 0 0 0 0 0 0 0 0
39425 - 0 0 0 0 0 0 0 0 0 0 0 0
39426 - 0 0 0 0 0 0 0 0 0 0 0 0
39427 - 0 0 0 0 0 0 0 0 0 0 0 0
39428 - 0 0 0 0 0 0 0 0 0 0 0 0
39429 - 0 0 0 0 0 0 0 0 0 0 0 0
39430 - 0 0 0 0 0 0 0 0 0 0 0 0
39431 - 0 0 0 0 0 0 0 0 0 6 6 6
39432 - 6 6 6 10 10 10 14 14 14 18 18 18
39433 - 18 18 18 14 14 14 10 10 10 6 6 6
39434 - 0 0 0 0 0 0 0 0 0 0 0 0
39435 - 0 0 0 0 0 0 0 0 0 0 0 0
39436 - 0 0 0 0 0 0 0 0 0 0 0 0
39437 - 0 0 0 0 0 0 0 0 0 0 0 0
39438 - 0 0 0 0 0 0 0 0 0 6 6 6
39439 - 14 14 14 18 18 18 22 22 22 22 22 22
39440 - 18 18 18 14 14 14 10 10 10 6 6 6
39441 - 0 0 0 0 0 0 0 0 0 0 0 0
39442 - 0 0 0 0 0 0 0 0 0 0 0 0
39443 - 0 0 0 0 0 0 0 0 0 0 0 0
39444 - 0 0 0 0 0 0 0 0 0 0 0 0
39445 - 0 0 0 0 0 0 0 0 0 0 0 0
39446 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39447 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39448 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39449 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39450 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39451 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39452 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39453 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39454 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39455 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39456 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39457 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39458 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39459 +4 4 4 4 4 4
39460 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39461 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39462 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39463 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39464 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39465 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39466 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39467 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39468 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39469 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39470 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39471 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39472 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39473 +4 4 4 4 4 4
39474 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39475 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39476 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39477 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39478 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39479 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39480 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39481 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39482 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39483 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39484 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39485 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39486 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39487 +4 4 4 4 4 4
39488 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39489 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39490 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39491 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39492 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39493 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39494 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39495 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39496 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39497 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39498 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39499 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39500 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39501 +4 4 4 4 4 4
39502 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39503 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39504 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39505 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39506 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39507 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39508 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39509 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39510 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39511 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39512 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39513 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39514 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39515 +4 4 4 4 4 4
39516 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39517 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39518 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39519 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39520 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39521 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39522 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39523 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39524 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39525 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39526 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39527 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39528 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39529 +4 4 4 4 4 4
39530 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39531 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39532 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39533 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39534 +4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
39535 +0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
39536 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39537 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39538 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39539 +4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
39540 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
39541 +4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
39542 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39543 +4 4 4 4 4 4
39544 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39545 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39546 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39547 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39548 +4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
39549 +37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
39550 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39551 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39552 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39553 +4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
39554 +2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
39555 +4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
39556 +1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39557 +4 4 4 4 4 4
39558 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39559 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39560 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39561 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39562 +2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
39563 +153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
39564 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
39565 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39566 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39567 +4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
39568 +60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
39569 +4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
39570 +2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
39571 +4 4 4 4 4 4
39572 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39573 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39574 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39575 +4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
39576 +4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
39577 +165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
39578 +1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
39579 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39580 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
39581 +3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
39582 +163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
39583 +0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
39584 +37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
39585 +4 4 4 4 4 4
39586 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39587 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39588 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39589 +4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
39590 +37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
39591 +156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
39592 +125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
39593 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
39594 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
39595 +0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
39596 +174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
39597 +0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
39598 +64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
39599 +4 4 4 4 4 4
39600 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39601 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39602 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
39603 +5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
39604 +156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
39605 +156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
39606 +174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
39607 +1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
39608 +4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
39609 +13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
39610 +174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
39611 +22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
39612 +90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
39613 +4 4 4 4 4 4
39614 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39615 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39616 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
39617 +0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
39618 +174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
39619 +156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
39620 +163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
39621 +4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
39622 +5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
39623 +131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
39624 +190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
39625 +90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
39626 +31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
39627 +4 4 4 4 4 4
39628 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39629 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39630 +4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
39631 +4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
39632 +155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
39633 +167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
39634 +153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
39635 +41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
39636 +1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
39637 +177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
39638 +125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
39639 +136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
39640 +7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
39641 +4 4 4 4 4 4
39642 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39643 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39644 +4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
39645 +125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
39646 +156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
39647 +137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
39648 +156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
39649 +167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
39650 +0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
39651 +166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
39652 +6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
39653 +90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
39654 +1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
39655 +4 4 4 4 4 4
39656 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39657 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39658 +1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
39659 +167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
39660 +157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
39661 +26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
39662 +158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
39663 +165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
39664 +60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
39665 +137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
39666 +52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
39667 +13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
39668 +4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
39669 +4 4 4 4 4 4
39670 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39671 +4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
39672 +0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
39673 +158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
39674 +167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
39675 +4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
39676 +174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
39677 +155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
39678 +137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
39679 +16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
39680 +136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
39681 +2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
39682 +4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
39683 +4 4 4 4 4 4
39684 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39685 +4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
39686 +37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
39687 +157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
39688 +153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
39689 +4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
39690 +125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
39691 +156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
39692 +174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
39693 +4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
39694 +136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
39695 +1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
39696 +2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
39697 +0 0 0 4 4 4
39698 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
39699 +4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
39700 +158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
39701 +153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
39702 +37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
39703 +4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
39704 +4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
39705 +154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
39706 +174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
39707 +32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
39708 +28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
39709 +50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
39710 +0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
39711 +2 0 0 0 0 0
39712 +4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
39713 +0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
39714 +174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
39715 +165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
39716 +4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
39717 +4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
39718 +4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
39719 +174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
39720 +60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
39721 +136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
39722 +22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
39723 +136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
39724 +26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
39725 +37 38 37 0 0 0
39726 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
39727 +13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
39728 +153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
39729 +177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
39730 +4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
39731 +5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
39732 +6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
39733 +166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
39734 +4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
39735 +146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
39736 +71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
39737 +90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
39738 +125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
39739 +85 115 134 4 0 0
39740 +4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
39741 +125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
39742 +155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
39743 +125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
39744 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
39745 +0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
39746 +5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
39747 +37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
39748 +4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
39749 +90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
39750 +2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
39751 +13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
39752 +166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
39753 +60 73 81 4 0 0
39754 +4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
39755 +174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
39756 +156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
39757 +4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
39758 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
39759 +10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
39760 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
39761 +4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
39762 +80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
39763 +28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
39764 +50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
39765 +1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
39766 +167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
39767 +16 19 21 4 0 0
39768 +4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
39769 +158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
39770 +167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
39771 +4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
39772 +4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
39773 +80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
39774 +4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
39775 +3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
39776 +146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
39777 +68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
39778 +136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
39779 +24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
39780 +163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
39781 +4 0 0 4 3 3
39782 +3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
39783 +156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
39784 +155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
39785 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
39786 +2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
39787 +136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
39788 +0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
39789 +0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
39790 +136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
39791 +28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
39792 +22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
39793 +137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
39794 +60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
39795 +3 2 2 4 4 4
39796 +3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
39797 +157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
39798 +37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
39799 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
39800 +0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
39801 +101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
39802 +14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
39803 +22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
39804 +136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
39805 +17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
39806 +2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
39807 +166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
39808 +13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
39809 +4 4 4 4 4 4
39810 +1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
39811 +163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
39812 +4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
39813 +4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
39814 +40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
39815 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
39816 +101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
39817 +136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
39818 +136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
39819 +136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
39820 +3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
39821 +174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
39822 +4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
39823 +4 4 4 4 4 4
39824 +4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
39825 +155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
39826 +4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
39827 +4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
39828 +101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
39829 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
39830 +136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
39831 +136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
39832 +136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
39833 +90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
39834 +85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
39835 +167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
39836 +6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
39837 +5 5 5 5 5 5
39838 +1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
39839 +131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
39840 +6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
39841 +0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
39842 +101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
39843 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
39844 +101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
39845 +136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
39846 +101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
39847 +7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
39848 +174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
39849 +24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
39850 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
39851 +5 5 5 4 4 4
39852 +4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
39853 +131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
39854 +6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
39855 +13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
39856 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
39857 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
39858 +101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
39859 +136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
39860 +136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
39861 +2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
39862 +174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
39863 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
39864 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39865 +4 4 4 4 4 4
39866 +1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
39867 +137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
39868 +4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
39869 +64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
39870 +90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
39871 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
39872 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
39873 +136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
39874 +101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
39875 +37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
39876 +167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
39877 +3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
39878 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39879 +4 4 4 4 4 4
39880 +4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
39881 +153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
39882 +4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
39883 +90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
39884 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
39885 +90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
39886 +101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
39887 +101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
39888 +35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
39889 +154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
39890 +60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
39891 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39892 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39893 +4 4 4 4 4 4
39894 +1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
39895 +153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
39896 +4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
39897 +64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
39898 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
39899 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
39900 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
39901 +136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
39902 +13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
39903 +174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
39904 +6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
39905 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39906 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39907 +4 4 4 4 4 4
39908 +4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
39909 +156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
39910 +4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
39911 +90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
39912 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
39913 +90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
39914 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
39915 +101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
39916 +2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
39917 +174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
39918 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39919 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39920 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39921 +4 4 4 4 4 4
39922 +3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
39923 +158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
39924 +4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
39925 +37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
39926 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
39927 +90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
39928 +101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
39929 +90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
39930 +5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
39931 +167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
39932 +6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
39933 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39934 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39935 +4 4 4 4 4 4
39936 +4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
39937 +163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
39938 +4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
39939 +18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
39940 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
39941 +90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
39942 +101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
39943 +13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
39944 +3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
39945 +174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
39946 +4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
39947 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39948 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39949 +4 4 4 4 4 4
39950 +1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
39951 +167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
39952 +4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
39953 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
39954 +26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
39955 +90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
39956 +101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
39957 +7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
39958 +4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
39959 +174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
39960 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39961 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39962 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39963 +4 4 4 4 4 4
39964 +4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
39965 +174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
39966 +5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
39967 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
39968 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
39969 +90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
39970 +101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
39971 +2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
39972 +3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
39973 +153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
39974 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39975 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39976 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39977 +4 4 4 4 4 4
39978 +1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
39979 +174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
39980 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
39981 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
39982 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
39983 +26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
39984 +35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
39985 +2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
39986 +3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
39987 +131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
39988 +4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39989 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39990 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39991 +4 4 4 4 4 4
39992 +3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
39993 +174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
39994 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
39995 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
39996 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
39997 +26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
39998 +7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
39999 +4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
40000 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
40001 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40002 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40003 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40004 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40005 +4 4 4 4 4 4
40006 +1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
40007 +174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
40008 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
40009 +18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
40010 +18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
40011 +26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
40012 +28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
40013 +3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
40014 +4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40015 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40016 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40017 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40018 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40019 +4 4 4 4 4 4
40020 +4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
40021 +174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
40022 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
40023 +10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
40024 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40025 +18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
40026 +90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
40027 +3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
40028 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
40029 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40030 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40031 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40032 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40033 +4 4 4 4 4 4
40034 +1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
40035 +177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
40036 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
40037 +10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
40038 +26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
40039 +6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
40040 +10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
40041 +2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
40042 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40043 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40044 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40045 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40046 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40047 +4 4 4 4 4 4
40048 +4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
40049 +177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
40050 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
40051 +10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
40052 +26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
40053 +7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
40054 +3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
40055 +21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
40056 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
40057 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40058 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40059 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40060 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40061 +4 4 4 4 4 4
40062 +3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
40063 +190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
40064 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
40065 +10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
40066 +24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
40067 +18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
40068 +28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
40069 +26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
40070 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40071 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40072 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40073 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40074 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40075 +4 4 4 4 4 4
40076 +4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
40077 +190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
40078 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
40079 +10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
40080 +0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
40081 +26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
40082 +37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
40083 +90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
40084 +4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
40085 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40086 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40087 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40088 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40089 +4 4 4 4 4 4
40090 +4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
40091 +193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
40092 +5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
40093 +10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
40094 +1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
40095 +26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
40096 +22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
40097 +26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
40098 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40099 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40100 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40101 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40102 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40103 +4 4 4 4 4 4
40104 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40105 +190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
40106 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
40107 +10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
40108 +2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
40109 +26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
40110 +10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
40111 +26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
40112 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
40113 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40114 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40115 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40116 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40117 +4 4 4 4 4 4
40118 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
40119 +193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
40120 +5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
40121 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
40122 +13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
40123 +10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
40124 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40125 +26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
40126 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40127 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40128 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40129 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40130 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40131 +4 4 4 4 4 4
40132 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40133 +190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
40134 +5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
40135 +28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
40136 +10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
40137 +28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
40138 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40139 +26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
40140 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
40141 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40142 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40143 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40144 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40145 +4 4 4 4 4 4
40146 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
40147 +193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
40148 +5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
40149 +4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
40150 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
40151 +10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
40152 +18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
40153 +22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
40154 +4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
40155 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40156 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40157 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40158 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40159 +4 4 4 4 4 4
40160 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40161 +190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
40162 +6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
40163 +1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
40164 +18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
40165 +10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
40166 +26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
40167 +1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
40168 +5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
40169 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40170 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40171 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40172 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40173 +4 4 4 4 4 4
40174 +4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
40175 +193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
40176 +2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
40177 +4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
40178 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
40179 +10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
40180 +26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
40181 +2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
40182 +3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
40183 +131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40184 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40185 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40186 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40187 +4 4 4 4 4 4
40188 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40189 +193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
40190 +0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
40191 +4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
40192 +13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
40193 +10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
40194 +28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
40195 +4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
40196 +0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
40197 +125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40198 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40199 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40200 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40201 +4 4 4 4 4 4
40202 +4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
40203 +193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
40204 +120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
40205 +4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
40206 +4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
40207 +10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
40208 +4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
40209 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
40210 +24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
40211 +125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
40212 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40213 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40214 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40215 +4 4 4 4 4 4
40216 +4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
40217 +174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
40218 +220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
40219 +3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
40220 +4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
40221 +10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
40222 +1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
40223 +5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
40224 +137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
40225 +125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
40226 +0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40227 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40228 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40229 +4 4 4 4 4 4
40230 +5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
40231 +193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
40232 +220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
40233 +4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
40234 +4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
40235 +22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
40236 +4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40237 +1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
40238 +166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
40239 +125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
40240 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
40241 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40242 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40243 +4 4 4 4 4 4
40244 +4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
40245 +220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
40246 +205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
40247 +24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
40248 +4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
40249 +4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
40250 +4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
40251 +2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
40252 +156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
40253 +137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
40254 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40255 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40256 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40257 +4 4 4 4 4 4
40258 +5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
40259 +125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
40260 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
40261 +193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
40262 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
40263 +1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
40264 +5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
40265 +60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
40266 +153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
40267 +125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
40268 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40269 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40270 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40271 +4 4 4 4 4 4
40272 +4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
40273 +6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
40274 +193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
40275 +244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
40276 +0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
40277 +4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
40278 +3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
40279 +220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
40280 +153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
40281 +13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
40282 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40283 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40284 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40285 +4 4 4 4 4 4
40286 +5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
40287 +6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
40288 +244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
40289 +220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
40290 +3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
40291 +4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
40292 +0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
40293 +177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
40294 +158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
40295 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
40296 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40297 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40298 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40299 +4 4 4 4 4 4
40300 +5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
40301 +6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
40302 +177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
40303 +220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
40304 +125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
40305 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
40306 +37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
40307 +174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
40308 +158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
40309 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
40310 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40311 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40312 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40313 +4 4 4 4 4 4
40314 +4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
40315 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
40316 +26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
40317 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
40318 +244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
40319 +0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
40320 +177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
40321 +174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
40322 +60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
40323 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40324 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40325 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40326 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40327 +4 4 4 4 4 4
40328 +5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
40329 +6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
40330 +6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
40331 +220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
40332 +220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
40333 +0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
40334 +220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
40335 +174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
40336 +4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
40337 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40338 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40339 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40340 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40341 +4 4 4 4 4 4
40342 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
40343 +6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
40344 +4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
40345 +220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
40346 +205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
40347 +60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
40348 +177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
40349 +190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
40350 +4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40351 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40352 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40353 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40354 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40355 +4 4 4 4 4 4
40356 +4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
40357 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
40358 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
40359 +125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
40360 +205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
40361 +193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
40362 +190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
40363 +153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
40364 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40365 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40366 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40367 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40368 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40369 +4 4 4 4 4 4
40370 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
40371 +6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
40372 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
40373 +4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
40374 +205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
40375 +220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
40376 +174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
40377 +6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
40378 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40379 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40380 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40381 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40382 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40383 +4 4 4 4 4 4
40384 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
40385 +5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
40386 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
40387 +4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
40388 +220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
40389 +190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
40390 +193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
40391 +4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
40392 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40393 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40394 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40395 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40396 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40397 +4 4 4 4 4 4
40398 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40399 +4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
40400 +4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
40401 +6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
40402 +174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
40403 +193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
40404 +193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
40405 +6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
40406 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40407 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40408 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40409 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40410 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40411 +4 4 4 4 4 4
40412 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40413 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
40414 +5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
40415 +5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
40416 +6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
40417 +193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
40418 +60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
40419 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
40420 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40421 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40422 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40423 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40424 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40425 +4 4 4 4 4 4
40426 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40427 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40428 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
40429 +5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
40430 +4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
40431 +193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
40432 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
40433 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
40434 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40435 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40436 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40437 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40438 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40439 +4 4 4 4 4 4
40440 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40441 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40442 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
40443 +4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
40444 +6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
40445 +153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
40446 +6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
40447 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40448 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40449 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40450 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40451 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40452 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40453 +4 4 4 4 4 4
40454 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40455 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40456 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40457 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
40458 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
40459 +24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
40460 +6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
40461 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40462 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40463 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40464 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40465 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40466 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40467 +4 4 4 4 4 4
40468 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40469 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40470 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40471 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
40472 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
40473 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
40474 +4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
40475 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40476 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40477 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40478 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40479 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40480 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40481 +4 4 4 4 4 4
40482 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40483 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40484 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40485 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
40486 +5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
40487 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
40488 +6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
40489 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40490 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40491 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40492 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40493 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40494 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40495 +4 4 4 4 4 4
40496 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40497 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40498 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40499 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
40500 +4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
40501 +4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
40502 +6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40503 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40504 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40505 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40506 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40507 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40508 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40509 +4 4 4 4 4 4
40510 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40511 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40512 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40513 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40514 +4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
40515 +6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
40516 +4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
40517 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40518 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40519 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40520 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40521 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40522 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40523 +4 4 4 4 4 4
40524 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40525 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40526 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40527 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40528 +4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
40529 +4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
40530 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40531 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40532 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40533 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40534 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40535 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40536 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40537 +4 4 4 4 4 4
40538 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40539 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40540 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40541 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40542 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
40543 +5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
40544 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40545 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40546 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40547 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40548 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40549 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40550 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40551 +4 4 4 4 4 4
40552 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40553 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40554 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40555 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40556 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
40557 +5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
40558 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40559 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40560 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40561 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40562 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40563 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40564 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40565 +4 4 4 4 4 4
40566 diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
40567 index a197731..6c3af9d 100644
40568 --- a/drivers/video/udlfb.c
40569 +++ b/drivers/video/udlfb.c
40570 @@ -619,11 +619,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
40571 dlfb_urb_completion(urb);
40572
40573 error:
40574 - atomic_add(bytes_sent, &dev->bytes_sent);
40575 - atomic_add(bytes_identical, &dev->bytes_identical);
40576 - atomic_add(width*height*2, &dev->bytes_rendered);
40577 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
40578 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
40579 + atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
40580 end_cycles = get_cycles();
40581 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
40582 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
40583 >> 10)), /* Kcycles */
40584 &dev->cpu_kcycles_used);
40585
40586 @@ -744,11 +744,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
40587 dlfb_urb_completion(urb);
40588
40589 error:
40590 - atomic_add(bytes_sent, &dev->bytes_sent);
40591 - atomic_add(bytes_identical, &dev->bytes_identical);
40592 - atomic_add(bytes_rendered, &dev->bytes_rendered);
40593 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
40594 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
40595 + atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
40596 end_cycles = get_cycles();
40597 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
40598 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
40599 >> 10)), /* Kcycles */
40600 &dev->cpu_kcycles_used);
40601 }
40602 @@ -1368,7 +1368,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
40603 struct fb_info *fb_info = dev_get_drvdata(fbdev);
40604 struct dlfb_data *dev = fb_info->par;
40605 return snprintf(buf, PAGE_SIZE, "%u\n",
40606 - atomic_read(&dev->bytes_rendered));
40607 + atomic_read_unchecked(&dev->bytes_rendered));
40608 }
40609
40610 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
40611 @@ -1376,7 +1376,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
40612 struct fb_info *fb_info = dev_get_drvdata(fbdev);
40613 struct dlfb_data *dev = fb_info->par;
40614 return snprintf(buf, PAGE_SIZE, "%u\n",
40615 - atomic_read(&dev->bytes_identical));
40616 + atomic_read_unchecked(&dev->bytes_identical));
40617 }
40618
40619 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
40620 @@ -1384,7 +1384,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
40621 struct fb_info *fb_info = dev_get_drvdata(fbdev);
40622 struct dlfb_data *dev = fb_info->par;
40623 return snprintf(buf, PAGE_SIZE, "%u\n",
40624 - atomic_read(&dev->bytes_sent));
40625 + atomic_read_unchecked(&dev->bytes_sent));
40626 }
40627
40628 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
40629 @@ -1392,7 +1392,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
40630 struct fb_info *fb_info = dev_get_drvdata(fbdev);
40631 struct dlfb_data *dev = fb_info->par;
40632 return snprintf(buf, PAGE_SIZE, "%u\n",
40633 - atomic_read(&dev->cpu_kcycles_used));
40634 + atomic_read_unchecked(&dev->cpu_kcycles_used));
40635 }
40636
40637 static ssize_t edid_show(
40638 @@ -1449,10 +1449,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
40639 struct fb_info *fb_info = dev_get_drvdata(fbdev);
40640 struct dlfb_data *dev = fb_info->par;
40641
40642 - atomic_set(&dev->bytes_rendered, 0);
40643 - atomic_set(&dev->bytes_identical, 0);
40644 - atomic_set(&dev->bytes_sent, 0);
40645 - atomic_set(&dev->cpu_kcycles_used, 0);
40646 + atomic_set_unchecked(&dev->bytes_rendered, 0);
40647 + atomic_set_unchecked(&dev->bytes_identical, 0);
40648 + atomic_set_unchecked(&dev->bytes_sent, 0);
40649 + atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
40650
40651 return count;
40652 }
40653 diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
40654 index e7f69ef..83af4fd 100644
40655 --- a/drivers/video/uvesafb.c
40656 +++ b/drivers/video/uvesafb.c
40657 @@ -19,6 +19,7 @@
40658 #include <linux/io.h>
40659 #include <linux/mutex.h>
40660 #include <linux/slab.h>
40661 +#include <linux/moduleloader.h>
40662 #include <video/edid.h>
40663 #include <video/uvesafb.h>
40664 #ifdef CONFIG_X86
40665 @@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
40666 NULL,
40667 };
40668
40669 - return call_usermodehelper(v86d_path, argv, envp, 1);
40670 + return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
40671 }
40672
40673 /*
40674 @@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
40675 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
40676 par->pmi_setpal = par->ypan = 0;
40677 } else {
40678 +
40679 +#ifdef CONFIG_PAX_KERNEXEC
40680 +#ifdef CONFIG_MODULES
40681 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
40682 +#endif
40683 + if (!par->pmi_code) {
40684 + par->pmi_setpal = par->ypan = 0;
40685 + return 0;
40686 + }
40687 +#endif
40688 +
40689 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
40690 + task->t.regs.edi);
40691 +
40692 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
40693 + pax_open_kernel();
40694 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
40695 + pax_close_kernel();
40696 +
40697 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
40698 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
40699 +#else
40700 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
40701 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
40702 +#endif
40703 +
40704 printk(KERN_INFO "uvesafb: protected mode interface info at "
40705 "%04x:%04x\n",
40706 (u16)task->t.regs.es, (u16)task->t.regs.edi);
40707 @@ -1821,6 +1844,11 @@ out:
40708 if (par->vbe_modes)
40709 kfree(par->vbe_modes);
40710
40711 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
40712 + if (par->pmi_code)
40713 + module_free_exec(NULL, par->pmi_code);
40714 +#endif
40715 +
40716 framebuffer_release(info);
40717 return err;
40718 }
40719 @@ -1847,6 +1875,12 @@ static int uvesafb_remove(struct platform_device *dev)
40720 kfree(par->vbe_state_orig);
40721 if (par->vbe_state_saved)
40722 kfree(par->vbe_state_saved);
40723 +
40724 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
40725 + if (par->pmi_code)
40726 + module_free_exec(NULL, par->pmi_code);
40727 +#endif
40728 +
40729 }
40730
40731 framebuffer_release(info);
40732 diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
40733 index 501b340..86bd4cf 100644
40734 --- a/drivers/video/vesafb.c
40735 +++ b/drivers/video/vesafb.c
40736 @@ -9,6 +9,7 @@
40737 */
40738
40739 #include <linux/module.h>
40740 +#include <linux/moduleloader.h>
40741 #include <linux/kernel.h>
40742 #include <linux/errno.h>
40743 #include <linux/string.h>
40744 @@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
40745 static int vram_total __initdata; /* Set total amount of memory */
40746 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
40747 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
40748 -static void (*pmi_start)(void) __read_mostly;
40749 -static void (*pmi_pal) (void) __read_mostly;
40750 +static void (*pmi_start)(void) __read_only;
40751 +static void (*pmi_pal) (void) __read_only;
40752 static int depth __read_mostly;
40753 static int vga_compat __read_mostly;
40754 /* --------------------------------------------------------------------- */
40755 @@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
40756 unsigned int size_vmode;
40757 unsigned int size_remap;
40758 unsigned int size_total;
40759 + void *pmi_code = NULL;
40760
40761 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
40762 return -ENODEV;
40763 @@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
40764 size_remap = size_total;
40765 vesafb_fix.smem_len = size_remap;
40766
40767 -#ifndef __i386__
40768 - screen_info.vesapm_seg = 0;
40769 -#endif
40770 -
40771 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
40772 printk(KERN_WARNING
40773 "vesafb: cannot reserve video memory at 0x%lx\n",
40774 @@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
40775 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
40776 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
40777
40778 +#ifdef __i386__
40779 +
40780 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
40781 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
40782 + if (!pmi_code)
40783 +#elif !defined(CONFIG_PAX_KERNEXEC)
40784 + if (0)
40785 +#endif
40786 +
40787 +#endif
40788 + screen_info.vesapm_seg = 0;
40789 +
40790 if (screen_info.vesapm_seg) {
40791 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
40792 - screen_info.vesapm_seg,screen_info.vesapm_off);
40793 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
40794 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
40795 }
40796
40797 if (screen_info.vesapm_seg < 0xc000)
40798 @@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
40799
40800 if (ypan || pmi_setpal) {
40801 unsigned short *pmi_base;
40802 +
40803 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
40804 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
40805 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
40806 +
40807 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
40808 + pax_open_kernel();
40809 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
40810 +#else
40811 + pmi_code = pmi_base;
40812 +#endif
40813 +
40814 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
40815 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
40816 +
40817 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
40818 + pmi_start = ktva_ktla(pmi_start);
40819 + pmi_pal = ktva_ktla(pmi_pal);
40820 + pax_close_kernel();
40821 +#endif
40822 +
40823 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
40824 if (pmi_base[3]) {
40825 printk(KERN_INFO "vesafb: pmi: ports = ");
40826 @@ -488,6 +514,11 @@ static int __init vesafb_probe(struct platform_device *dev)
40827 info->node, info->fix.id);
40828 return 0;
40829 err:
40830 +
40831 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
40832 + module_free_exec(NULL, pmi_code);
40833 +#endif
40834 +
40835 if (info->screen_base)
40836 iounmap(info->screen_base);
40837 framebuffer_release(info);
40838 diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
40839 index 88714ae..16c2e11 100644
40840 --- a/drivers/video/via/via_clock.h
40841 +++ b/drivers/video/via/via_clock.h
40842 @@ -56,7 +56,7 @@ struct via_clock {
40843
40844 void (*set_engine_pll_state)(u8 state);
40845 void (*set_engine_pll)(struct via_pll_config config);
40846 -};
40847 +} __no_const;
40848
40849
40850 static inline u32 get_pll_internal_frequency(u32 ref_freq,
40851 diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
40852 index e56c934..fc22f4b 100644
40853 --- a/drivers/xen/xen-pciback/conf_space.h
40854 +++ b/drivers/xen/xen-pciback/conf_space.h
40855 @@ -44,15 +44,15 @@ struct config_field {
40856 struct {
40857 conf_dword_write write;
40858 conf_dword_read read;
40859 - } dw;
40860 + } __no_const dw;
40861 struct {
40862 conf_word_write write;
40863 conf_word_read read;
40864 - } w;
40865 + } __no_const w;
40866 struct {
40867 conf_byte_write write;
40868 conf_byte_read read;
40869 - } b;
40870 + } __no_const b;
40871 } u;
40872 struct list_head list;
40873 };
40874 diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
40875 index 014c8dd..6f3dfe6 100644
40876 --- a/fs/9p/vfs_inode.c
40877 +++ b/fs/9p/vfs_inode.c
40878 @@ -1303,7 +1303,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
40879 void
40880 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
40881 {
40882 - char *s = nd_get_link(nd);
40883 + const char *s = nd_get_link(nd);
40884
40885 p9_debug(P9_DEBUG_VFS, " %s %s\n",
40886 dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
40887 diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
40888 index e95d1b6..3454244 100644
40889 --- a/fs/Kconfig.binfmt
40890 +++ b/fs/Kconfig.binfmt
40891 @@ -89,7 +89,7 @@ config HAVE_AOUT
40892
40893 config BINFMT_AOUT
40894 tristate "Kernel support for a.out and ECOFF binaries"
40895 - depends on HAVE_AOUT
40896 + depends on HAVE_AOUT && BROKEN
40897 ---help---
40898 A.out (Assembler.OUTput) is a set of formats for libraries and
40899 executables used in the earliest versions of UNIX. Linux used
40900 diff --git a/fs/aio.c b/fs/aio.c
40901 index b9d64d8..86cb1d5 100644
40902 --- a/fs/aio.c
40903 +++ b/fs/aio.c
40904 @@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx *ctx)
40905 size += sizeof(struct io_event) * nr_events;
40906 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
40907
40908 - if (nr_pages < 0)
40909 + if (nr_pages <= 0)
40910 return -EINVAL;
40911
40912 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
40913 @@ -1461,22 +1461,27 @@ static ssize_t aio_fsync(struct kiocb *iocb)
40914 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
40915 {
40916 ssize_t ret;
40917 + struct iovec iovstack;
40918
40919 #ifdef CONFIG_COMPAT
40920 if (compat)
40921 ret = compat_rw_copy_check_uvector(type,
40922 (struct compat_iovec __user *)kiocb->ki_buf,
40923 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
40924 + kiocb->ki_nbytes, 1, &iovstack,
40925 &kiocb->ki_iovec, 1);
40926 else
40927 #endif
40928 ret = rw_copy_check_uvector(type,
40929 (struct iovec __user *)kiocb->ki_buf,
40930 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
40931 + kiocb->ki_nbytes, 1, &iovstack,
40932 &kiocb->ki_iovec, 1);
40933 if (ret < 0)
40934 goto out;
40935
40936 + if (kiocb->ki_iovec == &iovstack) {
40937 + kiocb->ki_inline_vec = iovstack;
40938 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
40939 + }
40940 kiocb->ki_nr_segs = kiocb->ki_nbytes;
40941 kiocb->ki_cur_seg = 0;
40942 /* ki_nbytes/left now reflect bytes instead of segs */
40943 diff --git a/fs/attr.c b/fs/attr.c
40944 index 95053ad..2cc93ca 100644
40945 --- a/fs/attr.c
40946 +++ b/fs/attr.c
40947 @@ -99,6 +99,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
40948 unsigned long limit;
40949
40950 limit = rlimit(RLIMIT_FSIZE);
40951 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
40952 if (limit != RLIM_INFINITY && offset > limit)
40953 goto out_sig;
40954 if (offset > inode->i_sb->s_maxbytes)
40955 diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
40956 index 9c098db..c755da5 100644
40957 --- a/fs/autofs4/waitq.c
40958 +++ b/fs/autofs4/waitq.c
40959 @@ -61,7 +61,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
40960 {
40961 unsigned long sigpipe, flags;
40962 mm_segment_t fs;
40963 - const char *data = (const char *)addr;
40964 + const char __user *data = (const char __force_user *)addr;
40965 ssize_t wr = 0;
40966
40967 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
40968 diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
40969 index 6e6d536..457113a 100644
40970 --- a/fs/befs/linuxvfs.c
40971 +++ b/fs/befs/linuxvfs.c
40972 @@ -502,7 +502,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
40973 {
40974 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
40975 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
40976 - char *link = nd_get_link(nd);
40977 + const char *link = nd_get_link(nd);
40978 if (!IS_ERR(link))
40979 kfree(link);
40980 }
40981 diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
40982 index 1ff9405..f1e376a 100644
40983 --- a/fs/binfmt_aout.c
40984 +++ b/fs/binfmt_aout.c
40985 @@ -16,6 +16,7 @@
40986 #include <linux/string.h>
40987 #include <linux/fs.h>
40988 #include <linux/file.h>
40989 +#include <linux/security.h>
40990 #include <linux/stat.h>
40991 #include <linux/fcntl.h>
40992 #include <linux/ptrace.h>
40993 @@ -86,6 +87,8 @@ static int aout_core_dump(struct coredump_params *cprm)
40994 #endif
40995 # define START_STACK(u) ((void __user *)u.start_stack)
40996
40997 + memset(&dump, 0, sizeof(dump));
40998 +
40999 fs = get_fs();
41000 set_fs(KERNEL_DS);
41001 has_dumped = 1;
41002 @@ -97,10 +100,12 @@ static int aout_core_dump(struct coredump_params *cprm)
41003
41004 /* If the size of the dump file exceeds the rlimit, then see what would happen
41005 if we wrote the stack, but not the data area. */
41006 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
41007 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
41008 dump.u_dsize = 0;
41009
41010 /* Make sure we have enough room to write the stack and data areas. */
41011 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
41012 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
41013 dump.u_ssize = 0;
41014
41015 @@ -234,6 +239,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
41016 rlim = rlimit(RLIMIT_DATA);
41017 if (rlim >= RLIM_INFINITY)
41018 rlim = ~0;
41019 +
41020 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
41021 if (ex.a_data + ex.a_bss > rlim)
41022 return -ENOMEM;
41023
41024 @@ -269,6 +276,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
41025 install_exec_creds(bprm);
41026 current->flags &= ~PF_FORKNOEXEC;
41027
41028 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
41029 + current->mm->pax_flags = 0UL;
41030 +#endif
41031 +
41032 +#ifdef CONFIG_PAX_PAGEEXEC
41033 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
41034 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
41035 +
41036 +#ifdef CONFIG_PAX_EMUTRAMP
41037 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
41038 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
41039 +#endif
41040 +
41041 +#ifdef CONFIG_PAX_MPROTECT
41042 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
41043 + current->mm->pax_flags |= MF_PAX_MPROTECT;
41044 +#endif
41045 +
41046 + }
41047 +#endif
41048 +
41049 if (N_MAGIC(ex) == OMAGIC) {
41050 unsigned long text_addr, map_size;
41051 loff_t pos;
41052 @@ -341,7 +369,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
41053
41054 down_write(&current->mm->mmap_sem);
41055 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
41056 - PROT_READ | PROT_WRITE | PROT_EXEC,
41057 + PROT_READ | PROT_WRITE,
41058 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
41059 fd_offset + ex.a_text);
41060 up_write(&current->mm->mmap_sem);
41061 diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
41062 index 07d096c..5e2a0b3 100644
41063 --- a/fs/binfmt_elf.c
41064 +++ b/fs/binfmt_elf.c
41065 @@ -32,6 +32,7 @@
41066 #include <linux/elf.h>
41067 #include <linux/utsname.h>
41068 #include <linux/coredump.h>
41069 +#include <linux/xattr.h>
41070 #include <asm/uaccess.h>
41071 #include <asm/param.h>
41072 #include <asm/page.h>
41073 @@ -51,6 +52,10 @@ static int elf_core_dump(struct coredump_params *cprm);
41074 #define elf_core_dump NULL
41075 #endif
41076
41077 +#ifdef CONFIG_PAX_MPROTECT
41078 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
41079 +#endif
41080 +
41081 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
41082 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
41083 #else
41084 @@ -70,6 +75,11 @@ static struct linux_binfmt elf_format = {
41085 .load_binary = load_elf_binary,
41086 .load_shlib = load_elf_library,
41087 .core_dump = elf_core_dump,
41088 +
41089 +#ifdef CONFIG_PAX_MPROTECT
41090 + .handle_mprotect= elf_handle_mprotect,
41091 +#endif
41092 +
41093 .min_coredump = ELF_EXEC_PAGESIZE,
41094 };
41095
41096 @@ -77,6 +87,8 @@ static struct linux_binfmt elf_format = {
41097
41098 static int set_brk(unsigned long start, unsigned long end)
41099 {
41100 + unsigned long e = end;
41101 +
41102 start = ELF_PAGEALIGN(start);
41103 end = ELF_PAGEALIGN(end);
41104 if (end > start) {
41105 @@ -87,7 +99,7 @@ static int set_brk(unsigned long start, unsigned long end)
41106 if (BAD_ADDR(addr))
41107 return addr;
41108 }
41109 - current->mm->start_brk = current->mm->brk = end;
41110 + current->mm->start_brk = current->mm->brk = e;
41111 return 0;
41112 }
41113
41114 @@ -148,12 +160,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
41115 elf_addr_t __user *u_rand_bytes;
41116 const char *k_platform = ELF_PLATFORM;
41117 const char *k_base_platform = ELF_BASE_PLATFORM;
41118 - unsigned char k_rand_bytes[16];
41119 + u32 k_rand_bytes[4];
41120 int items;
41121 elf_addr_t *elf_info;
41122 int ei_index = 0;
41123 const struct cred *cred = current_cred();
41124 struct vm_area_struct *vma;
41125 + unsigned long saved_auxv[AT_VECTOR_SIZE];
41126
41127 /*
41128 * In some cases (e.g. Hyper-Threading), we want to avoid L1
41129 @@ -195,8 +208,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
41130 * Generate 16 random bytes for userspace PRNG seeding.
41131 */
41132 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
41133 - u_rand_bytes = (elf_addr_t __user *)
41134 - STACK_ALLOC(p, sizeof(k_rand_bytes));
41135 + srandom32(k_rand_bytes[0] ^ random32());
41136 + srandom32(k_rand_bytes[1] ^ random32());
41137 + srandom32(k_rand_bytes[2] ^ random32());
41138 + srandom32(k_rand_bytes[3] ^ random32());
41139 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
41140 + u_rand_bytes = (elf_addr_t __user *) p;
41141 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
41142 return -EFAULT;
41143
41144 @@ -308,9 +325,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
41145 return -EFAULT;
41146 current->mm->env_end = p;
41147
41148 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
41149 +
41150 /* Put the elf_info on the stack in the right place. */
41151 sp = (elf_addr_t __user *)envp + 1;
41152 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
41153 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
41154 return -EFAULT;
41155 return 0;
41156 }
41157 @@ -381,10 +400,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
41158 {
41159 struct elf_phdr *elf_phdata;
41160 struct elf_phdr *eppnt;
41161 - unsigned long load_addr = 0;
41162 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
41163 int load_addr_set = 0;
41164 unsigned long last_bss = 0, elf_bss = 0;
41165 - unsigned long error = ~0UL;
41166 + unsigned long error = -EINVAL;
41167 unsigned long total_size;
41168 int retval, i, size;
41169
41170 @@ -430,6 +449,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
41171 goto out_close;
41172 }
41173
41174 +#ifdef CONFIG_PAX_SEGMEXEC
41175 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
41176 + pax_task_size = SEGMEXEC_TASK_SIZE;
41177 +#endif
41178 +
41179 eppnt = elf_phdata;
41180 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
41181 if (eppnt->p_type == PT_LOAD) {
41182 @@ -473,8 +497,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
41183 k = load_addr + eppnt->p_vaddr;
41184 if (BAD_ADDR(k) ||
41185 eppnt->p_filesz > eppnt->p_memsz ||
41186 - eppnt->p_memsz > TASK_SIZE ||
41187 - TASK_SIZE - eppnt->p_memsz < k) {
41188 + eppnt->p_memsz > pax_task_size ||
41189 + pax_task_size - eppnt->p_memsz < k) {
41190 error = -ENOMEM;
41191 goto out_close;
41192 }
41193 @@ -528,6 +552,351 @@ out:
41194 return error;
41195 }
41196
41197 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
41198 +static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
41199 +{
41200 + unsigned long pax_flags = 0UL;
41201 +
41202 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
41203 +
41204 +#ifdef CONFIG_PAX_PAGEEXEC
41205 + if (elf_phdata->p_flags & PF_PAGEEXEC)
41206 + pax_flags |= MF_PAX_PAGEEXEC;
41207 +#endif
41208 +
41209 +#ifdef CONFIG_PAX_SEGMEXEC
41210 + if (elf_phdata->p_flags & PF_SEGMEXEC)
41211 + pax_flags |= MF_PAX_SEGMEXEC;
41212 +#endif
41213 +
41214 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
41215 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41216 + if ((__supported_pte_mask & _PAGE_NX))
41217 + pax_flags &= ~MF_PAX_SEGMEXEC;
41218 + else
41219 + pax_flags &= ~MF_PAX_PAGEEXEC;
41220 + }
41221 +#endif
41222 +
41223 +#ifdef CONFIG_PAX_EMUTRAMP
41224 + if (elf_phdata->p_flags & PF_EMUTRAMP)
41225 + pax_flags |= MF_PAX_EMUTRAMP;
41226 +#endif
41227 +
41228 +#ifdef CONFIG_PAX_MPROTECT
41229 + if (elf_phdata->p_flags & PF_MPROTECT)
41230 + pax_flags |= MF_PAX_MPROTECT;
41231 +#endif
41232 +
41233 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
41234 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
41235 + pax_flags |= MF_PAX_RANDMMAP;
41236 +#endif
41237 +
41238 +#endif
41239 +
41240 + return pax_flags;
41241 +}
41242 +
41243 +static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
41244 +{
41245 + unsigned long pax_flags = 0UL;
41246 +
41247 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
41248 +
41249 +#ifdef CONFIG_PAX_PAGEEXEC
41250 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
41251 + pax_flags |= MF_PAX_PAGEEXEC;
41252 +#endif
41253 +
41254 +#ifdef CONFIG_PAX_SEGMEXEC
41255 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
41256 + pax_flags |= MF_PAX_SEGMEXEC;
41257 +#endif
41258 +
41259 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
41260 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41261 + if ((__supported_pte_mask & _PAGE_NX))
41262 + pax_flags &= ~MF_PAX_SEGMEXEC;
41263 + else
41264 + pax_flags &= ~MF_PAX_PAGEEXEC;
41265 + }
41266 +#endif
41267 +
41268 +#ifdef CONFIG_PAX_EMUTRAMP
41269 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
41270 + pax_flags |= MF_PAX_EMUTRAMP;
41271 +#endif
41272 +
41273 +#ifdef CONFIG_PAX_MPROTECT
41274 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
41275 + pax_flags |= MF_PAX_MPROTECT;
41276 +#endif
41277 +
41278 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
41279 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
41280 + pax_flags |= MF_PAX_RANDMMAP;
41281 +#endif
41282 +
41283 +#endif
41284 +
41285 + return pax_flags;
41286 +}
41287 +
41288 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
41289 +{
41290 + unsigned long pax_flags = 0UL;
41291 +
41292 +#ifdef CONFIG_PAX_EI_PAX
41293 +
41294 +#ifdef CONFIG_PAX_PAGEEXEC
41295 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
41296 + pax_flags |= MF_PAX_PAGEEXEC;
41297 +#endif
41298 +
41299 +#ifdef CONFIG_PAX_SEGMEXEC
41300 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
41301 + pax_flags |= MF_PAX_SEGMEXEC;
41302 +#endif
41303 +
41304 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
41305 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41306 + if ((__supported_pte_mask & _PAGE_NX))
41307 + pax_flags &= ~MF_PAX_SEGMEXEC;
41308 + else
41309 + pax_flags &= ~MF_PAX_PAGEEXEC;
41310 + }
41311 +#endif
41312 +
41313 +#ifdef CONFIG_PAX_EMUTRAMP
41314 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
41315 + pax_flags |= MF_PAX_EMUTRAMP;
41316 +#endif
41317 +
41318 +#ifdef CONFIG_PAX_MPROTECT
41319 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
41320 + pax_flags |= MF_PAX_MPROTECT;
41321 +#endif
41322 +
41323 +#ifdef CONFIG_PAX_ASLR
41324 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
41325 + pax_flags |= MF_PAX_RANDMMAP;
41326 +#endif
41327 +
41328 +#else
41329 +
41330 +#ifdef CONFIG_PAX_PAGEEXEC
41331 + pax_flags |= MF_PAX_PAGEEXEC;
41332 +#endif
41333 +
41334 +#ifdef CONFIG_PAX_MPROTECT
41335 + pax_flags |= MF_PAX_MPROTECT;
41336 +#endif
41337 +
41338 +#ifdef CONFIG_PAX_RANDMMAP
41339 + pax_flags |= MF_PAX_RANDMMAP;
41340 +#endif
41341 +
41342 +#ifdef CONFIG_PAX_SEGMEXEC
41343 + if (!(pax_flags & MF_PAX_PAGEEXEC) || !(__supported_pte_mask & _PAGE_NX)) {
41344 + pax_flags &= ~MF_PAX_PAGEEXEC;
41345 + pax_flags |= MF_PAX_SEGMEXEC;
41346 + }
41347 +#endif
41348 +
41349 +#endif
41350 +
41351 + return pax_flags;
41352 +}
41353 +
41354 +static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
41355 +{
41356 +
41357 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
41358 + unsigned long i;
41359 +
41360 + for (i = 0UL; i < elf_ex->e_phnum; i++)
41361 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
41362 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
41363 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
41364 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
41365 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
41366 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
41367 + return ~0UL;
41368 +
41369 +#ifdef CONFIG_PAX_SOFTMODE
41370 + if (pax_softmode)
41371 + return pax_parse_pt_pax_softmode(&elf_phdata[i]);
41372 + else
41373 +#endif
41374 +
41375 + return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
41376 + break;
41377 + }
41378 +#endif
41379 +
41380 + return ~0UL;
41381 +}
41382 +
41383 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
41384 +static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
41385 +{
41386 + unsigned long pax_flags = 0UL;
41387 +
41388 +#ifdef CONFIG_PAX_PAGEEXEC
41389 + if (pax_flags_softmode & MF_PAX_PAGEEXEC)
41390 + pax_flags |= MF_PAX_PAGEEXEC;
41391 +#endif
41392 +
41393 +#ifdef CONFIG_PAX_SEGMEXEC
41394 + if (pax_flags_softmode & MF_PAX_SEGMEXEC)
41395 + pax_flags |= MF_PAX_SEGMEXEC;
41396 +#endif
41397 +
41398 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
41399 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41400 + if ((__supported_pte_mask & _PAGE_NX))
41401 + pax_flags &= ~MF_PAX_SEGMEXEC;
41402 + else
41403 + pax_flags &= ~MF_PAX_PAGEEXEC;
41404 + }
41405 +#endif
41406 +
41407 +#ifdef CONFIG_PAX_EMUTRAMP
41408 + if (pax_flags_softmode & MF_PAX_EMUTRAMP)
41409 + pax_flags |= MF_PAX_EMUTRAMP;
41410 +#endif
41411 +
41412 +#ifdef CONFIG_PAX_MPROTECT
41413 + if (pax_flags_softmode & MF_PAX_MPROTECT)
41414 + pax_flags |= MF_PAX_MPROTECT;
41415 +#endif
41416 +
41417 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
41418 + if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
41419 + pax_flags |= MF_PAX_RANDMMAP;
41420 +#endif
41421 +
41422 + return pax_flags;
41423 +}
41424 +
41425 +static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
41426 +{
41427 + unsigned long pax_flags = 0UL;
41428 +
41429 +#ifdef CONFIG_PAX_PAGEEXEC
41430 + if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
41431 + pax_flags |= MF_PAX_PAGEEXEC;
41432 +#endif
41433 +
41434 +#ifdef CONFIG_PAX_SEGMEXEC
41435 + if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
41436 + pax_flags |= MF_PAX_SEGMEXEC;
41437 +#endif
41438 +
41439 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
41440 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41441 + if ((__supported_pte_mask & _PAGE_NX))
41442 + pax_flags &= ~MF_PAX_SEGMEXEC;
41443 + else
41444 + pax_flags &= ~MF_PAX_PAGEEXEC;
41445 + }
41446 +#endif
41447 +
41448 +#ifdef CONFIG_PAX_EMUTRAMP
41449 + if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
41450 + pax_flags |= MF_PAX_EMUTRAMP;
41451 +#endif
41452 +
41453 +#ifdef CONFIG_PAX_MPROTECT
41454 + if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
41455 + pax_flags |= MF_PAX_MPROTECT;
41456 +#endif
41457 +
41458 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
41459 + if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
41460 + pax_flags |= MF_PAX_RANDMMAP;
41461 +#endif
41462 +
41463 + return pax_flags;
41464 +}
41465 +#endif
41466 +
41467 +static unsigned long pax_parse_xattr_pax(struct file * const file)
41468 +{
41469 +
41470 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
41471 + ssize_t xattr_size, i;
41472 + unsigned char xattr_value[5];
41473 + unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
41474 +
41475 + xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
41476 + if (xattr_size <= 0)
41477 + return ~0UL;
41478 +
41479 + for (i = 0; i < xattr_size; i++)
41480 + switch (xattr_value[i]) {
41481 + default:
41482 + return ~0UL;
41483 +
41484 +#define parse_flag(option1, option2, flag) \
41485 + case option1: \
41486 + pax_flags_hardmode |= MF_PAX_##flag; \
41487 + break; \
41488 + case option2: \
41489 + pax_flags_softmode |= MF_PAX_##flag; \
41490 + break;
41491 +
41492 + parse_flag('p', 'P', PAGEEXEC);
41493 + parse_flag('e', 'E', EMUTRAMP);
41494 + parse_flag('m', 'M', MPROTECT);
41495 + parse_flag('r', 'R', RANDMMAP);
41496 + parse_flag('s', 'S', SEGMEXEC);
41497 +
41498 +#undef parse_flag
41499 + }
41500 +
41501 + if (pax_flags_hardmode & pax_flags_softmode)
41502 + return ~0UL;
41503 +
41504 +#ifdef CONFIG_PAX_SOFTMODE
41505 + if (pax_softmode)
41506 + return pax_parse_xattr_pax_softmode(pax_flags_softmode);
41507 + else
41508 +#endif
41509 +
41510 + return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
41511 +#else
41512 + return ~0UL;
41513 +#endif
41514 +
41515 +}
41516 +
41517 +static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
41518 +{
41519 + unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
41520 +
41521 + pax_flags = pax_parse_ei_pax(elf_ex);
41522 + pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
41523 + xattr_pax_flags = pax_parse_xattr_pax(file);
41524 +
41525 + if (pt_pax_flags == ~0UL)
41526 + pt_pax_flags = xattr_pax_flags;
41527 + else if (xattr_pax_flags == ~0UL)
41528 + xattr_pax_flags = pt_pax_flags;
41529 + if (pt_pax_flags != xattr_pax_flags)
41530 + return -EINVAL;
41531 + if (pt_pax_flags != ~0UL)
41532 + pax_flags = pt_pax_flags;
41533 +
41534 + if (0 > pax_check_flags(&pax_flags))
41535 + return -EINVAL;
41536 +
41537 + current->mm->pax_flags = pax_flags;
41538 + return 0;
41539 +}
41540 +#endif
41541 +
41542 /*
41543 * These are the functions used to load ELF style executables and shared
41544 * libraries. There is no binary dependent code anywhere else.
41545 @@ -544,6 +913,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
41546 {
41547 unsigned int random_variable = 0;
41548
41549 +#ifdef CONFIG_PAX_RANDUSTACK
41550 + if (randomize_va_space)
41551 + return stack_top - current->mm->delta_stack;
41552 +#endif
41553 +
41554 if ((current->flags & PF_RANDOMIZE) &&
41555 !(current->personality & ADDR_NO_RANDOMIZE)) {
41556 random_variable = get_random_int() & STACK_RND_MASK;
41557 @@ -562,7 +936,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
41558 unsigned long load_addr = 0, load_bias = 0;
41559 int load_addr_set = 0;
41560 char * elf_interpreter = NULL;
41561 - unsigned long error;
41562 + unsigned long error = 0;
41563 struct elf_phdr *elf_ppnt, *elf_phdata;
41564 unsigned long elf_bss, elf_brk;
41565 int retval, i;
41566 @@ -572,11 +946,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
41567 unsigned long start_code, end_code, start_data, end_data;
41568 unsigned long reloc_func_desc __maybe_unused = 0;
41569 int executable_stack = EXSTACK_DEFAULT;
41570 - unsigned long def_flags = 0;
41571 struct {
41572 struct elfhdr elf_ex;
41573 struct elfhdr interp_elf_ex;
41574 } *loc;
41575 + unsigned long pax_task_size = TASK_SIZE;
41576
41577 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
41578 if (!loc) {
41579 @@ -713,11 +1087,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
41580
41581 /* OK, This is the point of no return */
41582 current->flags &= ~PF_FORKNOEXEC;
41583 - current->mm->def_flags = def_flags;
41584 +
41585 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
41586 + current->mm->pax_flags = 0UL;
41587 +#endif
41588 +
41589 +#ifdef CONFIG_PAX_DLRESOLVE
41590 + current->mm->call_dl_resolve = 0UL;
41591 +#endif
41592 +
41593 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
41594 + current->mm->call_syscall = 0UL;
41595 +#endif
41596 +
41597 +#ifdef CONFIG_PAX_ASLR
41598 + current->mm->delta_mmap = 0UL;
41599 + current->mm->delta_stack = 0UL;
41600 +#endif
41601 +
41602 + current->mm->def_flags = 0;
41603 +
41604 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
41605 + if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
41606 + send_sig(SIGKILL, current, 0);
41607 + goto out_free_dentry;
41608 + }
41609 +#endif
41610 +
41611 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
41612 + pax_set_initial_flags(bprm);
41613 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
41614 + if (pax_set_initial_flags_func)
41615 + (pax_set_initial_flags_func)(bprm);
41616 +#endif
41617 +
41618 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
41619 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
41620 + current->mm->context.user_cs_limit = PAGE_SIZE;
41621 + current->mm->def_flags |= VM_PAGEEXEC;
41622 + }
41623 +#endif
41624 +
41625 +#ifdef CONFIG_PAX_SEGMEXEC
41626 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
41627 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
41628 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
41629 + pax_task_size = SEGMEXEC_TASK_SIZE;
41630 + current->mm->def_flags |= VM_NOHUGEPAGE;
41631 + }
41632 +#endif
41633 +
41634 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
41635 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41636 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
41637 + put_cpu();
41638 + }
41639 +#endif
41640
41641 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
41642 may depend on the personality. */
41643 SET_PERSONALITY(loc->elf_ex);
41644 +
41645 +#ifdef CONFIG_PAX_ASLR
41646 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
41647 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
41648 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
41649 + }
41650 +#endif
41651 +
41652 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
41653 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41654 + executable_stack = EXSTACK_DISABLE_X;
41655 + current->personality &= ~READ_IMPLIES_EXEC;
41656 + } else
41657 +#endif
41658 +
41659 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
41660 current->personality |= READ_IMPLIES_EXEC;
41661
41662 @@ -808,6 +1252,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
41663 #else
41664 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
41665 #endif
41666 +
41667 +#ifdef CONFIG_PAX_RANDMMAP
41668 + /* PaX: randomize base address at the default exe base if requested */
41669 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
41670 +#ifdef CONFIG_SPARC64
41671 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
41672 +#else
41673 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
41674 +#endif
41675 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
41676 + elf_flags |= MAP_FIXED;
41677 + }
41678 +#endif
41679 +
41680 }
41681
41682 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
41683 @@ -840,9 +1298,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
41684 * allowed task size. Note that p_filesz must always be
41685 * <= p_memsz so it is only necessary to check p_memsz.
41686 */
41687 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
41688 - elf_ppnt->p_memsz > TASK_SIZE ||
41689 - TASK_SIZE - elf_ppnt->p_memsz < k) {
41690 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
41691 + elf_ppnt->p_memsz > pax_task_size ||
41692 + pax_task_size - elf_ppnt->p_memsz < k) {
41693 /* set_brk can never work. Avoid overflows. */
41694 send_sig(SIGKILL, current, 0);
41695 retval = -EINVAL;
41696 @@ -881,11 +1339,35 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
41697 goto out_free_dentry;
41698 }
41699 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
41700 - send_sig(SIGSEGV, current, 0);
41701 - retval = -EFAULT; /* Nobody gets to see this, but.. */
41702 - goto out_free_dentry;
41703 + /*
41704 + * This bss-zeroing can fail if the ELF
41705 + * file specifies odd protections. So
41706 + * we don't check the return value
41707 + */
41708 }
41709
41710 +#ifdef CONFIG_PAX_RANDMMAP
41711 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
41712 + unsigned long start, size;
41713 +
41714 + start = ELF_PAGEALIGN(elf_brk);
41715 + size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
41716 + down_write(&current->mm->mmap_sem);
41717 + retval = -ENOMEM;
41718 + if (!find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
41719 + start = do_mmap(NULL, start, size, PROT_NONE, MAP_ANONYMOUS | MAP_FIXED | MAP_PRIVATE, 0);
41720 + retval = IS_ERR_VALUE(start) ? start : 0;
41721 + }
41722 + up_write(&current->mm->mmap_sem);
41723 + if (retval == 0)
41724 + retval = set_brk(start + size, start + size + PAGE_SIZE);
41725 + if (retval < 0) {
41726 + send_sig(SIGKILL, current, 0);
41727 + goto out_free_dentry;
41728 + }
41729 + }
41730 +#endif
41731 +
41732 if (elf_interpreter) {
41733 unsigned long uninitialized_var(interp_map_addr);
41734
41735 @@ -1098,7 +1580,7 @@ out:
41736 * Decide what to dump of a segment, part, all or none.
41737 */
41738 static unsigned long vma_dump_size(struct vm_area_struct *vma,
41739 - unsigned long mm_flags)
41740 + unsigned long mm_flags, long signr)
41741 {
41742 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
41743
41744 @@ -1132,7 +1614,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
41745 if (vma->vm_file == NULL)
41746 return 0;
41747
41748 - if (FILTER(MAPPED_PRIVATE))
41749 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
41750 goto whole;
41751
41752 /*
41753 @@ -1354,9 +1836,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
41754 {
41755 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
41756 int i = 0;
41757 - do
41758 + do {
41759 i += 2;
41760 - while (auxv[i - 2] != AT_NULL);
41761 + } while (auxv[i - 2] != AT_NULL);
41762 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
41763 }
41764
41765 @@ -1862,14 +2344,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
41766 }
41767
41768 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
41769 - unsigned long mm_flags)
41770 + struct coredump_params *cprm)
41771 {
41772 struct vm_area_struct *vma;
41773 size_t size = 0;
41774
41775 for (vma = first_vma(current, gate_vma); vma != NULL;
41776 vma = next_vma(vma, gate_vma))
41777 - size += vma_dump_size(vma, mm_flags);
41778 + size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
41779 return size;
41780 }
41781
41782 @@ -1963,7 +2445,7 @@ static int elf_core_dump(struct coredump_params *cprm)
41783
41784 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
41785
41786 - offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
41787 + offset += elf_core_vma_data_size(gate_vma, cprm);
41788 offset += elf_core_extra_data_size();
41789 e_shoff = offset;
41790
41791 @@ -1977,10 +2459,12 @@ static int elf_core_dump(struct coredump_params *cprm)
41792 offset = dataoff;
41793
41794 size += sizeof(*elf);
41795 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
41796 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
41797 goto end_coredump;
41798
41799 size += sizeof(*phdr4note);
41800 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
41801 if (size > cprm->limit
41802 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
41803 goto end_coredump;
41804 @@ -1994,7 +2478,7 @@ static int elf_core_dump(struct coredump_params *cprm)
41805 phdr.p_offset = offset;
41806 phdr.p_vaddr = vma->vm_start;
41807 phdr.p_paddr = 0;
41808 - phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
41809 + phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
41810 phdr.p_memsz = vma->vm_end - vma->vm_start;
41811 offset += phdr.p_filesz;
41812 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
41813 @@ -2005,6 +2489,7 @@ static int elf_core_dump(struct coredump_params *cprm)
41814 phdr.p_align = ELF_EXEC_PAGESIZE;
41815
41816 size += sizeof(phdr);
41817 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
41818 if (size > cprm->limit
41819 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
41820 goto end_coredump;
41821 @@ -2029,7 +2514,7 @@ static int elf_core_dump(struct coredump_params *cprm)
41822 unsigned long addr;
41823 unsigned long end;
41824
41825 - end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
41826 + end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
41827
41828 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
41829 struct page *page;
41830 @@ -2038,6 +2523,7 @@ static int elf_core_dump(struct coredump_params *cprm)
41831 page = get_dump_page(addr);
41832 if (page) {
41833 void *kaddr = kmap(page);
41834 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
41835 stop = ((size += PAGE_SIZE) > cprm->limit) ||
41836 !dump_write(cprm->file, kaddr,
41837 PAGE_SIZE);
41838 @@ -2055,6 +2541,7 @@ static int elf_core_dump(struct coredump_params *cprm)
41839
41840 if (e_phnum == PN_XNUM) {
41841 size += sizeof(*shdr4extnum);
41842 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
41843 if (size > cprm->limit
41844 || !dump_write(cprm->file, shdr4extnum,
41845 sizeof(*shdr4extnum)))
41846 @@ -2075,6 +2562,97 @@ out:
41847
41848 #endif /* CONFIG_ELF_CORE */
41849
41850 +#ifdef CONFIG_PAX_MPROTECT
41851 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
41852 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
41853 + * we'll remove VM_MAYWRITE for good on RELRO segments.
41854 + *
41855 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
41856 + * basis because we want to allow the common case and not the special ones.
41857 + */
41858 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
41859 +{
41860 + struct elfhdr elf_h;
41861 + struct elf_phdr elf_p;
41862 + unsigned long i;
41863 + unsigned long oldflags;
41864 + bool is_textrel_rw, is_textrel_rx, is_relro;
41865 +
41866 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
41867 + return;
41868 +
41869 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
41870 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
41871 +
41872 +#ifdef CONFIG_PAX_ELFRELOCS
41873 + /* possible TEXTREL */
41874 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
41875 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
41876 +#else
41877 + is_textrel_rw = false;
41878 + is_textrel_rx = false;
41879 +#endif
41880 +
41881 + /* possible RELRO */
41882 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
41883 +
41884 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
41885 + return;
41886 +
41887 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
41888 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
41889 +
41890 +#ifdef CONFIG_PAX_ETEXECRELOCS
41891 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
41892 +#else
41893 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
41894 +#endif
41895 +
41896 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
41897 + !elf_check_arch(&elf_h) ||
41898 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
41899 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
41900 + return;
41901 +
41902 + for (i = 0UL; i < elf_h.e_phnum; i++) {
41903 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
41904 + return;
41905 + switch (elf_p.p_type) {
41906 + case PT_DYNAMIC:
41907 + if (!is_textrel_rw && !is_textrel_rx)
41908 + continue;
41909 + i = 0UL;
41910 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
41911 + elf_dyn dyn;
41912 +
41913 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
41914 + return;
41915 + if (dyn.d_tag == DT_NULL)
41916 + return;
41917 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
41918 + gr_log_textrel(vma);
41919 + if (is_textrel_rw)
41920 + vma->vm_flags |= VM_MAYWRITE;
41921 + else
41922 + /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
41923 + vma->vm_flags &= ~VM_MAYWRITE;
41924 + return;
41925 + }
41926 + i++;
41927 + }
41928 + return;
41929 +
41930 + case PT_GNU_RELRO:
41931 + if (!is_relro)
41932 + continue;
41933 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
41934 + vma->vm_flags &= ~VM_MAYWRITE;
41935 + return;
41936 + }
41937 + }
41938 +}
41939 +#endif
41940 +
41941 static int __init init_elf_binfmt(void)
41942 {
41943 return register_binfmt(&elf_format);
41944 diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
41945 index 1bffbe0..c8c283e 100644
41946 --- a/fs/binfmt_flat.c
41947 +++ b/fs/binfmt_flat.c
41948 @@ -567,7 +567,9 @@ static int load_flat_file(struct linux_binprm * bprm,
41949 realdatastart = (unsigned long) -ENOMEM;
41950 printk("Unable to allocate RAM for process data, errno %d\n",
41951 (int)-realdatastart);
41952 + down_write(&current->mm->mmap_sem);
41953 do_munmap(current->mm, textpos, text_len);
41954 + up_write(&current->mm->mmap_sem);
41955 ret = realdatastart;
41956 goto err;
41957 }
41958 @@ -591,8 +593,10 @@ static int load_flat_file(struct linux_binprm * bprm,
41959 }
41960 if (IS_ERR_VALUE(result)) {
41961 printk("Unable to read data+bss, errno %d\n", (int)-result);
41962 + down_write(&current->mm->mmap_sem);
41963 do_munmap(current->mm, textpos, text_len);
41964 do_munmap(current->mm, realdatastart, len);
41965 + up_write(&current->mm->mmap_sem);
41966 ret = result;
41967 goto err;
41968 }
41969 @@ -661,8 +665,10 @@ static int load_flat_file(struct linux_binprm * bprm,
41970 }
41971 if (IS_ERR_VALUE(result)) {
41972 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
41973 + down_write(&current->mm->mmap_sem);
41974 do_munmap(current->mm, textpos, text_len + data_len + extra +
41975 MAX_SHARED_LIBS * sizeof(unsigned long));
41976 + up_write(&current->mm->mmap_sem);
41977 ret = result;
41978 goto err;
41979 }
41980 diff --git a/fs/bio.c b/fs/bio.c
41981 index b980ecd..74800bf 100644
41982 --- a/fs/bio.c
41983 +++ b/fs/bio.c
41984 @@ -833,7 +833,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
41985 /*
41986 * Overflow, abort
41987 */
41988 - if (end < start)
41989 + if (end < start || end - start > INT_MAX - nr_pages)
41990 return ERR_PTR(-EINVAL);
41991
41992 nr_pages += end - start;
41993 @@ -1229,7 +1229,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
41994 const int read = bio_data_dir(bio) == READ;
41995 struct bio_map_data *bmd = bio->bi_private;
41996 int i;
41997 - char *p = bmd->sgvecs[0].iov_base;
41998 + char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
41999
42000 __bio_for_each_segment(bvec, bio, i, 0) {
42001 char *addr = page_address(bvec->bv_page);
42002 diff --git a/fs/block_dev.c b/fs/block_dev.c
42003 index 5e9f198..6bf9b1c 100644
42004 --- a/fs/block_dev.c
42005 +++ b/fs/block_dev.c
42006 @@ -703,7 +703,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
42007 else if (bdev->bd_contains == bdev)
42008 return true; /* is a whole device which isn't held */
42009
42010 - else if (whole->bd_holder == bd_may_claim)
42011 + else if (whole->bd_holder == (void *)bd_may_claim)
42012 return true; /* is a partition of a device that is being partitioned */
42013 else if (whole->bd_holder != NULL)
42014 return false; /* is a partition of a held device */
42015 diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
42016 index d986824..af1befd 100644
42017 --- a/fs/btrfs/check-integrity.c
42018 +++ b/fs/btrfs/check-integrity.c
42019 @@ -157,7 +157,7 @@ struct btrfsic_block {
42020 union {
42021 bio_end_io_t *bio;
42022 bh_end_io_t *bh;
42023 - } orig_bio_bh_end_io;
42024 + } __no_const orig_bio_bh_end_io;
42025 int submit_bio_bh_rw;
42026 u64 flush_gen; /* only valid if !never_written */
42027 };
42028 diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
42029 index 0639a55..7d9e07f 100644
42030 --- a/fs/btrfs/ctree.c
42031 +++ b/fs/btrfs/ctree.c
42032 @@ -488,9 +488,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
42033 free_extent_buffer(buf);
42034 add_root_to_dirty_list(root);
42035 } else {
42036 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
42037 - parent_start = parent->start;
42038 - else
42039 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
42040 + if (parent)
42041 + parent_start = parent->start;
42042 + else
42043 + parent_start = 0;
42044 + } else
42045 parent_start = 0;
42046
42047 WARN_ON(trans->transid != btrfs_header_generation(parent));
42048 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
42049 index 892b347..b3db246 100644
42050 --- a/fs/btrfs/inode.c
42051 +++ b/fs/btrfs/inode.c
42052 @@ -6930,7 +6930,7 @@ fail:
42053 return -ENOMEM;
42054 }
42055
42056 -static int btrfs_getattr(struct vfsmount *mnt,
42057 +int btrfs_getattr(struct vfsmount *mnt,
42058 struct dentry *dentry, struct kstat *stat)
42059 {
42060 struct inode *inode = dentry->d_inode;
42061 @@ -6944,6 +6944,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
42062 return 0;
42063 }
42064
42065 +EXPORT_SYMBOL(btrfs_getattr);
42066 +
42067 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
42068 +{
42069 + return BTRFS_I(inode)->root->anon_dev;
42070 +}
42071 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
42072 +
42073 /*
42074 * If a file is moved, it will inherit the cow and compression flags of the new
42075 * directory.
42076 diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
42077 index d8b5471..e5463d7 100644
42078 --- a/fs/btrfs/ioctl.c
42079 +++ b/fs/btrfs/ioctl.c
42080 @@ -2783,9 +2783,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
42081 for (i = 0; i < num_types; i++) {
42082 struct btrfs_space_info *tmp;
42083
42084 + /* Don't copy in more than we allocated */
42085 if (!slot_count)
42086 break;
42087
42088 + slot_count--;
42089 +
42090 info = NULL;
42091 rcu_read_lock();
42092 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
42093 @@ -2807,15 +2810,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
42094 memcpy(dest, &space, sizeof(space));
42095 dest++;
42096 space_args.total_spaces++;
42097 - slot_count--;
42098 }
42099 - if (!slot_count)
42100 - break;
42101 }
42102 up_read(&info->groups_sem);
42103 }
42104
42105 - user_dest = (struct btrfs_ioctl_space_info *)
42106 + user_dest = (struct btrfs_ioctl_space_info __user *)
42107 (arg + sizeof(struct btrfs_ioctl_space_args));
42108
42109 if (copy_to_user(user_dest, dest_orig, alloc_size))
42110 diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
42111 index 8c1aae2..1e46446 100644
42112 --- a/fs/btrfs/relocation.c
42113 +++ b/fs/btrfs/relocation.c
42114 @@ -1244,7 +1244,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
42115 }
42116 spin_unlock(&rc->reloc_root_tree.lock);
42117
42118 - BUG_ON((struct btrfs_root *)node->data != root);
42119 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
42120
42121 if (!del) {
42122 spin_lock(&rc->reloc_root_tree.lock);
42123 diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
42124 index 622f469..e8d2d55 100644
42125 --- a/fs/cachefiles/bind.c
42126 +++ b/fs/cachefiles/bind.c
42127 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
42128 args);
42129
42130 /* start by checking things over */
42131 - ASSERT(cache->fstop_percent >= 0 &&
42132 - cache->fstop_percent < cache->fcull_percent &&
42133 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
42134 cache->fcull_percent < cache->frun_percent &&
42135 cache->frun_percent < 100);
42136
42137 - ASSERT(cache->bstop_percent >= 0 &&
42138 - cache->bstop_percent < cache->bcull_percent &&
42139 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
42140 cache->bcull_percent < cache->brun_percent &&
42141 cache->brun_percent < 100);
42142
42143 diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
42144 index 0a1467b..6a53245 100644
42145 --- a/fs/cachefiles/daemon.c
42146 +++ b/fs/cachefiles/daemon.c
42147 @@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
42148 if (n > buflen)
42149 return -EMSGSIZE;
42150
42151 - if (copy_to_user(_buffer, buffer, n) != 0)
42152 + if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
42153 return -EFAULT;
42154
42155 return n;
42156 @@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
42157 if (test_bit(CACHEFILES_DEAD, &cache->flags))
42158 return -EIO;
42159
42160 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
42161 + if (datalen > PAGE_SIZE - 1)
42162 return -EOPNOTSUPP;
42163
42164 /* drag the command string into the kernel so we can parse it */
42165 @@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
42166 if (args[0] != '%' || args[1] != '\0')
42167 return -EINVAL;
42168
42169 - if (fstop < 0 || fstop >= cache->fcull_percent)
42170 + if (fstop >= cache->fcull_percent)
42171 return cachefiles_daemon_range_error(cache, args);
42172
42173 cache->fstop_percent = fstop;
42174 @@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
42175 if (args[0] != '%' || args[1] != '\0')
42176 return -EINVAL;
42177
42178 - if (bstop < 0 || bstop >= cache->bcull_percent)
42179 + if (bstop >= cache->bcull_percent)
42180 return cachefiles_daemon_range_error(cache, args);
42181
42182 cache->bstop_percent = bstop;
42183 diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
42184 index bd6bc1b..b627b53 100644
42185 --- a/fs/cachefiles/internal.h
42186 +++ b/fs/cachefiles/internal.h
42187 @@ -57,7 +57,7 @@ struct cachefiles_cache {
42188 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
42189 struct rb_root active_nodes; /* active nodes (can't be culled) */
42190 rwlock_t active_lock; /* lock for active_nodes */
42191 - atomic_t gravecounter; /* graveyard uniquifier */
42192 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
42193 unsigned frun_percent; /* when to stop culling (% files) */
42194 unsigned fcull_percent; /* when to start culling (% files) */
42195 unsigned fstop_percent; /* when to stop allocating (% files) */
42196 @@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
42197 * proc.c
42198 */
42199 #ifdef CONFIG_CACHEFILES_HISTOGRAM
42200 -extern atomic_t cachefiles_lookup_histogram[HZ];
42201 -extern atomic_t cachefiles_mkdir_histogram[HZ];
42202 -extern atomic_t cachefiles_create_histogram[HZ];
42203 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
42204 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
42205 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
42206
42207 extern int __init cachefiles_proc_init(void);
42208 extern void cachefiles_proc_cleanup(void);
42209 static inline
42210 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
42211 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
42212 {
42213 unsigned long jif = jiffies - start_jif;
42214 if (jif >= HZ)
42215 jif = HZ - 1;
42216 - atomic_inc(&histogram[jif]);
42217 + atomic_inc_unchecked(&histogram[jif]);
42218 }
42219
42220 #else
42221 diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
42222 index a0358c2..d6137f2 100644
42223 --- a/fs/cachefiles/namei.c
42224 +++ b/fs/cachefiles/namei.c
42225 @@ -318,7 +318,7 @@ try_again:
42226 /* first step is to make up a grave dentry in the graveyard */
42227 sprintf(nbuffer, "%08x%08x",
42228 (uint32_t) get_seconds(),
42229 - (uint32_t) atomic_inc_return(&cache->gravecounter));
42230 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
42231
42232 /* do the multiway lock magic */
42233 trap = lock_rename(cache->graveyard, dir);
42234 diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
42235 index eccd339..4c1d995 100644
42236 --- a/fs/cachefiles/proc.c
42237 +++ b/fs/cachefiles/proc.c
42238 @@ -14,9 +14,9 @@
42239 #include <linux/seq_file.h>
42240 #include "internal.h"
42241
42242 -atomic_t cachefiles_lookup_histogram[HZ];
42243 -atomic_t cachefiles_mkdir_histogram[HZ];
42244 -atomic_t cachefiles_create_histogram[HZ];
42245 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
42246 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
42247 +atomic_unchecked_t cachefiles_create_histogram[HZ];
42248
42249 /*
42250 * display the latency histogram
42251 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
42252 return 0;
42253 default:
42254 index = (unsigned long) v - 3;
42255 - x = atomic_read(&cachefiles_lookup_histogram[index]);
42256 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
42257 - z = atomic_read(&cachefiles_create_histogram[index]);
42258 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
42259 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
42260 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
42261 if (x == 0 && y == 0 && z == 0)
42262 return 0;
42263
42264 diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
42265 index 0e3c092..818480e 100644
42266 --- a/fs/cachefiles/rdwr.c
42267 +++ b/fs/cachefiles/rdwr.c
42268 @@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
42269 old_fs = get_fs();
42270 set_fs(KERNEL_DS);
42271 ret = file->f_op->write(
42272 - file, (const void __user *) data, len, &pos);
42273 + file, (const void __force_user *) data, len, &pos);
42274 set_fs(old_fs);
42275 kunmap(page);
42276 if (ret != len)
42277 diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
42278 index 3e8094b..cb3ff3d 100644
42279 --- a/fs/ceph/dir.c
42280 +++ b/fs/ceph/dir.c
42281 @@ -244,7 +244,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
42282 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
42283 struct ceph_mds_client *mdsc = fsc->mdsc;
42284 unsigned frag = fpos_frag(filp->f_pos);
42285 - int off = fpos_off(filp->f_pos);
42286 + unsigned int off = fpos_off(filp->f_pos);
42287 int err;
42288 u32 ftype;
42289 struct ceph_mds_reply_info_parsed *rinfo;
42290 @@ -598,7 +598,7 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
42291 if (nd &&
42292 (nd->flags & LOOKUP_OPEN) &&
42293 !(nd->intent.open.flags & O_CREAT)) {
42294 - int mode = nd->intent.open.create_mode & ~current->fs->umask;
42295 + int mode = nd->intent.open.create_mode & ~current_umask();
42296 return ceph_lookup_open(dir, dentry, nd, mode, 1);
42297 }
42298
42299 diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
42300 index 24b3dfc..3cd5454 100644
42301 --- a/fs/cifs/cifs_debug.c
42302 +++ b/fs/cifs/cifs_debug.c
42303 @@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
42304
42305 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
42306 #ifdef CONFIG_CIFS_STATS2
42307 - atomic_set(&totBufAllocCount, 0);
42308 - atomic_set(&totSmBufAllocCount, 0);
42309 + atomic_set_unchecked(&totBufAllocCount, 0);
42310 + atomic_set_unchecked(&totSmBufAllocCount, 0);
42311 #endif /* CONFIG_CIFS_STATS2 */
42312 spin_lock(&cifs_tcp_ses_lock);
42313 list_for_each(tmp1, &cifs_tcp_ses_list) {
42314 @@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
42315 tcon = list_entry(tmp3,
42316 struct cifs_tcon,
42317 tcon_list);
42318 - atomic_set(&tcon->num_smbs_sent, 0);
42319 - atomic_set(&tcon->num_writes, 0);
42320 - atomic_set(&tcon->num_reads, 0);
42321 - atomic_set(&tcon->num_oplock_brks, 0);
42322 - atomic_set(&tcon->num_opens, 0);
42323 - atomic_set(&tcon->num_posixopens, 0);
42324 - atomic_set(&tcon->num_posixmkdirs, 0);
42325 - atomic_set(&tcon->num_closes, 0);
42326 - atomic_set(&tcon->num_deletes, 0);
42327 - atomic_set(&tcon->num_mkdirs, 0);
42328 - atomic_set(&tcon->num_rmdirs, 0);
42329 - atomic_set(&tcon->num_renames, 0);
42330 - atomic_set(&tcon->num_t2renames, 0);
42331 - atomic_set(&tcon->num_ffirst, 0);
42332 - atomic_set(&tcon->num_fnext, 0);
42333 - atomic_set(&tcon->num_fclose, 0);
42334 - atomic_set(&tcon->num_hardlinks, 0);
42335 - atomic_set(&tcon->num_symlinks, 0);
42336 - atomic_set(&tcon->num_locks, 0);
42337 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
42338 + atomic_set_unchecked(&tcon->num_writes, 0);
42339 + atomic_set_unchecked(&tcon->num_reads, 0);
42340 + atomic_set_unchecked(&tcon->num_oplock_brks, 0);
42341 + atomic_set_unchecked(&tcon->num_opens, 0);
42342 + atomic_set_unchecked(&tcon->num_posixopens, 0);
42343 + atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
42344 + atomic_set_unchecked(&tcon->num_closes, 0);
42345 + atomic_set_unchecked(&tcon->num_deletes, 0);
42346 + atomic_set_unchecked(&tcon->num_mkdirs, 0);
42347 + atomic_set_unchecked(&tcon->num_rmdirs, 0);
42348 + atomic_set_unchecked(&tcon->num_renames, 0);
42349 + atomic_set_unchecked(&tcon->num_t2renames, 0);
42350 + atomic_set_unchecked(&tcon->num_ffirst, 0);
42351 + atomic_set_unchecked(&tcon->num_fnext, 0);
42352 + atomic_set_unchecked(&tcon->num_fclose, 0);
42353 + atomic_set_unchecked(&tcon->num_hardlinks, 0);
42354 + atomic_set_unchecked(&tcon->num_symlinks, 0);
42355 + atomic_set_unchecked(&tcon->num_locks, 0);
42356 }
42357 }
42358 }
42359 @@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
42360 smBufAllocCount.counter, cifs_min_small);
42361 #ifdef CONFIG_CIFS_STATS2
42362 seq_printf(m, "Total Large %d Small %d Allocations\n",
42363 - atomic_read(&totBufAllocCount),
42364 - atomic_read(&totSmBufAllocCount));
42365 + atomic_read_unchecked(&totBufAllocCount),
42366 + atomic_read_unchecked(&totSmBufAllocCount));
42367 #endif /* CONFIG_CIFS_STATS2 */
42368
42369 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
42370 @@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
42371 if (tcon->need_reconnect)
42372 seq_puts(m, "\tDISCONNECTED ");
42373 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
42374 - atomic_read(&tcon->num_smbs_sent),
42375 - atomic_read(&tcon->num_oplock_brks));
42376 + atomic_read_unchecked(&tcon->num_smbs_sent),
42377 + atomic_read_unchecked(&tcon->num_oplock_brks));
42378 seq_printf(m, "\nReads: %d Bytes: %lld",
42379 - atomic_read(&tcon->num_reads),
42380 + atomic_read_unchecked(&tcon->num_reads),
42381 (long long)(tcon->bytes_read));
42382 seq_printf(m, "\nWrites: %d Bytes: %lld",
42383 - atomic_read(&tcon->num_writes),
42384 + atomic_read_unchecked(&tcon->num_writes),
42385 (long long)(tcon->bytes_written));
42386 seq_printf(m, "\nFlushes: %d",
42387 - atomic_read(&tcon->num_flushes));
42388 + atomic_read_unchecked(&tcon->num_flushes));
42389 seq_printf(m, "\nLocks: %d HardLinks: %d "
42390 "Symlinks: %d",
42391 - atomic_read(&tcon->num_locks),
42392 - atomic_read(&tcon->num_hardlinks),
42393 - atomic_read(&tcon->num_symlinks));
42394 + atomic_read_unchecked(&tcon->num_locks),
42395 + atomic_read_unchecked(&tcon->num_hardlinks),
42396 + atomic_read_unchecked(&tcon->num_symlinks));
42397 seq_printf(m, "\nOpens: %d Closes: %d "
42398 "Deletes: %d",
42399 - atomic_read(&tcon->num_opens),
42400 - atomic_read(&tcon->num_closes),
42401 - atomic_read(&tcon->num_deletes));
42402 + atomic_read_unchecked(&tcon->num_opens),
42403 + atomic_read_unchecked(&tcon->num_closes),
42404 + atomic_read_unchecked(&tcon->num_deletes));
42405 seq_printf(m, "\nPosix Opens: %d "
42406 "Posix Mkdirs: %d",
42407 - atomic_read(&tcon->num_posixopens),
42408 - atomic_read(&tcon->num_posixmkdirs));
42409 + atomic_read_unchecked(&tcon->num_posixopens),
42410 + atomic_read_unchecked(&tcon->num_posixmkdirs));
42411 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
42412 - atomic_read(&tcon->num_mkdirs),
42413 - atomic_read(&tcon->num_rmdirs));
42414 + atomic_read_unchecked(&tcon->num_mkdirs),
42415 + atomic_read_unchecked(&tcon->num_rmdirs));
42416 seq_printf(m, "\nRenames: %d T2 Renames %d",
42417 - atomic_read(&tcon->num_renames),
42418 - atomic_read(&tcon->num_t2renames));
42419 + atomic_read_unchecked(&tcon->num_renames),
42420 + atomic_read_unchecked(&tcon->num_t2renames));
42421 seq_printf(m, "\nFindFirst: %d FNext %d "
42422 "FClose %d",
42423 - atomic_read(&tcon->num_ffirst),
42424 - atomic_read(&tcon->num_fnext),
42425 - atomic_read(&tcon->num_fclose));
42426 + atomic_read_unchecked(&tcon->num_ffirst),
42427 + atomic_read_unchecked(&tcon->num_fnext),
42428 + atomic_read_unchecked(&tcon->num_fclose));
42429 }
42430 }
42431 }
42432 diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
42433 index b1fd382..df45435 100644
42434 --- a/fs/cifs/cifsfs.c
42435 +++ b/fs/cifs/cifsfs.c
42436 @@ -989,7 +989,7 @@ cifs_init_request_bufs(void)
42437 cifs_req_cachep = kmem_cache_create("cifs_request",
42438 CIFSMaxBufSize +
42439 MAX_CIFS_HDR_SIZE, 0,
42440 - SLAB_HWCACHE_ALIGN, NULL);
42441 + SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
42442 if (cifs_req_cachep == NULL)
42443 return -ENOMEM;
42444
42445 @@ -1016,7 +1016,7 @@ cifs_init_request_bufs(void)
42446 efficient to alloc 1 per page off the slab compared to 17K (5page)
42447 alloc of large cifs buffers even when page debugging is on */
42448 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
42449 - MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
42450 + MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
42451 NULL);
42452 if (cifs_sm_req_cachep == NULL) {
42453 mempool_destroy(cifs_req_poolp);
42454 @@ -1101,8 +1101,8 @@ init_cifs(void)
42455 atomic_set(&bufAllocCount, 0);
42456 atomic_set(&smBufAllocCount, 0);
42457 #ifdef CONFIG_CIFS_STATS2
42458 - atomic_set(&totBufAllocCount, 0);
42459 - atomic_set(&totSmBufAllocCount, 0);
42460 + atomic_set_unchecked(&totBufAllocCount, 0);
42461 + atomic_set_unchecked(&totSmBufAllocCount, 0);
42462 #endif /* CONFIG_CIFS_STATS2 */
42463
42464 atomic_set(&midCount, 0);
42465 diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
42466 index 76e7d8b..4814992 100644
42467 --- a/fs/cifs/cifsglob.h
42468 +++ b/fs/cifs/cifsglob.h
42469 @@ -392,28 +392,28 @@ struct cifs_tcon {
42470 __u16 Flags; /* optional support bits */
42471 enum statusEnum tidStatus;
42472 #ifdef CONFIG_CIFS_STATS
42473 - atomic_t num_smbs_sent;
42474 - atomic_t num_writes;
42475 - atomic_t num_reads;
42476 - atomic_t num_flushes;
42477 - atomic_t num_oplock_brks;
42478 - atomic_t num_opens;
42479 - atomic_t num_closes;
42480 - atomic_t num_deletes;
42481 - atomic_t num_mkdirs;
42482 - atomic_t num_posixopens;
42483 - atomic_t num_posixmkdirs;
42484 - atomic_t num_rmdirs;
42485 - atomic_t num_renames;
42486 - atomic_t num_t2renames;
42487 - atomic_t num_ffirst;
42488 - atomic_t num_fnext;
42489 - atomic_t num_fclose;
42490 - atomic_t num_hardlinks;
42491 - atomic_t num_symlinks;
42492 - atomic_t num_locks;
42493 - atomic_t num_acl_get;
42494 - atomic_t num_acl_set;
42495 + atomic_unchecked_t num_smbs_sent;
42496 + atomic_unchecked_t num_writes;
42497 + atomic_unchecked_t num_reads;
42498 + atomic_unchecked_t num_flushes;
42499 + atomic_unchecked_t num_oplock_brks;
42500 + atomic_unchecked_t num_opens;
42501 + atomic_unchecked_t num_closes;
42502 + atomic_unchecked_t num_deletes;
42503 + atomic_unchecked_t num_mkdirs;
42504 + atomic_unchecked_t num_posixopens;
42505 + atomic_unchecked_t num_posixmkdirs;
42506 + atomic_unchecked_t num_rmdirs;
42507 + atomic_unchecked_t num_renames;
42508 + atomic_unchecked_t num_t2renames;
42509 + atomic_unchecked_t num_ffirst;
42510 + atomic_unchecked_t num_fnext;
42511 + atomic_unchecked_t num_fclose;
42512 + atomic_unchecked_t num_hardlinks;
42513 + atomic_unchecked_t num_symlinks;
42514 + atomic_unchecked_t num_locks;
42515 + atomic_unchecked_t num_acl_get;
42516 + atomic_unchecked_t num_acl_set;
42517 #ifdef CONFIG_CIFS_STATS2
42518 unsigned long long time_writes;
42519 unsigned long long time_reads;
42520 @@ -628,7 +628,7 @@ convert_delimiter(char *path, char delim)
42521 }
42522
42523 #ifdef CONFIG_CIFS_STATS
42524 -#define cifs_stats_inc atomic_inc
42525 +#define cifs_stats_inc atomic_inc_unchecked
42526
42527 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
42528 unsigned int bytes)
42529 @@ -987,8 +987,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
42530 /* Various Debug counters */
42531 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
42532 #ifdef CONFIG_CIFS_STATS2
42533 -GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
42534 -GLOBAL_EXTERN atomic_t totSmBufAllocCount;
42535 +GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
42536 +GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
42537 #endif
42538 GLOBAL_EXTERN atomic_t smBufAllocCount;
42539 GLOBAL_EXTERN atomic_t midCount;
42540 diff --git a/fs/cifs/link.c b/fs/cifs/link.c
42541 index 6b0e064..94e6c3c 100644
42542 --- a/fs/cifs/link.c
42543 +++ b/fs/cifs/link.c
42544 @@ -600,7 +600,7 @@ symlink_exit:
42545
42546 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
42547 {
42548 - char *p = nd_get_link(nd);
42549 + const char *p = nd_get_link(nd);
42550 if (!IS_ERR(p))
42551 kfree(p);
42552 }
42553 diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
42554 index 703ef5c..2a44ed5 100644
42555 --- a/fs/cifs/misc.c
42556 +++ b/fs/cifs/misc.c
42557 @@ -156,7 +156,7 @@ cifs_buf_get(void)
42558 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
42559 atomic_inc(&bufAllocCount);
42560 #ifdef CONFIG_CIFS_STATS2
42561 - atomic_inc(&totBufAllocCount);
42562 + atomic_inc_unchecked(&totBufAllocCount);
42563 #endif /* CONFIG_CIFS_STATS2 */
42564 }
42565
42566 @@ -191,7 +191,7 @@ cifs_small_buf_get(void)
42567 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
42568 atomic_inc(&smBufAllocCount);
42569 #ifdef CONFIG_CIFS_STATS2
42570 - atomic_inc(&totSmBufAllocCount);
42571 + atomic_inc_unchecked(&totSmBufAllocCount);
42572 #endif /* CONFIG_CIFS_STATS2 */
42573
42574 }
42575 diff --git a/fs/coda/cache.c b/fs/coda/cache.c
42576 index 6901578..d402eb5 100644
42577 --- a/fs/coda/cache.c
42578 +++ b/fs/coda/cache.c
42579 @@ -24,7 +24,7 @@
42580 #include "coda_linux.h"
42581 #include "coda_cache.h"
42582
42583 -static atomic_t permission_epoch = ATOMIC_INIT(0);
42584 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
42585
42586 /* replace or extend an acl cache hit */
42587 void coda_cache_enter(struct inode *inode, int mask)
42588 @@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
42589 struct coda_inode_info *cii = ITOC(inode);
42590
42591 spin_lock(&cii->c_lock);
42592 - cii->c_cached_epoch = atomic_read(&permission_epoch);
42593 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
42594 if (cii->c_uid != current_fsuid()) {
42595 cii->c_uid = current_fsuid();
42596 cii->c_cached_perm = mask;
42597 @@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
42598 {
42599 struct coda_inode_info *cii = ITOC(inode);
42600 spin_lock(&cii->c_lock);
42601 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
42602 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
42603 spin_unlock(&cii->c_lock);
42604 }
42605
42606 /* remove all acl caches */
42607 void coda_cache_clear_all(struct super_block *sb)
42608 {
42609 - atomic_inc(&permission_epoch);
42610 + atomic_inc_unchecked(&permission_epoch);
42611 }
42612
42613
42614 @@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
42615 spin_lock(&cii->c_lock);
42616 hit = (mask & cii->c_cached_perm) == mask &&
42617 cii->c_uid == current_fsuid() &&
42618 - cii->c_cached_epoch == atomic_read(&permission_epoch);
42619 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
42620 spin_unlock(&cii->c_lock);
42621
42622 return hit;
42623 diff --git a/fs/compat.c b/fs/compat.c
42624 index 07880ba..3fb2862 100644
42625 --- a/fs/compat.c
42626 +++ b/fs/compat.c
42627 @@ -491,7 +491,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
42628
42629 set_fs(KERNEL_DS);
42630 /* The __user pointer cast is valid because of the set_fs() */
42631 - ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
42632 + ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
42633 set_fs(oldfs);
42634 /* truncating is ok because it's a user address */
42635 if (!ret)
42636 @@ -549,7 +549,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
42637 goto out;
42638
42639 ret = -EINVAL;
42640 - if (nr_segs > UIO_MAXIOV || nr_segs < 0)
42641 + if (nr_segs > UIO_MAXIOV)
42642 goto out;
42643 if (nr_segs > fast_segs) {
42644 ret = -ENOMEM;
42645 @@ -832,6 +832,7 @@ struct compat_old_linux_dirent {
42646
42647 struct compat_readdir_callback {
42648 struct compat_old_linux_dirent __user *dirent;
42649 + struct file * file;
42650 int result;
42651 };
42652
42653 @@ -849,6 +850,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
42654 buf->result = -EOVERFLOW;
42655 return -EOVERFLOW;
42656 }
42657 +
42658 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
42659 + return 0;
42660 +
42661 buf->result++;
42662 dirent = buf->dirent;
42663 if (!access_ok(VERIFY_WRITE, dirent,
42664 @@ -881,6 +886,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
42665
42666 buf.result = 0;
42667 buf.dirent = dirent;
42668 + buf.file = file;
42669
42670 error = vfs_readdir(file, compat_fillonedir, &buf);
42671 if (buf.result)
42672 @@ -901,6 +907,7 @@ struct compat_linux_dirent {
42673 struct compat_getdents_callback {
42674 struct compat_linux_dirent __user *current_dir;
42675 struct compat_linux_dirent __user *previous;
42676 + struct file * file;
42677 int count;
42678 int error;
42679 };
42680 @@ -922,6 +929,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
42681 buf->error = -EOVERFLOW;
42682 return -EOVERFLOW;
42683 }
42684 +
42685 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
42686 + return 0;
42687 +
42688 dirent = buf->previous;
42689 if (dirent) {
42690 if (__put_user(offset, &dirent->d_off))
42691 @@ -969,6 +980,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
42692 buf.previous = NULL;
42693 buf.count = count;
42694 buf.error = 0;
42695 + buf.file = file;
42696
42697 error = vfs_readdir(file, compat_filldir, &buf);
42698 if (error >= 0)
42699 @@ -990,6 +1002,7 @@ out:
42700 struct compat_getdents_callback64 {
42701 struct linux_dirent64 __user *current_dir;
42702 struct linux_dirent64 __user *previous;
42703 + struct file * file;
42704 int count;
42705 int error;
42706 };
42707 @@ -1006,6 +1019,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
42708 buf->error = -EINVAL; /* only used if we fail.. */
42709 if (reclen > buf->count)
42710 return -EINVAL;
42711 +
42712 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
42713 + return 0;
42714 +
42715 dirent = buf->previous;
42716
42717 if (dirent) {
42718 @@ -1057,13 +1074,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
42719 buf.previous = NULL;
42720 buf.count = count;
42721 buf.error = 0;
42722 + buf.file = file;
42723
42724 error = vfs_readdir(file, compat_filldir64, &buf);
42725 if (error >= 0)
42726 error = buf.error;
42727 lastdirent = buf.previous;
42728 if (lastdirent) {
42729 - typeof(lastdirent->d_off) d_off = file->f_pos;
42730 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
42731 if (__put_user_unaligned(d_off, &lastdirent->d_off))
42732 error = -EFAULT;
42733 else
42734 diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
42735 index 112e45a..b59845b 100644
42736 --- a/fs/compat_binfmt_elf.c
42737 +++ b/fs/compat_binfmt_elf.c
42738 @@ -30,11 +30,13 @@
42739 #undef elf_phdr
42740 #undef elf_shdr
42741 #undef elf_note
42742 +#undef elf_dyn
42743 #undef elf_addr_t
42744 #define elfhdr elf32_hdr
42745 #define elf_phdr elf32_phdr
42746 #define elf_shdr elf32_shdr
42747 #define elf_note elf32_note
42748 +#define elf_dyn Elf32_Dyn
42749 #define elf_addr_t Elf32_Addr
42750
42751 /*
42752 diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
42753 index a26bea1..ae23e72 100644
42754 --- a/fs/compat_ioctl.c
42755 +++ b/fs/compat_ioctl.c
42756 @@ -211,6 +211,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd,
42757
42758 err = get_user(palp, &up->palette);
42759 err |= get_user(length, &up->length);
42760 + if (err)
42761 + return -EFAULT;
42762
42763 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
42764 err = put_user(compat_ptr(palp), &up_native->palette);
42765 @@ -622,7 +624,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
42766 return -EFAULT;
42767 if (__get_user(udata, &ss32->iomem_base))
42768 return -EFAULT;
42769 - ss.iomem_base = compat_ptr(udata);
42770 + ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
42771 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
42772 __get_user(ss.port_high, &ss32->port_high))
42773 return -EFAULT;
42774 @@ -797,7 +799,7 @@ static int compat_ioctl_preallocate(struct file *file,
42775 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
42776 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
42777 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
42778 - copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
42779 + copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
42780 return -EFAULT;
42781
42782 return ioctl_preallocate(file, p);
42783 @@ -1611,8 +1613,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
42784 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
42785 {
42786 unsigned int a, b;
42787 - a = *(unsigned int *)p;
42788 - b = *(unsigned int *)q;
42789 + a = *(const unsigned int *)p;
42790 + b = *(const unsigned int *)q;
42791 if (a > b)
42792 return 1;
42793 if (a < b)
42794 diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
42795 index 5ddd7eb..c18bf04 100644
42796 --- a/fs/configfs/dir.c
42797 +++ b/fs/configfs/dir.c
42798 @@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
42799 }
42800 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
42801 struct configfs_dirent *next;
42802 - const char * name;
42803 + const unsigned char * name;
42804 + char d_name[sizeof(next->s_dentry->d_iname)];
42805 int len;
42806 struct inode *inode = NULL;
42807
42808 @@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
42809 continue;
42810
42811 name = configfs_get_name(next);
42812 - len = strlen(name);
42813 + if (next->s_dentry && name == next->s_dentry->d_iname) {
42814 + len = next->s_dentry->d_name.len;
42815 + memcpy(d_name, name, len);
42816 + name = d_name;
42817 + } else
42818 + len = strlen(name);
42819
42820 /*
42821 * We'll have a dentry and an inode for
42822 diff --git a/fs/dcache.c b/fs/dcache.c
42823 index bcbdb33..55ffe97 100644
42824 --- a/fs/dcache.c
42825 +++ b/fs/dcache.c
42826 @@ -105,10 +105,10 @@ static unsigned int d_hash_shift __read_mostly;
42827 static struct hlist_bl_head *dentry_hashtable __read_mostly;
42828
42829 static inline struct hlist_bl_head *d_hash(const struct dentry *parent,
42830 - unsigned long hash)
42831 + unsigned int hash)
42832 {
42833 - hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES;
42834 - hash = hash ^ ((hash ^ GOLDEN_RATIO_PRIME) >> D_HASHBITS);
42835 + hash += (unsigned long) parent / L1_CACHE_BYTES;
42836 + hash = hash + (hash >> D_HASHBITS);
42837 return dentry_hashtable + (hash & D_HASHMASK);
42838 }
42839
42840 @@ -3066,7 +3066,7 @@ void __init vfs_caches_init(unsigned long mempages)
42841 mempages -= reserve;
42842
42843 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
42844 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
42845 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
42846
42847 dcache_init();
42848 inode_init();
42849 diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
42850 index 956d5dd..e755e04 100644
42851 --- a/fs/debugfs/inode.c
42852 +++ b/fs/debugfs/inode.c
42853 @@ -261,7 +261,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
42854 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
42855 {
42856 return debugfs_create_file(name,
42857 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
42858 + S_IFDIR | S_IRWXU,
42859 +#else
42860 S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
42861 +#endif
42862 parent, NULL, NULL);
42863 }
42864 EXPORT_SYMBOL_GPL(debugfs_create_dir);
42865 diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
42866 index ab35b11..b30af66 100644
42867 --- a/fs/ecryptfs/inode.c
42868 +++ b/fs/ecryptfs/inode.c
42869 @@ -672,7 +672,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
42870 old_fs = get_fs();
42871 set_fs(get_ds());
42872 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
42873 - (char __user *)lower_buf,
42874 + (char __force_user *)lower_buf,
42875 lower_bufsiz);
42876 set_fs(old_fs);
42877 if (rc < 0)
42878 @@ -718,7 +718,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
42879 }
42880 old_fs = get_fs();
42881 set_fs(get_ds());
42882 - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
42883 + rc = dentry->d_inode->i_op->readlink(dentry, (char __force_user *)buf, len);
42884 set_fs(old_fs);
42885 if (rc < 0) {
42886 kfree(buf);
42887 @@ -733,7 +733,7 @@ out:
42888 static void
42889 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
42890 {
42891 - char *buf = nd_get_link(nd);
42892 + const char *buf = nd_get_link(nd);
42893 if (!IS_ERR(buf)) {
42894 /* Free the char* */
42895 kfree(buf);
42896 diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
42897 index 3a06f40..f7af544 100644
42898 --- a/fs/ecryptfs/miscdev.c
42899 +++ b/fs/ecryptfs/miscdev.c
42900 @@ -345,7 +345,7 @@ check_list:
42901 goto out_unlock_msg_ctx;
42902 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
42903 if (msg_ctx->msg) {
42904 - if (copy_to_user(&buf[i], packet_length, packet_length_size))
42905 + if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
42906 goto out_unlock_msg_ctx;
42907 i += packet_length_size;
42908 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
42909 diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
42910 index b2a34a1..162fa69 100644
42911 --- a/fs/ecryptfs/read_write.c
42912 +++ b/fs/ecryptfs/read_write.c
42913 @@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
42914 return -EIO;
42915 fs_save = get_fs();
42916 set_fs(get_ds());
42917 - rc = vfs_write(lower_file, data, size, &offset);
42918 + rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
42919 set_fs(fs_save);
42920 mark_inode_dirty_sync(ecryptfs_inode);
42921 return rc;
42922 @@ -244,7 +244,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
42923 return -EIO;
42924 fs_save = get_fs();
42925 set_fs(get_ds());
42926 - rc = vfs_read(lower_file, data, size, &offset);
42927 + rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
42928 set_fs(fs_save);
42929 return rc;
42930 }
42931 diff --git a/fs/exec.c b/fs/exec.c
42932 index 153dee1..8ee97ba 100644
42933 --- a/fs/exec.c
42934 +++ b/fs/exec.c
42935 @@ -55,6 +55,13 @@
42936 #include <linux/pipe_fs_i.h>
42937 #include <linux/oom.h>
42938 #include <linux/compat.h>
42939 +#include <linux/random.h>
42940 +#include <linux/seq_file.h>
42941 +
42942 +#ifdef CONFIG_PAX_REFCOUNT
42943 +#include <linux/kallsyms.h>
42944 +#include <linux/kdebug.h>
42945 +#endif
42946
42947 #include <asm/uaccess.h>
42948 #include <asm/mmu_context.h>
42949 @@ -63,6 +70,15 @@
42950 #include <trace/events/task.h>
42951 #include "internal.h"
42952
42953 +#ifndef CONFIG_PAX_HAVE_ACL_FLAGS
42954 +void __weak pax_set_initial_flags(struct linux_binprm *bprm) {}
42955 +#endif
42956 +
42957 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
42958 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
42959 +EXPORT_SYMBOL(pax_set_initial_flags_func);
42960 +#endif
42961 +
42962 int core_uses_pid;
42963 char core_pattern[CORENAME_MAX_SIZE] = "core";
42964 unsigned int core_pipe_limit;
42965 @@ -72,7 +88,7 @@ struct core_name {
42966 char *corename;
42967 int used, size;
42968 };
42969 -static atomic_t call_count = ATOMIC_INIT(1);
42970 +static atomic_unchecked_t call_count = ATOMIC_INIT(1);
42971
42972 /* The maximal length of core_pattern is also specified in sysctl.c */
42973
42974 @@ -190,18 +206,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
42975 int write)
42976 {
42977 struct page *page;
42978 - int ret;
42979
42980 -#ifdef CONFIG_STACK_GROWSUP
42981 - if (write) {
42982 - ret = expand_downwards(bprm->vma, pos);
42983 - if (ret < 0)
42984 - return NULL;
42985 - }
42986 -#endif
42987 - ret = get_user_pages(current, bprm->mm, pos,
42988 - 1, write, 1, &page, NULL);
42989 - if (ret <= 0)
42990 + if (0 > expand_downwards(bprm->vma, pos))
42991 + return NULL;
42992 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
42993 return NULL;
42994
42995 if (write) {
42996 @@ -217,6 +225,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
42997 if (size <= ARG_MAX)
42998 return page;
42999
43000 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43001 + // only allow 512KB for argv+env on suid/sgid binaries
43002 + // to prevent easy ASLR exhaustion
43003 + if (((bprm->cred->euid != current_euid()) ||
43004 + (bprm->cred->egid != current_egid())) &&
43005 + (size > (512 * 1024))) {
43006 + put_page(page);
43007 + return NULL;
43008 + }
43009 +#endif
43010 +
43011 /*
43012 * Limit to 1/4-th the stack size for the argv+env strings.
43013 * This ensures that:
43014 @@ -276,6 +295,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
43015 vma->vm_end = STACK_TOP_MAX;
43016 vma->vm_start = vma->vm_end - PAGE_SIZE;
43017 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
43018 +
43019 +#ifdef CONFIG_PAX_SEGMEXEC
43020 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
43021 +#endif
43022 +
43023 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
43024 INIT_LIST_HEAD(&vma->anon_vma_chain);
43025
43026 @@ -290,6 +314,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
43027 mm->stack_vm = mm->total_vm = 1;
43028 up_write(&mm->mmap_sem);
43029 bprm->p = vma->vm_end - sizeof(void *);
43030 +
43031 +#ifdef CONFIG_PAX_RANDUSTACK
43032 + if (randomize_va_space)
43033 + bprm->p ^= random32() & ~PAGE_MASK;
43034 +#endif
43035 +
43036 return 0;
43037 err:
43038 up_write(&mm->mmap_sem);
43039 @@ -398,19 +428,7 @@ err:
43040 return err;
43041 }
43042
43043 -struct user_arg_ptr {
43044 -#ifdef CONFIG_COMPAT
43045 - bool is_compat;
43046 -#endif
43047 - union {
43048 - const char __user *const __user *native;
43049 -#ifdef CONFIG_COMPAT
43050 - compat_uptr_t __user *compat;
43051 -#endif
43052 - } ptr;
43053 -};
43054 -
43055 -static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
43056 +const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
43057 {
43058 const char __user *native;
43059
43060 @@ -419,14 +437,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
43061 compat_uptr_t compat;
43062
43063 if (get_user(compat, argv.ptr.compat + nr))
43064 - return ERR_PTR(-EFAULT);
43065 + return (const char __force_user *)ERR_PTR(-EFAULT);
43066
43067 return compat_ptr(compat);
43068 }
43069 #endif
43070
43071 if (get_user(native, argv.ptr.native + nr))
43072 - return ERR_PTR(-EFAULT);
43073 + return (const char __force_user *)ERR_PTR(-EFAULT);
43074
43075 return native;
43076 }
43077 @@ -445,7 +463,7 @@ static int count(struct user_arg_ptr argv, int max)
43078 if (!p)
43079 break;
43080
43081 - if (IS_ERR(p))
43082 + if (IS_ERR((const char __force_kernel *)p))
43083 return -EFAULT;
43084
43085 if (i++ >= max)
43086 @@ -479,7 +497,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
43087
43088 ret = -EFAULT;
43089 str = get_user_arg_ptr(argv, argc);
43090 - if (IS_ERR(str))
43091 + if (IS_ERR((const char __force_kernel *)str))
43092 goto out;
43093
43094 len = strnlen_user(str, MAX_ARG_STRLEN);
43095 @@ -561,7 +579,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
43096 int r;
43097 mm_segment_t oldfs = get_fs();
43098 struct user_arg_ptr argv = {
43099 - .ptr.native = (const char __user *const __user *)__argv,
43100 + .ptr.native = (const char __force_user *const __force_user *)__argv,
43101 };
43102
43103 set_fs(KERNEL_DS);
43104 @@ -596,7 +614,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
43105 unsigned long new_end = old_end - shift;
43106 struct mmu_gather tlb;
43107
43108 - BUG_ON(new_start > new_end);
43109 + if (new_start >= new_end || new_start < mmap_min_addr)
43110 + return -ENOMEM;
43111
43112 /*
43113 * ensure there are no vmas between where we want to go
43114 @@ -605,6 +624,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
43115 if (vma != find_vma(mm, new_start))
43116 return -EFAULT;
43117
43118 +#ifdef CONFIG_PAX_SEGMEXEC
43119 + BUG_ON(pax_find_mirror_vma(vma));
43120 +#endif
43121 +
43122 /*
43123 * cover the whole range: [new_start, old_end)
43124 */
43125 @@ -685,10 +708,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
43126 stack_top = arch_align_stack(stack_top);
43127 stack_top = PAGE_ALIGN(stack_top);
43128
43129 - if (unlikely(stack_top < mmap_min_addr) ||
43130 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
43131 - return -ENOMEM;
43132 -
43133 stack_shift = vma->vm_end - stack_top;
43134
43135 bprm->p -= stack_shift;
43136 @@ -700,8 +719,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
43137 bprm->exec -= stack_shift;
43138
43139 down_write(&mm->mmap_sem);
43140 +
43141 + /* Move stack pages down in memory. */
43142 + if (stack_shift) {
43143 + ret = shift_arg_pages(vma, stack_shift);
43144 + if (ret)
43145 + goto out_unlock;
43146 + }
43147 +
43148 vm_flags = VM_STACK_FLAGS;
43149
43150 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
43151 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
43152 + vm_flags &= ~VM_EXEC;
43153 +
43154 +#ifdef CONFIG_PAX_MPROTECT
43155 + if (mm->pax_flags & MF_PAX_MPROTECT)
43156 + vm_flags &= ~VM_MAYEXEC;
43157 +#endif
43158 +
43159 + }
43160 +#endif
43161 +
43162 /*
43163 * Adjust stack execute permissions; explicitly enable for
43164 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
43165 @@ -720,13 +759,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
43166 goto out_unlock;
43167 BUG_ON(prev != vma);
43168
43169 - /* Move stack pages down in memory. */
43170 - if (stack_shift) {
43171 - ret = shift_arg_pages(vma, stack_shift);
43172 - if (ret)
43173 - goto out_unlock;
43174 - }
43175 -
43176 /* mprotect_fixup is overkill to remove the temporary stack flags */
43177 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
43178
43179 @@ -807,7 +839,7 @@ int kernel_read(struct file *file, loff_t offset,
43180 old_fs = get_fs();
43181 set_fs(get_ds());
43182 /* The cast to a user pointer is valid due to the set_fs() */
43183 - result = vfs_read(file, (void __user *)addr, count, &pos);
43184 + result = vfs_read(file, (void __force_user *)addr, count, &pos);
43185 set_fs(old_fs);
43186 return result;
43187 }
43188 @@ -1252,7 +1284,7 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
43189 }
43190 rcu_read_unlock();
43191
43192 - if (p->fs->users > n_fs) {
43193 + if (atomic_read(&p->fs->users) > n_fs) {
43194 bprm->unsafe |= LSM_UNSAFE_SHARE;
43195 } else {
43196 res = -EAGAIN;
43197 @@ -1447,6 +1479,28 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
43198
43199 EXPORT_SYMBOL(search_binary_handler);
43200
43201 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43202 +static DEFINE_PER_CPU(u64, exec_counter);
43203 +static int __init init_exec_counters(void)
43204 +{
43205 + unsigned int cpu;
43206 +
43207 + for_each_possible_cpu(cpu) {
43208 + per_cpu(exec_counter, cpu) = (u64)cpu;
43209 + }
43210 +
43211 + return 0;
43212 +}
43213 +early_initcall(init_exec_counters);
43214 +static inline void increment_exec_counter(void)
43215 +{
43216 + BUILD_BUG_ON(NR_CPUS > (1 << 16));
43217 + current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
43218 +}
43219 +#else
43220 +static inline void increment_exec_counter(void) {}
43221 +#endif
43222 +
43223 /*
43224 * sys_execve() executes a new program.
43225 */
43226 @@ -1455,6 +1509,11 @@ static int do_execve_common(const char *filename,
43227 struct user_arg_ptr envp,
43228 struct pt_regs *regs)
43229 {
43230 +#ifdef CONFIG_GRKERNSEC
43231 + struct file *old_exec_file;
43232 + struct acl_subject_label *old_acl;
43233 + struct rlimit old_rlim[RLIM_NLIMITS];
43234 +#endif
43235 struct linux_binprm *bprm;
43236 struct file *file;
43237 struct files_struct *displaced;
43238 @@ -1462,6 +1521,8 @@ static int do_execve_common(const char *filename,
43239 int retval;
43240 const struct cred *cred = current_cred();
43241
43242 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
43243 +
43244 /*
43245 * We move the actual failure in case of RLIMIT_NPROC excess from
43246 * set*uid() to execve() because too many poorly written programs
43247 @@ -1502,12 +1563,27 @@ static int do_execve_common(const char *filename,
43248 if (IS_ERR(file))
43249 goto out_unmark;
43250
43251 + if (gr_ptrace_readexec(file, bprm->unsafe)) {
43252 + retval = -EPERM;
43253 + goto out_file;
43254 + }
43255 +
43256 sched_exec();
43257
43258 bprm->file = file;
43259 bprm->filename = filename;
43260 bprm->interp = filename;
43261
43262 + if (gr_process_user_ban()) {
43263 + retval = -EPERM;
43264 + goto out_file;
43265 + }
43266 +
43267 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
43268 + retval = -EACCES;
43269 + goto out_file;
43270 + }
43271 +
43272 retval = bprm_mm_init(bprm);
43273 if (retval)
43274 goto out_file;
43275 @@ -1524,24 +1600,65 @@ static int do_execve_common(const char *filename,
43276 if (retval < 0)
43277 goto out;
43278
43279 +#ifdef CONFIG_GRKERNSEC
43280 + old_acl = current->acl;
43281 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
43282 + old_exec_file = current->exec_file;
43283 + get_file(file);
43284 + current->exec_file = file;
43285 +#endif
43286 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43287 + /* limit suid stack to 8MB
43288 + we saved the old limits above and will restore them if this exec fails
43289 + */
43290 + if (((bprm->cred->euid != current_euid()) || (bprm->cred->egid != current_egid())) &&
43291 + (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
43292 + current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
43293 +#endif
43294 +
43295 + if (!gr_tpe_allow(file)) {
43296 + retval = -EACCES;
43297 + goto out_fail;
43298 + }
43299 +
43300 + if (gr_check_crash_exec(file)) {
43301 + retval = -EACCES;
43302 + goto out_fail;
43303 + }
43304 +
43305 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
43306 + bprm->unsafe);
43307 + if (retval < 0)
43308 + goto out_fail;
43309 +
43310 retval = copy_strings_kernel(1, &bprm->filename, bprm);
43311 if (retval < 0)
43312 - goto out;
43313 + goto out_fail;
43314
43315 bprm->exec = bprm->p;
43316 retval = copy_strings(bprm->envc, envp, bprm);
43317 if (retval < 0)
43318 - goto out;
43319 + goto out_fail;
43320
43321 retval = copy_strings(bprm->argc, argv, bprm);
43322 if (retval < 0)
43323 - goto out;
43324 + goto out_fail;
43325 +
43326 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
43327 +
43328 + gr_handle_exec_args(bprm, argv);
43329
43330 retval = search_binary_handler(bprm,regs);
43331 if (retval < 0)
43332 - goto out;
43333 + goto out_fail;
43334 +#ifdef CONFIG_GRKERNSEC
43335 + if (old_exec_file)
43336 + fput(old_exec_file);
43337 +#endif
43338
43339 /* execve succeeded */
43340 +
43341 + increment_exec_counter();
43342 current->fs->in_exec = 0;
43343 current->in_execve = 0;
43344 acct_update_integrals(current);
43345 @@ -1550,6 +1667,14 @@ static int do_execve_common(const char *filename,
43346 put_files_struct(displaced);
43347 return retval;
43348
43349 +out_fail:
43350 +#ifdef CONFIG_GRKERNSEC
43351 + current->acl = old_acl;
43352 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
43353 + fput(current->exec_file);
43354 + current->exec_file = old_exec_file;
43355 +#endif
43356 +
43357 out:
43358 if (bprm->mm) {
43359 acct_arg_size(bprm, 0);
43360 @@ -1623,7 +1748,7 @@ static int expand_corename(struct core_name *cn)
43361 {
43362 char *old_corename = cn->corename;
43363
43364 - cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
43365 + cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
43366 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
43367
43368 if (!cn->corename) {
43369 @@ -1720,7 +1845,7 @@ static int format_corename(struct core_name *cn, long signr)
43370 int pid_in_pattern = 0;
43371 int err = 0;
43372
43373 - cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
43374 + cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
43375 cn->corename = kmalloc(cn->size, GFP_KERNEL);
43376 cn->used = 0;
43377
43378 @@ -1817,6 +1942,218 @@ out:
43379 return ispipe;
43380 }
43381
43382 +int pax_check_flags(unsigned long *flags)
43383 +{
43384 + int retval = 0;
43385 +
43386 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
43387 + if (*flags & MF_PAX_SEGMEXEC)
43388 + {
43389 + *flags &= ~MF_PAX_SEGMEXEC;
43390 + retval = -EINVAL;
43391 + }
43392 +#endif
43393 +
43394 + if ((*flags & MF_PAX_PAGEEXEC)
43395 +
43396 +#ifdef CONFIG_PAX_PAGEEXEC
43397 + && (*flags & MF_PAX_SEGMEXEC)
43398 +#endif
43399 +
43400 + )
43401 + {
43402 + *flags &= ~MF_PAX_PAGEEXEC;
43403 + retval = -EINVAL;
43404 + }
43405 +
43406 + if ((*flags & MF_PAX_MPROTECT)
43407 +
43408 +#ifdef CONFIG_PAX_MPROTECT
43409 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
43410 +#endif
43411 +
43412 + )
43413 + {
43414 + *flags &= ~MF_PAX_MPROTECT;
43415 + retval = -EINVAL;
43416 + }
43417 +
43418 + if ((*flags & MF_PAX_EMUTRAMP)
43419 +
43420 +#ifdef CONFIG_PAX_EMUTRAMP
43421 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
43422 +#endif
43423 +
43424 + )
43425 + {
43426 + *flags &= ~MF_PAX_EMUTRAMP;
43427 + retval = -EINVAL;
43428 + }
43429 +
43430 + return retval;
43431 +}
43432 +
43433 +EXPORT_SYMBOL(pax_check_flags);
43434 +
43435 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
43436 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
43437 +{
43438 + struct task_struct *tsk = current;
43439 + struct mm_struct *mm = current->mm;
43440 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
43441 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
43442 + char *path_exec = NULL;
43443 + char *path_fault = NULL;
43444 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
43445 +
43446 + if (buffer_exec && buffer_fault) {
43447 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
43448 +
43449 + down_read(&mm->mmap_sem);
43450 + vma = mm->mmap;
43451 + while (vma && (!vma_exec || !vma_fault)) {
43452 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
43453 + vma_exec = vma;
43454 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
43455 + vma_fault = vma;
43456 + vma = vma->vm_next;
43457 + }
43458 + if (vma_exec) {
43459 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
43460 + if (IS_ERR(path_exec))
43461 + path_exec = "<path too long>";
43462 + else {
43463 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
43464 + if (path_exec) {
43465 + *path_exec = 0;
43466 + path_exec = buffer_exec;
43467 + } else
43468 + path_exec = "<path too long>";
43469 + }
43470 + }
43471 + if (vma_fault) {
43472 + start = vma_fault->vm_start;
43473 + end = vma_fault->vm_end;
43474 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
43475 + if (vma_fault->vm_file) {
43476 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
43477 + if (IS_ERR(path_fault))
43478 + path_fault = "<path too long>";
43479 + else {
43480 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
43481 + if (path_fault) {
43482 + *path_fault = 0;
43483 + path_fault = buffer_fault;
43484 + } else
43485 + path_fault = "<path too long>";
43486 + }
43487 + } else
43488 + path_fault = "<anonymous mapping>";
43489 + }
43490 + up_read(&mm->mmap_sem);
43491 + }
43492 + if (tsk->signal->curr_ip)
43493 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
43494 + else
43495 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
43496 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
43497 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
43498 + task_uid(tsk), task_euid(tsk), pc, sp);
43499 + free_page((unsigned long)buffer_exec);
43500 + free_page((unsigned long)buffer_fault);
43501 + pax_report_insns(regs, pc, sp);
43502 + do_coredump(SIGKILL, SIGKILL, regs);
43503 +}
43504 +#endif
43505 +
43506 +#ifdef CONFIG_PAX_REFCOUNT
43507 +void pax_report_refcount_overflow(struct pt_regs *regs)
43508 +{
43509 + if (current->signal->curr_ip)
43510 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
43511 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
43512 + else
43513 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
43514 + current->comm, task_pid_nr(current), current_uid(), current_euid());
43515 + print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
43516 + show_regs(regs);
43517 + force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
43518 +}
43519 +#endif
43520 +
43521 +#ifdef CONFIG_PAX_USERCOPY
43522 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
43523 +int object_is_on_stack(const void *obj, unsigned long len)
43524 +{
43525 + const void * const stack = task_stack_page(current);
43526 + const void * const stackend = stack + THREAD_SIZE;
43527 +
43528 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
43529 + const void *frame = NULL;
43530 + const void *oldframe;
43531 +#endif
43532 +
43533 + if (obj + len < obj)
43534 + return -1;
43535 +
43536 + if (obj + len <= stack || stackend <= obj)
43537 + return 0;
43538 +
43539 + if (obj < stack || stackend < obj + len)
43540 + return -1;
43541 +
43542 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
43543 + oldframe = __builtin_frame_address(1);
43544 + if (oldframe)
43545 + frame = __builtin_frame_address(2);
43546 + /*
43547 + low ----------------------------------------------> high
43548 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
43549 + ^----------------^
43550 + allow copies only within here
43551 + */
43552 + while (stack <= frame && frame < stackend) {
43553 + /* if obj + len extends past the last frame, this
43554 + check won't pass and the next frame will be 0,
43555 + causing us to bail out and correctly report
43556 + the copy as invalid
43557 + */
43558 + if (obj + len <= frame)
43559 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
43560 + oldframe = frame;
43561 + frame = *(const void * const *)frame;
43562 + }
43563 + return -1;
43564 +#else
43565 + return 1;
43566 +#endif
43567 +}
43568 +
43569 +__noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
43570 +{
43571 + if (current->signal->curr_ip)
43572 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
43573 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
43574 + else
43575 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
43576 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
43577 + dump_stack();
43578 + gr_handle_kernel_exploit();
43579 + do_group_exit(SIGKILL);
43580 +}
43581 +#endif
43582 +
43583 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
43584 +void pax_track_stack(void)
43585 +{
43586 + unsigned long sp = (unsigned long)&sp;
43587 + if (sp < current_thread_info()->lowest_stack &&
43588 + sp > (unsigned long)task_stack_page(current))
43589 + current_thread_info()->lowest_stack = sp;
43590 +}
43591 +EXPORT_SYMBOL(pax_track_stack);
43592 +#endif
43593 +
43594 static int zap_process(struct task_struct *start, int exit_code)
43595 {
43596 struct task_struct *t;
43597 @@ -2014,17 +2351,17 @@ static void wait_for_dump_helpers(struct file *file)
43598 pipe = file->f_path.dentry->d_inode->i_pipe;
43599
43600 pipe_lock(pipe);
43601 - pipe->readers++;
43602 - pipe->writers--;
43603 + atomic_inc(&pipe->readers);
43604 + atomic_dec(&pipe->writers);
43605
43606 - while ((pipe->readers > 1) && (!signal_pending(current))) {
43607 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
43608 wake_up_interruptible_sync(&pipe->wait);
43609 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
43610 pipe_wait(pipe);
43611 }
43612
43613 - pipe->readers--;
43614 - pipe->writers++;
43615 + atomic_dec(&pipe->readers);
43616 + atomic_inc(&pipe->writers);
43617 pipe_unlock(pipe);
43618
43619 }
43620 @@ -2085,7 +2422,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
43621 int retval = 0;
43622 int flag = 0;
43623 int ispipe;
43624 - static atomic_t core_dump_count = ATOMIC_INIT(0);
43625 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
43626 struct coredump_params cprm = {
43627 .signr = signr,
43628 .regs = regs,
43629 @@ -2100,6 +2437,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
43630
43631 audit_core_dumps(signr);
43632
43633 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
43634 + gr_handle_brute_attach(current, cprm.mm_flags);
43635 +
43636 binfmt = mm->binfmt;
43637 if (!binfmt || !binfmt->core_dump)
43638 goto fail;
43639 @@ -2167,7 +2507,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
43640 }
43641 cprm.limit = RLIM_INFINITY;
43642
43643 - dump_count = atomic_inc_return(&core_dump_count);
43644 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
43645 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
43646 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
43647 task_tgid_vnr(current), current->comm);
43648 @@ -2194,6 +2534,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
43649 } else {
43650 struct inode *inode;
43651
43652 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
43653 +
43654 if (cprm.limit < binfmt->min_coredump)
43655 goto fail_unlock;
43656
43657 @@ -2237,7 +2579,7 @@ close_fail:
43658 filp_close(cprm.file, NULL);
43659 fail_dropcount:
43660 if (ispipe)
43661 - atomic_dec(&core_dump_count);
43662 + atomic_dec_unchecked(&core_dump_count);
43663 fail_unlock:
43664 kfree(cn.corename);
43665 fail_corename:
43666 @@ -2256,7 +2598,7 @@ fail:
43667 */
43668 int dump_write(struct file *file, const void *addr, int nr)
43669 {
43670 - return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
43671 + return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
43672 }
43673 EXPORT_SYMBOL(dump_write);
43674
43675 diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
43676 index a8cbe1b..fed04cb 100644
43677 --- a/fs/ext2/balloc.c
43678 +++ b/fs/ext2/balloc.c
43679 @@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
43680
43681 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
43682 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
43683 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
43684 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
43685 sbi->s_resuid != current_fsuid() &&
43686 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
43687 return 0;
43688 diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
43689 index a203892..4e64db5 100644
43690 --- a/fs/ext3/balloc.c
43691 +++ b/fs/ext3/balloc.c
43692 @@ -1446,9 +1446,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
43693
43694 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
43695 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
43696 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
43697 + if (free_blocks < root_blocks + 1 &&
43698 !use_reservation && sbi->s_resuid != current_fsuid() &&
43699 - (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
43700 + (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid)) &&
43701 + !capable_nolog(CAP_SYS_RESOURCE)) {
43702 return 0;
43703 }
43704 return 1;
43705 diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
43706 index f9e2cd8..bfdc476 100644
43707 --- a/fs/ext4/balloc.c
43708 +++ b/fs/ext4/balloc.c
43709 @@ -438,8 +438,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
43710 /* Hm, nope. Are (enough) root reserved clusters available? */
43711 if (sbi->s_resuid == current_fsuid() ||
43712 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
43713 - capable(CAP_SYS_RESOURCE) ||
43714 - (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
43715 + (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
43716 + capable_nolog(CAP_SYS_RESOURCE)) {
43717
43718 if (free_clusters >= (nclusters + dirty_clusters))
43719 return 1;
43720 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
43721 index 513004f..2591a6b 100644
43722 --- a/fs/ext4/ext4.h
43723 +++ b/fs/ext4/ext4.h
43724 @@ -1218,19 +1218,19 @@ struct ext4_sb_info {
43725 unsigned long s_mb_last_start;
43726
43727 /* stats for buddy allocator */
43728 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
43729 - atomic_t s_bal_success; /* we found long enough chunks */
43730 - atomic_t s_bal_allocated; /* in blocks */
43731 - atomic_t s_bal_ex_scanned; /* total extents scanned */
43732 - atomic_t s_bal_goals; /* goal hits */
43733 - atomic_t s_bal_breaks; /* too long searches */
43734 - atomic_t s_bal_2orders; /* 2^order hits */
43735 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
43736 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
43737 + atomic_unchecked_t s_bal_allocated; /* in blocks */
43738 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
43739 + atomic_unchecked_t s_bal_goals; /* goal hits */
43740 + atomic_unchecked_t s_bal_breaks; /* too long searches */
43741 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
43742 spinlock_t s_bal_lock;
43743 unsigned long s_mb_buddies_generated;
43744 unsigned long long s_mb_generation_time;
43745 - atomic_t s_mb_lost_chunks;
43746 - atomic_t s_mb_preallocated;
43747 - atomic_t s_mb_discarded;
43748 + atomic_unchecked_t s_mb_lost_chunks;
43749 + atomic_unchecked_t s_mb_preallocated;
43750 + atomic_unchecked_t s_mb_discarded;
43751 atomic_t s_lock_busy;
43752
43753 /* locality groups */
43754 diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
43755 index cb990b2..4820141 100644
43756 --- a/fs/ext4/mballoc.c
43757 +++ b/fs/ext4/mballoc.c
43758 @@ -1794,7 +1794,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
43759 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
43760
43761 if (EXT4_SB(sb)->s_mb_stats)
43762 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
43763 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
43764
43765 break;
43766 }
43767 @@ -2088,7 +2088,7 @@ repeat:
43768 ac->ac_status = AC_STATUS_CONTINUE;
43769 ac->ac_flags |= EXT4_MB_HINT_FIRST;
43770 cr = 3;
43771 - atomic_inc(&sbi->s_mb_lost_chunks);
43772 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
43773 goto repeat;
43774 }
43775 }
43776 @@ -2592,25 +2592,25 @@ int ext4_mb_release(struct super_block *sb)
43777 if (sbi->s_mb_stats) {
43778 ext4_msg(sb, KERN_INFO,
43779 "mballoc: %u blocks %u reqs (%u success)",
43780 - atomic_read(&sbi->s_bal_allocated),
43781 - atomic_read(&sbi->s_bal_reqs),
43782 - atomic_read(&sbi->s_bal_success));
43783 + atomic_read_unchecked(&sbi->s_bal_allocated),
43784 + atomic_read_unchecked(&sbi->s_bal_reqs),
43785 + atomic_read_unchecked(&sbi->s_bal_success));
43786 ext4_msg(sb, KERN_INFO,
43787 "mballoc: %u extents scanned, %u goal hits, "
43788 "%u 2^N hits, %u breaks, %u lost",
43789 - atomic_read(&sbi->s_bal_ex_scanned),
43790 - atomic_read(&sbi->s_bal_goals),
43791 - atomic_read(&sbi->s_bal_2orders),
43792 - atomic_read(&sbi->s_bal_breaks),
43793 - atomic_read(&sbi->s_mb_lost_chunks));
43794 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
43795 + atomic_read_unchecked(&sbi->s_bal_goals),
43796 + atomic_read_unchecked(&sbi->s_bal_2orders),
43797 + atomic_read_unchecked(&sbi->s_bal_breaks),
43798 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
43799 ext4_msg(sb, KERN_INFO,
43800 "mballoc: %lu generated and it took %Lu",
43801 sbi->s_mb_buddies_generated,
43802 sbi->s_mb_generation_time);
43803 ext4_msg(sb, KERN_INFO,
43804 "mballoc: %u preallocated, %u discarded",
43805 - atomic_read(&sbi->s_mb_preallocated),
43806 - atomic_read(&sbi->s_mb_discarded));
43807 + atomic_read_unchecked(&sbi->s_mb_preallocated),
43808 + atomic_read_unchecked(&sbi->s_mb_discarded));
43809 }
43810
43811 free_percpu(sbi->s_locality_groups);
43812 @@ -3096,16 +3096,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
43813 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
43814
43815 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
43816 - atomic_inc(&sbi->s_bal_reqs);
43817 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
43818 + atomic_inc_unchecked(&sbi->s_bal_reqs);
43819 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
43820 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
43821 - atomic_inc(&sbi->s_bal_success);
43822 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
43823 + atomic_inc_unchecked(&sbi->s_bal_success);
43824 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
43825 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
43826 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
43827 - atomic_inc(&sbi->s_bal_goals);
43828 + atomic_inc_unchecked(&sbi->s_bal_goals);
43829 if (ac->ac_found > sbi->s_mb_max_to_scan)
43830 - atomic_inc(&sbi->s_bal_breaks);
43831 + atomic_inc_unchecked(&sbi->s_bal_breaks);
43832 }
43833
43834 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
43835 @@ -3509,7 +3509,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
43836 trace_ext4_mb_new_inode_pa(ac, pa);
43837
43838 ext4_mb_use_inode_pa(ac, pa);
43839 - atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
43840 + atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
43841
43842 ei = EXT4_I(ac->ac_inode);
43843 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
43844 @@ -3569,7 +3569,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
43845 trace_ext4_mb_new_group_pa(ac, pa);
43846
43847 ext4_mb_use_group_pa(ac, pa);
43848 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
43849 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
43850
43851 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
43852 lg = ac->ac_lg;
43853 @@ -3658,7 +3658,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
43854 * from the bitmap and continue.
43855 */
43856 }
43857 - atomic_add(free, &sbi->s_mb_discarded);
43858 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
43859
43860 return err;
43861 }
43862 @@ -3676,7 +3676,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
43863 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
43864 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
43865 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
43866 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
43867 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
43868 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
43869
43870 return 0;
43871 diff --git a/fs/fcntl.c b/fs/fcntl.c
43872 index 22764c7..86372c9 100644
43873 --- a/fs/fcntl.c
43874 +++ b/fs/fcntl.c
43875 @@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
43876 if (err)
43877 return err;
43878
43879 + if (gr_handle_chroot_fowner(pid, type))
43880 + return -ENOENT;
43881 + if (gr_check_protected_task_fowner(pid, type))
43882 + return -EACCES;
43883 +
43884 f_modown(filp, pid, type, force);
43885 return 0;
43886 }
43887 @@ -266,7 +271,7 @@ pid_t f_getown(struct file *filp)
43888
43889 static int f_setown_ex(struct file *filp, unsigned long arg)
43890 {
43891 - struct f_owner_ex * __user owner_p = (void * __user)arg;
43892 + struct f_owner_ex __user *owner_p = (void __user *)arg;
43893 struct f_owner_ex owner;
43894 struct pid *pid;
43895 int type;
43896 @@ -306,7 +311,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
43897
43898 static int f_getown_ex(struct file *filp, unsigned long arg)
43899 {
43900 - struct f_owner_ex * __user owner_p = (void * __user)arg;
43901 + struct f_owner_ex __user *owner_p = (void __user *)arg;
43902 struct f_owner_ex owner;
43903 int ret = 0;
43904
43905 @@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
43906 switch (cmd) {
43907 case F_DUPFD:
43908 case F_DUPFD_CLOEXEC:
43909 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
43910 if (arg >= rlimit(RLIMIT_NOFILE))
43911 break;
43912 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
43913 diff --git a/fs/fifo.c b/fs/fifo.c
43914 index b1a524d..4ee270e 100644
43915 --- a/fs/fifo.c
43916 +++ b/fs/fifo.c
43917 @@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
43918 */
43919 filp->f_op = &read_pipefifo_fops;
43920 pipe->r_counter++;
43921 - if (pipe->readers++ == 0)
43922 + if (atomic_inc_return(&pipe->readers) == 1)
43923 wake_up_partner(inode);
43924
43925 - if (!pipe->writers) {
43926 + if (!atomic_read(&pipe->writers)) {
43927 if ((filp->f_flags & O_NONBLOCK)) {
43928 /* suppress POLLHUP until we have
43929 * seen a writer */
43930 @@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
43931 * errno=ENXIO when there is no process reading the FIFO.
43932 */
43933 ret = -ENXIO;
43934 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
43935 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
43936 goto err;
43937
43938 filp->f_op = &write_pipefifo_fops;
43939 pipe->w_counter++;
43940 - if (!pipe->writers++)
43941 + if (atomic_inc_return(&pipe->writers) == 1)
43942 wake_up_partner(inode);
43943
43944 - if (!pipe->readers) {
43945 + if (!atomic_read(&pipe->readers)) {
43946 wait_for_partner(inode, &pipe->r_counter);
43947 if (signal_pending(current))
43948 goto err_wr;
43949 @@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
43950 */
43951 filp->f_op = &rdwr_pipefifo_fops;
43952
43953 - pipe->readers++;
43954 - pipe->writers++;
43955 + atomic_inc(&pipe->readers);
43956 + atomic_inc(&pipe->writers);
43957 pipe->r_counter++;
43958 pipe->w_counter++;
43959 - if (pipe->readers == 1 || pipe->writers == 1)
43960 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
43961 wake_up_partner(inode);
43962 break;
43963
43964 @@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
43965 return 0;
43966
43967 err_rd:
43968 - if (!--pipe->readers)
43969 + if (atomic_dec_and_test(&pipe->readers))
43970 wake_up_interruptible(&pipe->wait);
43971 ret = -ERESTARTSYS;
43972 goto err;
43973
43974 err_wr:
43975 - if (!--pipe->writers)
43976 + if (atomic_dec_and_test(&pipe->writers))
43977 wake_up_interruptible(&pipe->wait);
43978 ret = -ERESTARTSYS;
43979 goto err;
43980
43981 err:
43982 - if (!pipe->readers && !pipe->writers)
43983 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
43984 free_pipe_info(inode);
43985
43986 err_nocleanup:
43987 diff --git a/fs/file.c b/fs/file.c
43988 index 4c6992d..104cdea 100644
43989 --- a/fs/file.c
43990 +++ b/fs/file.c
43991 @@ -15,6 +15,7 @@
43992 #include <linux/slab.h>
43993 #include <linux/vmalloc.h>
43994 #include <linux/file.h>
43995 +#include <linux/security.h>
43996 #include <linux/fdtable.h>
43997 #include <linux/bitops.h>
43998 #include <linux/interrupt.h>
43999 @@ -254,6 +255,7 @@ int expand_files(struct files_struct *files, int nr)
44000 * N.B. For clone tasks sharing a files structure, this test
44001 * will limit the total number of files that can be opened.
44002 */
44003 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
44004 if (nr >= rlimit(RLIMIT_NOFILE))
44005 return -EMFILE;
44006
44007 diff --git a/fs/filesystems.c b/fs/filesystems.c
44008 index 96f2428..f5eeb8e 100644
44009 --- a/fs/filesystems.c
44010 +++ b/fs/filesystems.c
44011 @@ -273,7 +273,12 @@ struct file_system_type *get_fs_type(const char *name)
44012 int len = dot ? dot - name : strlen(name);
44013
44014 fs = __get_fs_type(name, len);
44015 +
44016 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
44017 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
44018 +#else
44019 if (!fs && (request_module("%.*s", len, name) == 0))
44020 +#endif
44021 fs = __get_fs_type(name, len);
44022
44023 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
44024 diff --git a/fs/fs_struct.c b/fs/fs_struct.c
44025 index 78b519c..a8b4979 100644
44026 --- a/fs/fs_struct.c
44027 +++ b/fs/fs_struct.c
44028 @@ -4,6 +4,7 @@
44029 #include <linux/path.h>
44030 #include <linux/slab.h>
44031 #include <linux/fs_struct.h>
44032 +#include <linux/grsecurity.h>
44033 #include "internal.h"
44034
44035 static inline void path_get_longterm(struct path *path)
44036 @@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
44037 old_root = fs->root;
44038 fs->root = *path;
44039 path_get_longterm(path);
44040 + gr_set_chroot_entries(current, path);
44041 write_seqcount_end(&fs->seq);
44042 spin_unlock(&fs->lock);
44043 if (old_root.dentry)
44044 @@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
44045 && fs->root.mnt == old_root->mnt) {
44046 path_get_longterm(new_root);
44047 fs->root = *new_root;
44048 + gr_set_chroot_entries(p, new_root);
44049 count++;
44050 }
44051 if (fs->pwd.dentry == old_root->dentry
44052 @@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
44053 spin_lock(&fs->lock);
44054 write_seqcount_begin(&fs->seq);
44055 tsk->fs = NULL;
44056 - kill = !--fs->users;
44057 + gr_clear_chroot_entries(tsk);
44058 + kill = !atomic_dec_return(&fs->users);
44059 write_seqcount_end(&fs->seq);
44060 spin_unlock(&fs->lock);
44061 task_unlock(tsk);
44062 @@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
44063 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
44064 /* We don't need to lock fs - think why ;-) */
44065 if (fs) {
44066 - fs->users = 1;
44067 + atomic_set(&fs->users, 1);
44068 fs->in_exec = 0;
44069 spin_lock_init(&fs->lock);
44070 seqcount_init(&fs->seq);
44071 @@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
44072 spin_lock(&old->lock);
44073 fs->root = old->root;
44074 path_get_longterm(&fs->root);
44075 + /* instead of calling gr_set_chroot_entries here,
44076 + we call it from every caller of this function
44077 + */
44078 fs->pwd = old->pwd;
44079 path_get_longterm(&fs->pwd);
44080 spin_unlock(&old->lock);
44081 @@ -150,8 +157,9 @@ int unshare_fs_struct(void)
44082
44083 task_lock(current);
44084 spin_lock(&fs->lock);
44085 - kill = !--fs->users;
44086 + kill = !atomic_dec_return(&fs->users);
44087 current->fs = new_fs;
44088 + gr_set_chroot_entries(current, &new_fs->root);
44089 spin_unlock(&fs->lock);
44090 task_unlock(current);
44091
44092 @@ -164,13 +172,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
44093
44094 int current_umask(void)
44095 {
44096 - return current->fs->umask;
44097 + return current->fs->umask | gr_acl_umask();
44098 }
44099 EXPORT_SYMBOL(current_umask);
44100
44101 /* to be mentioned only in INIT_TASK */
44102 struct fs_struct init_fs = {
44103 - .users = 1,
44104 + .users = ATOMIC_INIT(1),
44105 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
44106 .seq = SEQCNT_ZERO,
44107 .umask = 0022,
44108 @@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
44109 task_lock(current);
44110
44111 spin_lock(&init_fs.lock);
44112 - init_fs.users++;
44113 + atomic_inc(&init_fs.users);
44114 spin_unlock(&init_fs.lock);
44115
44116 spin_lock(&fs->lock);
44117 current->fs = &init_fs;
44118 - kill = !--fs->users;
44119 + gr_set_chroot_entries(current, &current->fs->root);
44120 + kill = !atomic_dec_return(&fs->users);
44121 spin_unlock(&fs->lock);
44122
44123 task_unlock(current);
44124 diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
44125 index 9905350..02eaec4 100644
44126 --- a/fs/fscache/cookie.c
44127 +++ b/fs/fscache/cookie.c
44128 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
44129 parent ? (char *) parent->def->name : "<no-parent>",
44130 def->name, netfs_data);
44131
44132 - fscache_stat(&fscache_n_acquires);
44133 + fscache_stat_unchecked(&fscache_n_acquires);
44134
44135 /* if there's no parent cookie, then we don't create one here either */
44136 if (!parent) {
44137 - fscache_stat(&fscache_n_acquires_null);
44138 + fscache_stat_unchecked(&fscache_n_acquires_null);
44139 _leave(" [no parent]");
44140 return NULL;
44141 }
44142 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
44143 /* allocate and initialise a cookie */
44144 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
44145 if (!cookie) {
44146 - fscache_stat(&fscache_n_acquires_oom);
44147 + fscache_stat_unchecked(&fscache_n_acquires_oom);
44148 _leave(" [ENOMEM]");
44149 return NULL;
44150 }
44151 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
44152
44153 switch (cookie->def->type) {
44154 case FSCACHE_COOKIE_TYPE_INDEX:
44155 - fscache_stat(&fscache_n_cookie_index);
44156 + fscache_stat_unchecked(&fscache_n_cookie_index);
44157 break;
44158 case FSCACHE_COOKIE_TYPE_DATAFILE:
44159 - fscache_stat(&fscache_n_cookie_data);
44160 + fscache_stat_unchecked(&fscache_n_cookie_data);
44161 break;
44162 default:
44163 - fscache_stat(&fscache_n_cookie_special);
44164 + fscache_stat_unchecked(&fscache_n_cookie_special);
44165 break;
44166 }
44167
44168 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
44169 if (fscache_acquire_non_index_cookie(cookie) < 0) {
44170 atomic_dec(&parent->n_children);
44171 __fscache_cookie_put(cookie);
44172 - fscache_stat(&fscache_n_acquires_nobufs);
44173 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
44174 _leave(" = NULL");
44175 return NULL;
44176 }
44177 }
44178
44179 - fscache_stat(&fscache_n_acquires_ok);
44180 + fscache_stat_unchecked(&fscache_n_acquires_ok);
44181 _leave(" = %p", cookie);
44182 return cookie;
44183 }
44184 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
44185 cache = fscache_select_cache_for_object(cookie->parent);
44186 if (!cache) {
44187 up_read(&fscache_addremove_sem);
44188 - fscache_stat(&fscache_n_acquires_no_cache);
44189 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
44190 _leave(" = -ENOMEDIUM [no cache]");
44191 return -ENOMEDIUM;
44192 }
44193 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
44194 object = cache->ops->alloc_object(cache, cookie);
44195 fscache_stat_d(&fscache_n_cop_alloc_object);
44196 if (IS_ERR(object)) {
44197 - fscache_stat(&fscache_n_object_no_alloc);
44198 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
44199 ret = PTR_ERR(object);
44200 goto error;
44201 }
44202
44203 - fscache_stat(&fscache_n_object_alloc);
44204 + fscache_stat_unchecked(&fscache_n_object_alloc);
44205
44206 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
44207
44208 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
44209 struct fscache_object *object;
44210 struct hlist_node *_p;
44211
44212 - fscache_stat(&fscache_n_updates);
44213 + fscache_stat_unchecked(&fscache_n_updates);
44214
44215 if (!cookie) {
44216 - fscache_stat(&fscache_n_updates_null);
44217 + fscache_stat_unchecked(&fscache_n_updates_null);
44218 _leave(" [no cookie]");
44219 return;
44220 }
44221 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
44222 struct fscache_object *object;
44223 unsigned long event;
44224
44225 - fscache_stat(&fscache_n_relinquishes);
44226 + fscache_stat_unchecked(&fscache_n_relinquishes);
44227 if (retire)
44228 - fscache_stat(&fscache_n_relinquishes_retire);
44229 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
44230
44231 if (!cookie) {
44232 - fscache_stat(&fscache_n_relinquishes_null);
44233 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
44234 _leave(" [no cookie]");
44235 return;
44236 }
44237 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
44238
44239 /* wait for the cookie to finish being instantiated (or to fail) */
44240 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
44241 - fscache_stat(&fscache_n_relinquishes_waitcrt);
44242 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
44243 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
44244 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
44245 }
44246 diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
44247 index f6aad48..88dcf26 100644
44248 --- a/fs/fscache/internal.h
44249 +++ b/fs/fscache/internal.h
44250 @@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
44251 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
44252 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
44253
44254 -extern atomic_t fscache_n_op_pend;
44255 -extern atomic_t fscache_n_op_run;
44256 -extern atomic_t fscache_n_op_enqueue;
44257 -extern atomic_t fscache_n_op_deferred_release;
44258 -extern atomic_t fscache_n_op_release;
44259 -extern atomic_t fscache_n_op_gc;
44260 -extern atomic_t fscache_n_op_cancelled;
44261 -extern atomic_t fscache_n_op_rejected;
44262 +extern atomic_unchecked_t fscache_n_op_pend;
44263 +extern atomic_unchecked_t fscache_n_op_run;
44264 +extern atomic_unchecked_t fscache_n_op_enqueue;
44265 +extern atomic_unchecked_t fscache_n_op_deferred_release;
44266 +extern atomic_unchecked_t fscache_n_op_release;
44267 +extern atomic_unchecked_t fscache_n_op_gc;
44268 +extern atomic_unchecked_t fscache_n_op_cancelled;
44269 +extern atomic_unchecked_t fscache_n_op_rejected;
44270
44271 -extern atomic_t fscache_n_attr_changed;
44272 -extern atomic_t fscache_n_attr_changed_ok;
44273 -extern atomic_t fscache_n_attr_changed_nobufs;
44274 -extern atomic_t fscache_n_attr_changed_nomem;
44275 -extern atomic_t fscache_n_attr_changed_calls;
44276 +extern atomic_unchecked_t fscache_n_attr_changed;
44277 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
44278 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
44279 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
44280 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
44281
44282 -extern atomic_t fscache_n_allocs;
44283 -extern atomic_t fscache_n_allocs_ok;
44284 -extern atomic_t fscache_n_allocs_wait;
44285 -extern atomic_t fscache_n_allocs_nobufs;
44286 -extern atomic_t fscache_n_allocs_intr;
44287 -extern atomic_t fscache_n_allocs_object_dead;
44288 -extern atomic_t fscache_n_alloc_ops;
44289 -extern atomic_t fscache_n_alloc_op_waits;
44290 +extern atomic_unchecked_t fscache_n_allocs;
44291 +extern atomic_unchecked_t fscache_n_allocs_ok;
44292 +extern atomic_unchecked_t fscache_n_allocs_wait;
44293 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
44294 +extern atomic_unchecked_t fscache_n_allocs_intr;
44295 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
44296 +extern atomic_unchecked_t fscache_n_alloc_ops;
44297 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
44298
44299 -extern atomic_t fscache_n_retrievals;
44300 -extern atomic_t fscache_n_retrievals_ok;
44301 -extern atomic_t fscache_n_retrievals_wait;
44302 -extern atomic_t fscache_n_retrievals_nodata;
44303 -extern atomic_t fscache_n_retrievals_nobufs;
44304 -extern atomic_t fscache_n_retrievals_intr;
44305 -extern atomic_t fscache_n_retrievals_nomem;
44306 -extern atomic_t fscache_n_retrievals_object_dead;
44307 -extern atomic_t fscache_n_retrieval_ops;
44308 -extern atomic_t fscache_n_retrieval_op_waits;
44309 +extern atomic_unchecked_t fscache_n_retrievals;
44310 +extern atomic_unchecked_t fscache_n_retrievals_ok;
44311 +extern atomic_unchecked_t fscache_n_retrievals_wait;
44312 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
44313 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
44314 +extern atomic_unchecked_t fscache_n_retrievals_intr;
44315 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
44316 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
44317 +extern atomic_unchecked_t fscache_n_retrieval_ops;
44318 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
44319
44320 -extern atomic_t fscache_n_stores;
44321 -extern atomic_t fscache_n_stores_ok;
44322 -extern atomic_t fscache_n_stores_again;
44323 -extern atomic_t fscache_n_stores_nobufs;
44324 -extern atomic_t fscache_n_stores_oom;
44325 -extern atomic_t fscache_n_store_ops;
44326 -extern atomic_t fscache_n_store_calls;
44327 -extern atomic_t fscache_n_store_pages;
44328 -extern atomic_t fscache_n_store_radix_deletes;
44329 -extern atomic_t fscache_n_store_pages_over_limit;
44330 +extern atomic_unchecked_t fscache_n_stores;
44331 +extern atomic_unchecked_t fscache_n_stores_ok;
44332 +extern atomic_unchecked_t fscache_n_stores_again;
44333 +extern atomic_unchecked_t fscache_n_stores_nobufs;
44334 +extern atomic_unchecked_t fscache_n_stores_oom;
44335 +extern atomic_unchecked_t fscache_n_store_ops;
44336 +extern atomic_unchecked_t fscache_n_store_calls;
44337 +extern atomic_unchecked_t fscache_n_store_pages;
44338 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
44339 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
44340
44341 -extern atomic_t fscache_n_store_vmscan_not_storing;
44342 -extern atomic_t fscache_n_store_vmscan_gone;
44343 -extern atomic_t fscache_n_store_vmscan_busy;
44344 -extern atomic_t fscache_n_store_vmscan_cancelled;
44345 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
44346 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
44347 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
44348 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
44349
44350 -extern atomic_t fscache_n_marks;
44351 -extern atomic_t fscache_n_uncaches;
44352 +extern atomic_unchecked_t fscache_n_marks;
44353 +extern atomic_unchecked_t fscache_n_uncaches;
44354
44355 -extern atomic_t fscache_n_acquires;
44356 -extern atomic_t fscache_n_acquires_null;
44357 -extern atomic_t fscache_n_acquires_no_cache;
44358 -extern atomic_t fscache_n_acquires_ok;
44359 -extern atomic_t fscache_n_acquires_nobufs;
44360 -extern atomic_t fscache_n_acquires_oom;
44361 +extern atomic_unchecked_t fscache_n_acquires;
44362 +extern atomic_unchecked_t fscache_n_acquires_null;
44363 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
44364 +extern atomic_unchecked_t fscache_n_acquires_ok;
44365 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
44366 +extern atomic_unchecked_t fscache_n_acquires_oom;
44367
44368 -extern atomic_t fscache_n_updates;
44369 -extern atomic_t fscache_n_updates_null;
44370 -extern atomic_t fscache_n_updates_run;
44371 +extern atomic_unchecked_t fscache_n_updates;
44372 +extern atomic_unchecked_t fscache_n_updates_null;
44373 +extern atomic_unchecked_t fscache_n_updates_run;
44374
44375 -extern atomic_t fscache_n_relinquishes;
44376 -extern atomic_t fscache_n_relinquishes_null;
44377 -extern atomic_t fscache_n_relinquishes_waitcrt;
44378 -extern atomic_t fscache_n_relinquishes_retire;
44379 +extern atomic_unchecked_t fscache_n_relinquishes;
44380 +extern atomic_unchecked_t fscache_n_relinquishes_null;
44381 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
44382 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
44383
44384 -extern atomic_t fscache_n_cookie_index;
44385 -extern atomic_t fscache_n_cookie_data;
44386 -extern atomic_t fscache_n_cookie_special;
44387 +extern atomic_unchecked_t fscache_n_cookie_index;
44388 +extern atomic_unchecked_t fscache_n_cookie_data;
44389 +extern atomic_unchecked_t fscache_n_cookie_special;
44390
44391 -extern atomic_t fscache_n_object_alloc;
44392 -extern atomic_t fscache_n_object_no_alloc;
44393 -extern atomic_t fscache_n_object_lookups;
44394 -extern atomic_t fscache_n_object_lookups_negative;
44395 -extern atomic_t fscache_n_object_lookups_positive;
44396 -extern atomic_t fscache_n_object_lookups_timed_out;
44397 -extern atomic_t fscache_n_object_created;
44398 -extern atomic_t fscache_n_object_avail;
44399 -extern atomic_t fscache_n_object_dead;
44400 +extern atomic_unchecked_t fscache_n_object_alloc;
44401 +extern atomic_unchecked_t fscache_n_object_no_alloc;
44402 +extern atomic_unchecked_t fscache_n_object_lookups;
44403 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
44404 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
44405 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
44406 +extern atomic_unchecked_t fscache_n_object_created;
44407 +extern atomic_unchecked_t fscache_n_object_avail;
44408 +extern atomic_unchecked_t fscache_n_object_dead;
44409
44410 -extern atomic_t fscache_n_checkaux_none;
44411 -extern atomic_t fscache_n_checkaux_okay;
44412 -extern atomic_t fscache_n_checkaux_update;
44413 -extern atomic_t fscache_n_checkaux_obsolete;
44414 +extern atomic_unchecked_t fscache_n_checkaux_none;
44415 +extern atomic_unchecked_t fscache_n_checkaux_okay;
44416 +extern atomic_unchecked_t fscache_n_checkaux_update;
44417 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
44418
44419 extern atomic_t fscache_n_cop_alloc_object;
44420 extern atomic_t fscache_n_cop_lookup_object;
44421 @@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t *stat)
44422 atomic_inc(stat);
44423 }
44424
44425 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
44426 +{
44427 + atomic_inc_unchecked(stat);
44428 +}
44429 +
44430 static inline void fscache_stat_d(atomic_t *stat)
44431 {
44432 atomic_dec(stat);
44433 @@ -267,6 +272,7 @@ extern const struct file_operations fscache_stats_fops;
44434
44435 #define __fscache_stat(stat) (NULL)
44436 #define fscache_stat(stat) do {} while (0)
44437 +#define fscache_stat_unchecked(stat) do {} while (0)
44438 #define fscache_stat_d(stat) do {} while (0)
44439 #endif
44440
44441 diff --git a/fs/fscache/object.c b/fs/fscache/object.c
44442 index b6b897c..0ffff9c 100644
44443 --- a/fs/fscache/object.c
44444 +++ b/fs/fscache/object.c
44445 @@ -128,7 +128,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
44446 /* update the object metadata on disk */
44447 case FSCACHE_OBJECT_UPDATING:
44448 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
44449 - fscache_stat(&fscache_n_updates_run);
44450 + fscache_stat_unchecked(&fscache_n_updates_run);
44451 fscache_stat(&fscache_n_cop_update_object);
44452 object->cache->ops->update_object(object);
44453 fscache_stat_d(&fscache_n_cop_update_object);
44454 @@ -217,7 +217,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
44455 spin_lock(&object->lock);
44456 object->state = FSCACHE_OBJECT_DEAD;
44457 spin_unlock(&object->lock);
44458 - fscache_stat(&fscache_n_object_dead);
44459 + fscache_stat_unchecked(&fscache_n_object_dead);
44460 goto terminal_transit;
44461
44462 /* handle the parent cache of this object being withdrawn from
44463 @@ -232,7 +232,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
44464 spin_lock(&object->lock);
44465 object->state = FSCACHE_OBJECT_DEAD;
44466 spin_unlock(&object->lock);
44467 - fscache_stat(&fscache_n_object_dead);
44468 + fscache_stat_unchecked(&fscache_n_object_dead);
44469 goto terminal_transit;
44470
44471 /* complain about the object being woken up once it is
44472 @@ -461,7 +461,7 @@ static void fscache_lookup_object(struct fscache_object *object)
44473 parent->cookie->def->name, cookie->def->name,
44474 object->cache->tag->name);
44475
44476 - fscache_stat(&fscache_n_object_lookups);
44477 + fscache_stat_unchecked(&fscache_n_object_lookups);
44478 fscache_stat(&fscache_n_cop_lookup_object);
44479 ret = object->cache->ops->lookup_object(object);
44480 fscache_stat_d(&fscache_n_cop_lookup_object);
44481 @@ -472,7 +472,7 @@ static void fscache_lookup_object(struct fscache_object *object)
44482 if (ret == -ETIMEDOUT) {
44483 /* probably stuck behind another object, so move this one to
44484 * the back of the queue */
44485 - fscache_stat(&fscache_n_object_lookups_timed_out);
44486 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
44487 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
44488 }
44489
44490 @@ -495,7 +495,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
44491
44492 spin_lock(&object->lock);
44493 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
44494 - fscache_stat(&fscache_n_object_lookups_negative);
44495 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
44496
44497 /* transit here to allow write requests to begin stacking up
44498 * and read requests to begin returning ENODATA */
44499 @@ -541,7 +541,7 @@ void fscache_obtained_object(struct fscache_object *object)
44500 * result, in which case there may be data available */
44501 spin_lock(&object->lock);
44502 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
44503 - fscache_stat(&fscache_n_object_lookups_positive);
44504 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
44505
44506 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
44507
44508 @@ -555,7 +555,7 @@ void fscache_obtained_object(struct fscache_object *object)
44509 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
44510 } else {
44511 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
44512 - fscache_stat(&fscache_n_object_created);
44513 + fscache_stat_unchecked(&fscache_n_object_created);
44514
44515 object->state = FSCACHE_OBJECT_AVAILABLE;
44516 spin_unlock(&object->lock);
44517 @@ -602,7 +602,7 @@ static void fscache_object_available(struct fscache_object *object)
44518 fscache_enqueue_dependents(object);
44519
44520 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
44521 - fscache_stat(&fscache_n_object_avail);
44522 + fscache_stat_unchecked(&fscache_n_object_avail);
44523
44524 _leave("");
44525 }
44526 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
44527 enum fscache_checkaux result;
44528
44529 if (!object->cookie->def->check_aux) {
44530 - fscache_stat(&fscache_n_checkaux_none);
44531 + fscache_stat_unchecked(&fscache_n_checkaux_none);
44532 return FSCACHE_CHECKAUX_OKAY;
44533 }
44534
44535 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
44536 switch (result) {
44537 /* entry okay as is */
44538 case FSCACHE_CHECKAUX_OKAY:
44539 - fscache_stat(&fscache_n_checkaux_okay);
44540 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
44541 break;
44542
44543 /* entry requires update */
44544 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
44545 - fscache_stat(&fscache_n_checkaux_update);
44546 + fscache_stat_unchecked(&fscache_n_checkaux_update);
44547 break;
44548
44549 /* entry requires deletion */
44550 case FSCACHE_CHECKAUX_OBSOLETE:
44551 - fscache_stat(&fscache_n_checkaux_obsolete);
44552 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
44553 break;
44554
44555 default:
44556 diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
44557 index 30afdfa..2256596 100644
44558 --- a/fs/fscache/operation.c
44559 +++ b/fs/fscache/operation.c
44560 @@ -17,7 +17,7 @@
44561 #include <linux/slab.h>
44562 #include "internal.h"
44563
44564 -atomic_t fscache_op_debug_id;
44565 +atomic_unchecked_t fscache_op_debug_id;
44566 EXPORT_SYMBOL(fscache_op_debug_id);
44567
44568 /**
44569 @@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
44570 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
44571 ASSERTCMP(atomic_read(&op->usage), >, 0);
44572
44573 - fscache_stat(&fscache_n_op_enqueue);
44574 + fscache_stat_unchecked(&fscache_n_op_enqueue);
44575 switch (op->flags & FSCACHE_OP_TYPE) {
44576 case FSCACHE_OP_ASYNC:
44577 _debug("queue async");
44578 @@ -69,7 +69,7 @@ static void fscache_run_op(struct fscache_object *object,
44579 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
44580 if (op->processor)
44581 fscache_enqueue_operation(op);
44582 - fscache_stat(&fscache_n_op_run);
44583 + fscache_stat_unchecked(&fscache_n_op_run);
44584 }
44585
44586 /*
44587 @@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
44588 if (object->n_ops > 1) {
44589 atomic_inc(&op->usage);
44590 list_add_tail(&op->pend_link, &object->pending_ops);
44591 - fscache_stat(&fscache_n_op_pend);
44592 + fscache_stat_unchecked(&fscache_n_op_pend);
44593 } else if (!list_empty(&object->pending_ops)) {
44594 atomic_inc(&op->usage);
44595 list_add_tail(&op->pend_link, &object->pending_ops);
44596 - fscache_stat(&fscache_n_op_pend);
44597 + fscache_stat_unchecked(&fscache_n_op_pend);
44598 fscache_start_operations(object);
44599 } else {
44600 ASSERTCMP(object->n_in_progress, ==, 0);
44601 @@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
44602 object->n_exclusive++; /* reads and writes must wait */
44603 atomic_inc(&op->usage);
44604 list_add_tail(&op->pend_link, &object->pending_ops);
44605 - fscache_stat(&fscache_n_op_pend);
44606 + fscache_stat_unchecked(&fscache_n_op_pend);
44607 ret = 0;
44608 } else {
44609 /* not allowed to submit ops in any other state */
44610 @@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_object *object,
44611 if (object->n_exclusive > 0) {
44612 atomic_inc(&op->usage);
44613 list_add_tail(&op->pend_link, &object->pending_ops);
44614 - fscache_stat(&fscache_n_op_pend);
44615 + fscache_stat_unchecked(&fscache_n_op_pend);
44616 } else if (!list_empty(&object->pending_ops)) {
44617 atomic_inc(&op->usage);
44618 list_add_tail(&op->pend_link, &object->pending_ops);
44619 - fscache_stat(&fscache_n_op_pend);
44620 + fscache_stat_unchecked(&fscache_n_op_pend);
44621 fscache_start_operations(object);
44622 } else {
44623 ASSERTCMP(object->n_exclusive, ==, 0);
44624 @@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_object *object,
44625 object->n_ops++;
44626 atomic_inc(&op->usage);
44627 list_add_tail(&op->pend_link, &object->pending_ops);
44628 - fscache_stat(&fscache_n_op_pend);
44629 + fscache_stat_unchecked(&fscache_n_op_pend);
44630 ret = 0;
44631 } else if (object->state == FSCACHE_OBJECT_DYING ||
44632 object->state == FSCACHE_OBJECT_LC_DYING ||
44633 object->state == FSCACHE_OBJECT_WITHDRAWING) {
44634 - fscache_stat(&fscache_n_op_rejected);
44635 + fscache_stat_unchecked(&fscache_n_op_rejected);
44636 ret = -ENOBUFS;
44637 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
44638 fscache_report_unexpected_submission(object, op, ostate);
44639 @@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_operation *op)
44640
44641 ret = -EBUSY;
44642 if (!list_empty(&op->pend_link)) {
44643 - fscache_stat(&fscache_n_op_cancelled);
44644 + fscache_stat_unchecked(&fscache_n_op_cancelled);
44645 list_del_init(&op->pend_link);
44646 object->n_ops--;
44647 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
44648 @@ -331,7 +331,7 @@ void fscache_put_operation(struct fscache_operation *op)
44649 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
44650 BUG();
44651
44652 - fscache_stat(&fscache_n_op_release);
44653 + fscache_stat_unchecked(&fscache_n_op_release);
44654
44655 if (op->release) {
44656 op->release(op);
44657 @@ -348,7 +348,7 @@ void fscache_put_operation(struct fscache_operation *op)
44658 * lock, and defer it otherwise */
44659 if (!spin_trylock(&object->lock)) {
44660 _debug("defer put");
44661 - fscache_stat(&fscache_n_op_deferred_release);
44662 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
44663
44664 cache = object->cache;
44665 spin_lock(&cache->op_gc_list_lock);
44666 @@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_struct *work)
44667
44668 _debug("GC DEFERRED REL OBJ%x OP%x",
44669 object->debug_id, op->debug_id);
44670 - fscache_stat(&fscache_n_op_gc);
44671 + fscache_stat_unchecked(&fscache_n_op_gc);
44672
44673 ASSERTCMP(atomic_read(&op->usage), ==, 0);
44674
44675 diff --git a/fs/fscache/page.c b/fs/fscache/page.c
44676 index 3f7a59b..cf196cc 100644
44677 --- a/fs/fscache/page.c
44678 +++ b/fs/fscache/page.c
44679 @@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
44680 val = radix_tree_lookup(&cookie->stores, page->index);
44681 if (!val) {
44682 rcu_read_unlock();
44683 - fscache_stat(&fscache_n_store_vmscan_not_storing);
44684 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
44685 __fscache_uncache_page(cookie, page);
44686 return true;
44687 }
44688 @@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
44689 spin_unlock(&cookie->stores_lock);
44690
44691 if (xpage) {
44692 - fscache_stat(&fscache_n_store_vmscan_cancelled);
44693 - fscache_stat(&fscache_n_store_radix_deletes);
44694 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
44695 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
44696 ASSERTCMP(xpage, ==, page);
44697 } else {
44698 - fscache_stat(&fscache_n_store_vmscan_gone);
44699 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
44700 }
44701
44702 wake_up_bit(&cookie->flags, 0);
44703 @@ -107,7 +107,7 @@ page_busy:
44704 /* we might want to wait here, but that could deadlock the allocator as
44705 * the work threads writing to the cache may all end up sleeping
44706 * on memory allocation */
44707 - fscache_stat(&fscache_n_store_vmscan_busy);
44708 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
44709 return false;
44710 }
44711 EXPORT_SYMBOL(__fscache_maybe_release_page);
44712 @@ -131,7 +131,7 @@ static void fscache_end_page_write(struct fscache_object *object,
44713 FSCACHE_COOKIE_STORING_TAG);
44714 if (!radix_tree_tag_get(&cookie->stores, page->index,
44715 FSCACHE_COOKIE_PENDING_TAG)) {
44716 - fscache_stat(&fscache_n_store_radix_deletes);
44717 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
44718 xpage = radix_tree_delete(&cookie->stores, page->index);
44719 }
44720 spin_unlock(&cookie->stores_lock);
44721 @@ -152,7 +152,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
44722
44723 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
44724
44725 - fscache_stat(&fscache_n_attr_changed_calls);
44726 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
44727
44728 if (fscache_object_is_active(object)) {
44729 fscache_stat(&fscache_n_cop_attr_changed);
44730 @@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
44731
44732 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
44733
44734 - fscache_stat(&fscache_n_attr_changed);
44735 + fscache_stat_unchecked(&fscache_n_attr_changed);
44736
44737 op = kzalloc(sizeof(*op), GFP_KERNEL);
44738 if (!op) {
44739 - fscache_stat(&fscache_n_attr_changed_nomem);
44740 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
44741 _leave(" = -ENOMEM");
44742 return -ENOMEM;
44743 }
44744 @@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
44745 if (fscache_submit_exclusive_op(object, op) < 0)
44746 goto nobufs;
44747 spin_unlock(&cookie->lock);
44748 - fscache_stat(&fscache_n_attr_changed_ok);
44749 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
44750 fscache_put_operation(op);
44751 _leave(" = 0");
44752 return 0;
44753 @@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
44754 nobufs:
44755 spin_unlock(&cookie->lock);
44756 kfree(op);
44757 - fscache_stat(&fscache_n_attr_changed_nobufs);
44758 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
44759 _leave(" = %d", -ENOBUFS);
44760 return -ENOBUFS;
44761 }
44762 @@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
44763 /* allocate a retrieval operation and attempt to submit it */
44764 op = kzalloc(sizeof(*op), GFP_NOIO);
44765 if (!op) {
44766 - fscache_stat(&fscache_n_retrievals_nomem);
44767 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
44768 return NULL;
44769 }
44770
44771 @@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
44772 return 0;
44773 }
44774
44775 - fscache_stat(&fscache_n_retrievals_wait);
44776 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
44777
44778 jif = jiffies;
44779 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
44780 fscache_wait_bit_interruptible,
44781 TASK_INTERRUPTIBLE) != 0) {
44782 - fscache_stat(&fscache_n_retrievals_intr);
44783 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
44784 _leave(" = -ERESTARTSYS");
44785 return -ERESTARTSYS;
44786 }
44787 @@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
44788 */
44789 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
44790 struct fscache_retrieval *op,
44791 - atomic_t *stat_op_waits,
44792 - atomic_t *stat_object_dead)
44793 + atomic_unchecked_t *stat_op_waits,
44794 + atomic_unchecked_t *stat_object_dead)
44795 {
44796 int ret;
44797
44798 @@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
44799 goto check_if_dead;
44800
44801 _debug(">>> WT");
44802 - fscache_stat(stat_op_waits);
44803 + fscache_stat_unchecked(stat_op_waits);
44804 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
44805 fscache_wait_bit_interruptible,
44806 TASK_INTERRUPTIBLE) < 0) {
44807 @@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
44808
44809 check_if_dead:
44810 if (unlikely(fscache_object_is_dead(object))) {
44811 - fscache_stat(stat_object_dead);
44812 + fscache_stat_unchecked(stat_object_dead);
44813 return -ENOBUFS;
44814 }
44815 return 0;
44816 @@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
44817
44818 _enter("%p,%p,,,", cookie, page);
44819
44820 - fscache_stat(&fscache_n_retrievals);
44821 + fscache_stat_unchecked(&fscache_n_retrievals);
44822
44823 if (hlist_empty(&cookie->backing_objects))
44824 goto nobufs;
44825 @@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
44826 goto nobufs_unlock;
44827 spin_unlock(&cookie->lock);
44828
44829 - fscache_stat(&fscache_n_retrieval_ops);
44830 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
44831
44832 /* pin the netfs read context in case we need to do the actual netfs
44833 * read because we've encountered a cache read failure */
44834 @@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
44835
44836 error:
44837 if (ret == -ENOMEM)
44838 - fscache_stat(&fscache_n_retrievals_nomem);
44839 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
44840 else if (ret == -ERESTARTSYS)
44841 - fscache_stat(&fscache_n_retrievals_intr);
44842 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
44843 else if (ret == -ENODATA)
44844 - fscache_stat(&fscache_n_retrievals_nodata);
44845 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
44846 else if (ret < 0)
44847 - fscache_stat(&fscache_n_retrievals_nobufs);
44848 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
44849 else
44850 - fscache_stat(&fscache_n_retrievals_ok);
44851 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
44852
44853 fscache_put_retrieval(op);
44854 _leave(" = %d", ret);
44855 @@ -429,7 +429,7 @@ nobufs_unlock:
44856 spin_unlock(&cookie->lock);
44857 kfree(op);
44858 nobufs:
44859 - fscache_stat(&fscache_n_retrievals_nobufs);
44860 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
44861 _leave(" = -ENOBUFS");
44862 return -ENOBUFS;
44863 }
44864 @@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
44865
44866 _enter("%p,,%d,,,", cookie, *nr_pages);
44867
44868 - fscache_stat(&fscache_n_retrievals);
44869 + fscache_stat_unchecked(&fscache_n_retrievals);
44870
44871 if (hlist_empty(&cookie->backing_objects))
44872 goto nobufs;
44873 @@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
44874 goto nobufs_unlock;
44875 spin_unlock(&cookie->lock);
44876
44877 - fscache_stat(&fscache_n_retrieval_ops);
44878 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
44879
44880 /* pin the netfs read context in case we need to do the actual netfs
44881 * read because we've encountered a cache read failure */
44882 @@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
44883
44884 error:
44885 if (ret == -ENOMEM)
44886 - fscache_stat(&fscache_n_retrievals_nomem);
44887 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
44888 else if (ret == -ERESTARTSYS)
44889 - fscache_stat(&fscache_n_retrievals_intr);
44890 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
44891 else if (ret == -ENODATA)
44892 - fscache_stat(&fscache_n_retrievals_nodata);
44893 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
44894 else if (ret < 0)
44895 - fscache_stat(&fscache_n_retrievals_nobufs);
44896 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
44897 else
44898 - fscache_stat(&fscache_n_retrievals_ok);
44899 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
44900
44901 fscache_put_retrieval(op);
44902 _leave(" = %d", ret);
44903 @@ -545,7 +545,7 @@ nobufs_unlock:
44904 spin_unlock(&cookie->lock);
44905 kfree(op);
44906 nobufs:
44907 - fscache_stat(&fscache_n_retrievals_nobufs);
44908 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
44909 _leave(" = -ENOBUFS");
44910 return -ENOBUFS;
44911 }
44912 @@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
44913
44914 _enter("%p,%p,,,", cookie, page);
44915
44916 - fscache_stat(&fscache_n_allocs);
44917 + fscache_stat_unchecked(&fscache_n_allocs);
44918
44919 if (hlist_empty(&cookie->backing_objects))
44920 goto nobufs;
44921 @@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
44922 goto nobufs_unlock;
44923 spin_unlock(&cookie->lock);
44924
44925 - fscache_stat(&fscache_n_alloc_ops);
44926 + fscache_stat_unchecked(&fscache_n_alloc_ops);
44927
44928 ret = fscache_wait_for_retrieval_activation(
44929 object, op,
44930 @@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
44931
44932 error:
44933 if (ret == -ERESTARTSYS)
44934 - fscache_stat(&fscache_n_allocs_intr);
44935 + fscache_stat_unchecked(&fscache_n_allocs_intr);
44936 else if (ret < 0)
44937 - fscache_stat(&fscache_n_allocs_nobufs);
44938 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
44939 else
44940 - fscache_stat(&fscache_n_allocs_ok);
44941 + fscache_stat_unchecked(&fscache_n_allocs_ok);
44942
44943 fscache_put_retrieval(op);
44944 _leave(" = %d", ret);
44945 @@ -625,7 +625,7 @@ nobufs_unlock:
44946 spin_unlock(&cookie->lock);
44947 kfree(op);
44948 nobufs:
44949 - fscache_stat(&fscache_n_allocs_nobufs);
44950 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
44951 _leave(" = -ENOBUFS");
44952 return -ENOBUFS;
44953 }
44954 @@ -666,7 +666,7 @@ static void fscache_write_op(struct fscache_operation *_op)
44955
44956 spin_lock(&cookie->stores_lock);
44957
44958 - fscache_stat(&fscache_n_store_calls);
44959 + fscache_stat_unchecked(&fscache_n_store_calls);
44960
44961 /* find a page to store */
44962 page = NULL;
44963 @@ -677,7 +677,7 @@ static void fscache_write_op(struct fscache_operation *_op)
44964 page = results[0];
44965 _debug("gang %d [%lx]", n, page->index);
44966 if (page->index > op->store_limit) {
44967 - fscache_stat(&fscache_n_store_pages_over_limit);
44968 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
44969 goto superseded;
44970 }
44971
44972 @@ -689,7 +689,7 @@ static void fscache_write_op(struct fscache_operation *_op)
44973 spin_unlock(&cookie->stores_lock);
44974 spin_unlock(&object->lock);
44975
44976 - fscache_stat(&fscache_n_store_pages);
44977 + fscache_stat_unchecked(&fscache_n_store_pages);
44978 fscache_stat(&fscache_n_cop_write_page);
44979 ret = object->cache->ops->write_page(op, page);
44980 fscache_stat_d(&fscache_n_cop_write_page);
44981 @@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
44982 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
44983 ASSERT(PageFsCache(page));
44984
44985 - fscache_stat(&fscache_n_stores);
44986 + fscache_stat_unchecked(&fscache_n_stores);
44987
44988 op = kzalloc(sizeof(*op), GFP_NOIO);
44989 if (!op)
44990 @@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
44991 spin_unlock(&cookie->stores_lock);
44992 spin_unlock(&object->lock);
44993
44994 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
44995 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
44996 op->store_limit = object->store_limit;
44997
44998 if (fscache_submit_op(object, &op->op) < 0)
44999 @@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45000
45001 spin_unlock(&cookie->lock);
45002 radix_tree_preload_end();
45003 - fscache_stat(&fscache_n_store_ops);
45004 - fscache_stat(&fscache_n_stores_ok);
45005 + fscache_stat_unchecked(&fscache_n_store_ops);
45006 + fscache_stat_unchecked(&fscache_n_stores_ok);
45007
45008 /* the work queue now carries its own ref on the object */
45009 fscache_put_operation(&op->op);
45010 @@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45011 return 0;
45012
45013 already_queued:
45014 - fscache_stat(&fscache_n_stores_again);
45015 + fscache_stat_unchecked(&fscache_n_stores_again);
45016 already_pending:
45017 spin_unlock(&cookie->stores_lock);
45018 spin_unlock(&object->lock);
45019 spin_unlock(&cookie->lock);
45020 radix_tree_preload_end();
45021 kfree(op);
45022 - fscache_stat(&fscache_n_stores_ok);
45023 + fscache_stat_unchecked(&fscache_n_stores_ok);
45024 _leave(" = 0");
45025 return 0;
45026
45027 @@ -851,14 +851,14 @@ nobufs:
45028 spin_unlock(&cookie->lock);
45029 radix_tree_preload_end();
45030 kfree(op);
45031 - fscache_stat(&fscache_n_stores_nobufs);
45032 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
45033 _leave(" = -ENOBUFS");
45034 return -ENOBUFS;
45035
45036 nomem_free:
45037 kfree(op);
45038 nomem:
45039 - fscache_stat(&fscache_n_stores_oom);
45040 + fscache_stat_unchecked(&fscache_n_stores_oom);
45041 _leave(" = -ENOMEM");
45042 return -ENOMEM;
45043 }
45044 @@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
45045 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
45046 ASSERTCMP(page, !=, NULL);
45047
45048 - fscache_stat(&fscache_n_uncaches);
45049 + fscache_stat_unchecked(&fscache_n_uncaches);
45050
45051 /* cache withdrawal may beat us to it */
45052 if (!PageFsCache(page))
45053 @@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
45054 unsigned long loop;
45055
45056 #ifdef CONFIG_FSCACHE_STATS
45057 - atomic_add(pagevec->nr, &fscache_n_marks);
45058 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
45059 #endif
45060
45061 for (loop = 0; loop < pagevec->nr; loop++) {
45062 diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
45063 index 4765190..2a067f2 100644
45064 --- a/fs/fscache/stats.c
45065 +++ b/fs/fscache/stats.c
45066 @@ -18,95 +18,95 @@
45067 /*
45068 * operation counters
45069 */
45070 -atomic_t fscache_n_op_pend;
45071 -atomic_t fscache_n_op_run;
45072 -atomic_t fscache_n_op_enqueue;
45073 -atomic_t fscache_n_op_requeue;
45074 -atomic_t fscache_n_op_deferred_release;
45075 -atomic_t fscache_n_op_release;
45076 -atomic_t fscache_n_op_gc;
45077 -atomic_t fscache_n_op_cancelled;
45078 -atomic_t fscache_n_op_rejected;
45079 +atomic_unchecked_t fscache_n_op_pend;
45080 +atomic_unchecked_t fscache_n_op_run;
45081 +atomic_unchecked_t fscache_n_op_enqueue;
45082 +atomic_unchecked_t fscache_n_op_requeue;
45083 +atomic_unchecked_t fscache_n_op_deferred_release;
45084 +atomic_unchecked_t fscache_n_op_release;
45085 +atomic_unchecked_t fscache_n_op_gc;
45086 +atomic_unchecked_t fscache_n_op_cancelled;
45087 +atomic_unchecked_t fscache_n_op_rejected;
45088
45089 -atomic_t fscache_n_attr_changed;
45090 -atomic_t fscache_n_attr_changed_ok;
45091 -atomic_t fscache_n_attr_changed_nobufs;
45092 -atomic_t fscache_n_attr_changed_nomem;
45093 -atomic_t fscache_n_attr_changed_calls;
45094 +atomic_unchecked_t fscache_n_attr_changed;
45095 +atomic_unchecked_t fscache_n_attr_changed_ok;
45096 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
45097 +atomic_unchecked_t fscache_n_attr_changed_nomem;
45098 +atomic_unchecked_t fscache_n_attr_changed_calls;
45099
45100 -atomic_t fscache_n_allocs;
45101 -atomic_t fscache_n_allocs_ok;
45102 -atomic_t fscache_n_allocs_wait;
45103 -atomic_t fscache_n_allocs_nobufs;
45104 -atomic_t fscache_n_allocs_intr;
45105 -atomic_t fscache_n_allocs_object_dead;
45106 -atomic_t fscache_n_alloc_ops;
45107 -atomic_t fscache_n_alloc_op_waits;
45108 +atomic_unchecked_t fscache_n_allocs;
45109 +atomic_unchecked_t fscache_n_allocs_ok;
45110 +atomic_unchecked_t fscache_n_allocs_wait;
45111 +atomic_unchecked_t fscache_n_allocs_nobufs;
45112 +atomic_unchecked_t fscache_n_allocs_intr;
45113 +atomic_unchecked_t fscache_n_allocs_object_dead;
45114 +atomic_unchecked_t fscache_n_alloc_ops;
45115 +atomic_unchecked_t fscache_n_alloc_op_waits;
45116
45117 -atomic_t fscache_n_retrievals;
45118 -atomic_t fscache_n_retrievals_ok;
45119 -atomic_t fscache_n_retrievals_wait;
45120 -atomic_t fscache_n_retrievals_nodata;
45121 -atomic_t fscache_n_retrievals_nobufs;
45122 -atomic_t fscache_n_retrievals_intr;
45123 -atomic_t fscache_n_retrievals_nomem;
45124 -atomic_t fscache_n_retrievals_object_dead;
45125 -atomic_t fscache_n_retrieval_ops;
45126 -atomic_t fscache_n_retrieval_op_waits;
45127 +atomic_unchecked_t fscache_n_retrievals;
45128 +atomic_unchecked_t fscache_n_retrievals_ok;
45129 +atomic_unchecked_t fscache_n_retrievals_wait;
45130 +atomic_unchecked_t fscache_n_retrievals_nodata;
45131 +atomic_unchecked_t fscache_n_retrievals_nobufs;
45132 +atomic_unchecked_t fscache_n_retrievals_intr;
45133 +atomic_unchecked_t fscache_n_retrievals_nomem;
45134 +atomic_unchecked_t fscache_n_retrievals_object_dead;
45135 +atomic_unchecked_t fscache_n_retrieval_ops;
45136 +atomic_unchecked_t fscache_n_retrieval_op_waits;
45137
45138 -atomic_t fscache_n_stores;
45139 -atomic_t fscache_n_stores_ok;
45140 -atomic_t fscache_n_stores_again;
45141 -atomic_t fscache_n_stores_nobufs;
45142 -atomic_t fscache_n_stores_oom;
45143 -atomic_t fscache_n_store_ops;
45144 -atomic_t fscache_n_store_calls;
45145 -atomic_t fscache_n_store_pages;
45146 -atomic_t fscache_n_store_radix_deletes;
45147 -atomic_t fscache_n_store_pages_over_limit;
45148 +atomic_unchecked_t fscache_n_stores;
45149 +atomic_unchecked_t fscache_n_stores_ok;
45150 +atomic_unchecked_t fscache_n_stores_again;
45151 +atomic_unchecked_t fscache_n_stores_nobufs;
45152 +atomic_unchecked_t fscache_n_stores_oom;
45153 +atomic_unchecked_t fscache_n_store_ops;
45154 +atomic_unchecked_t fscache_n_store_calls;
45155 +atomic_unchecked_t fscache_n_store_pages;
45156 +atomic_unchecked_t fscache_n_store_radix_deletes;
45157 +atomic_unchecked_t fscache_n_store_pages_over_limit;
45158
45159 -atomic_t fscache_n_store_vmscan_not_storing;
45160 -atomic_t fscache_n_store_vmscan_gone;
45161 -atomic_t fscache_n_store_vmscan_busy;
45162 -atomic_t fscache_n_store_vmscan_cancelled;
45163 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
45164 +atomic_unchecked_t fscache_n_store_vmscan_gone;
45165 +atomic_unchecked_t fscache_n_store_vmscan_busy;
45166 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
45167
45168 -atomic_t fscache_n_marks;
45169 -atomic_t fscache_n_uncaches;
45170 +atomic_unchecked_t fscache_n_marks;
45171 +atomic_unchecked_t fscache_n_uncaches;
45172
45173 -atomic_t fscache_n_acquires;
45174 -atomic_t fscache_n_acquires_null;
45175 -atomic_t fscache_n_acquires_no_cache;
45176 -atomic_t fscache_n_acquires_ok;
45177 -atomic_t fscache_n_acquires_nobufs;
45178 -atomic_t fscache_n_acquires_oom;
45179 +atomic_unchecked_t fscache_n_acquires;
45180 +atomic_unchecked_t fscache_n_acquires_null;
45181 +atomic_unchecked_t fscache_n_acquires_no_cache;
45182 +atomic_unchecked_t fscache_n_acquires_ok;
45183 +atomic_unchecked_t fscache_n_acquires_nobufs;
45184 +atomic_unchecked_t fscache_n_acquires_oom;
45185
45186 -atomic_t fscache_n_updates;
45187 -atomic_t fscache_n_updates_null;
45188 -atomic_t fscache_n_updates_run;
45189 +atomic_unchecked_t fscache_n_updates;
45190 +atomic_unchecked_t fscache_n_updates_null;
45191 +atomic_unchecked_t fscache_n_updates_run;
45192
45193 -atomic_t fscache_n_relinquishes;
45194 -atomic_t fscache_n_relinquishes_null;
45195 -atomic_t fscache_n_relinquishes_waitcrt;
45196 -atomic_t fscache_n_relinquishes_retire;
45197 +atomic_unchecked_t fscache_n_relinquishes;
45198 +atomic_unchecked_t fscache_n_relinquishes_null;
45199 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
45200 +atomic_unchecked_t fscache_n_relinquishes_retire;
45201
45202 -atomic_t fscache_n_cookie_index;
45203 -atomic_t fscache_n_cookie_data;
45204 -atomic_t fscache_n_cookie_special;
45205 +atomic_unchecked_t fscache_n_cookie_index;
45206 +atomic_unchecked_t fscache_n_cookie_data;
45207 +atomic_unchecked_t fscache_n_cookie_special;
45208
45209 -atomic_t fscache_n_object_alloc;
45210 -atomic_t fscache_n_object_no_alloc;
45211 -atomic_t fscache_n_object_lookups;
45212 -atomic_t fscache_n_object_lookups_negative;
45213 -atomic_t fscache_n_object_lookups_positive;
45214 -atomic_t fscache_n_object_lookups_timed_out;
45215 -atomic_t fscache_n_object_created;
45216 -atomic_t fscache_n_object_avail;
45217 -atomic_t fscache_n_object_dead;
45218 +atomic_unchecked_t fscache_n_object_alloc;
45219 +atomic_unchecked_t fscache_n_object_no_alloc;
45220 +atomic_unchecked_t fscache_n_object_lookups;
45221 +atomic_unchecked_t fscache_n_object_lookups_negative;
45222 +atomic_unchecked_t fscache_n_object_lookups_positive;
45223 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
45224 +atomic_unchecked_t fscache_n_object_created;
45225 +atomic_unchecked_t fscache_n_object_avail;
45226 +atomic_unchecked_t fscache_n_object_dead;
45227
45228 -atomic_t fscache_n_checkaux_none;
45229 -atomic_t fscache_n_checkaux_okay;
45230 -atomic_t fscache_n_checkaux_update;
45231 -atomic_t fscache_n_checkaux_obsolete;
45232 +atomic_unchecked_t fscache_n_checkaux_none;
45233 +atomic_unchecked_t fscache_n_checkaux_okay;
45234 +atomic_unchecked_t fscache_n_checkaux_update;
45235 +atomic_unchecked_t fscache_n_checkaux_obsolete;
45236
45237 atomic_t fscache_n_cop_alloc_object;
45238 atomic_t fscache_n_cop_lookup_object;
45239 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
45240 seq_puts(m, "FS-Cache statistics\n");
45241
45242 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
45243 - atomic_read(&fscache_n_cookie_index),
45244 - atomic_read(&fscache_n_cookie_data),
45245 - atomic_read(&fscache_n_cookie_special));
45246 + atomic_read_unchecked(&fscache_n_cookie_index),
45247 + atomic_read_unchecked(&fscache_n_cookie_data),
45248 + atomic_read_unchecked(&fscache_n_cookie_special));
45249
45250 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
45251 - atomic_read(&fscache_n_object_alloc),
45252 - atomic_read(&fscache_n_object_no_alloc),
45253 - atomic_read(&fscache_n_object_avail),
45254 - atomic_read(&fscache_n_object_dead));
45255 + atomic_read_unchecked(&fscache_n_object_alloc),
45256 + atomic_read_unchecked(&fscache_n_object_no_alloc),
45257 + atomic_read_unchecked(&fscache_n_object_avail),
45258 + atomic_read_unchecked(&fscache_n_object_dead));
45259 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
45260 - atomic_read(&fscache_n_checkaux_none),
45261 - atomic_read(&fscache_n_checkaux_okay),
45262 - atomic_read(&fscache_n_checkaux_update),
45263 - atomic_read(&fscache_n_checkaux_obsolete));
45264 + atomic_read_unchecked(&fscache_n_checkaux_none),
45265 + atomic_read_unchecked(&fscache_n_checkaux_okay),
45266 + atomic_read_unchecked(&fscache_n_checkaux_update),
45267 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
45268
45269 seq_printf(m, "Pages : mrk=%u unc=%u\n",
45270 - atomic_read(&fscache_n_marks),
45271 - atomic_read(&fscache_n_uncaches));
45272 + atomic_read_unchecked(&fscache_n_marks),
45273 + atomic_read_unchecked(&fscache_n_uncaches));
45274
45275 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
45276 " oom=%u\n",
45277 - atomic_read(&fscache_n_acquires),
45278 - atomic_read(&fscache_n_acquires_null),
45279 - atomic_read(&fscache_n_acquires_no_cache),
45280 - atomic_read(&fscache_n_acquires_ok),
45281 - atomic_read(&fscache_n_acquires_nobufs),
45282 - atomic_read(&fscache_n_acquires_oom));
45283 + atomic_read_unchecked(&fscache_n_acquires),
45284 + atomic_read_unchecked(&fscache_n_acquires_null),
45285 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
45286 + atomic_read_unchecked(&fscache_n_acquires_ok),
45287 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
45288 + atomic_read_unchecked(&fscache_n_acquires_oom));
45289
45290 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
45291 - atomic_read(&fscache_n_object_lookups),
45292 - atomic_read(&fscache_n_object_lookups_negative),
45293 - atomic_read(&fscache_n_object_lookups_positive),
45294 - atomic_read(&fscache_n_object_created),
45295 - atomic_read(&fscache_n_object_lookups_timed_out));
45296 + atomic_read_unchecked(&fscache_n_object_lookups),
45297 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
45298 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
45299 + atomic_read_unchecked(&fscache_n_object_created),
45300 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
45301
45302 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
45303 - atomic_read(&fscache_n_updates),
45304 - atomic_read(&fscache_n_updates_null),
45305 - atomic_read(&fscache_n_updates_run));
45306 + atomic_read_unchecked(&fscache_n_updates),
45307 + atomic_read_unchecked(&fscache_n_updates_null),
45308 + atomic_read_unchecked(&fscache_n_updates_run));
45309
45310 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
45311 - atomic_read(&fscache_n_relinquishes),
45312 - atomic_read(&fscache_n_relinquishes_null),
45313 - atomic_read(&fscache_n_relinquishes_waitcrt),
45314 - atomic_read(&fscache_n_relinquishes_retire));
45315 + atomic_read_unchecked(&fscache_n_relinquishes),
45316 + atomic_read_unchecked(&fscache_n_relinquishes_null),
45317 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
45318 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
45319
45320 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
45321 - atomic_read(&fscache_n_attr_changed),
45322 - atomic_read(&fscache_n_attr_changed_ok),
45323 - atomic_read(&fscache_n_attr_changed_nobufs),
45324 - atomic_read(&fscache_n_attr_changed_nomem),
45325 - atomic_read(&fscache_n_attr_changed_calls));
45326 + atomic_read_unchecked(&fscache_n_attr_changed),
45327 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
45328 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
45329 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
45330 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
45331
45332 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
45333 - atomic_read(&fscache_n_allocs),
45334 - atomic_read(&fscache_n_allocs_ok),
45335 - atomic_read(&fscache_n_allocs_wait),
45336 - atomic_read(&fscache_n_allocs_nobufs),
45337 - atomic_read(&fscache_n_allocs_intr));
45338 + atomic_read_unchecked(&fscache_n_allocs),
45339 + atomic_read_unchecked(&fscache_n_allocs_ok),
45340 + atomic_read_unchecked(&fscache_n_allocs_wait),
45341 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
45342 + atomic_read_unchecked(&fscache_n_allocs_intr));
45343 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
45344 - atomic_read(&fscache_n_alloc_ops),
45345 - atomic_read(&fscache_n_alloc_op_waits),
45346 - atomic_read(&fscache_n_allocs_object_dead));
45347 + atomic_read_unchecked(&fscache_n_alloc_ops),
45348 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
45349 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
45350
45351 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
45352 " int=%u oom=%u\n",
45353 - atomic_read(&fscache_n_retrievals),
45354 - atomic_read(&fscache_n_retrievals_ok),
45355 - atomic_read(&fscache_n_retrievals_wait),
45356 - atomic_read(&fscache_n_retrievals_nodata),
45357 - atomic_read(&fscache_n_retrievals_nobufs),
45358 - atomic_read(&fscache_n_retrievals_intr),
45359 - atomic_read(&fscache_n_retrievals_nomem));
45360 + atomic_read_unchecked(&fscache_n_retrievals),
45361 + atomic_read_unchecked(&fscache_n_retrievals_ok),
45362 + atomic_read_unchecked(&fscache_n_retrievals_wait),
45363 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
45364 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
45365 + atomic_read_unchecked(&fscache_n_retrievals_intr),
45366 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
45367 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
45368 - atomic_read(&fscache_n_retrieval_ops),
45369 - atomic_read(&fscache_n_retrieval_op_waits),
45370 - atomic_read(&fscache_n_retrievals_object_dead));
45371 + atomic_read_unchecked(&fscache_n_retrieval_ops),
45372 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
45373 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
45374
45375 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
45376 - atomic_read(&fscache_n_stores),
45377 - atomic_read(&fscache_n_stores_ok),
45378 - atomic_read(&fscache_n_stores_again),
45379 - atomic_read(&fscache_n_stores_nobufs),
45380 - atomic_read(&fscache_n_stores_oom));
45381 + atomic_read_unchecked(&fscache_n_stores),
45382 + atomic_read_unchecked(&fscache_n_stores_ok),
45383 + atomic_read_unchecked(&fscache_n_stores_again),
45384 + atomic_read_unchecked(&fscache_n_stores_nobufs),
45385 + atomic_read_unchecked(&fscache_n_stores_oom));
45386 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
45387 - atomic_read(&fscache_n_store_ops),
45388 - atomic_read(&fscache_n_store_calls),
45389 - atomic_read(&fscache_n_store_pages),
45390 - atomic_read(&fscache_n_store_radix_deletes),
45391 - atomic_read(&fscache_n_store_pages_over_limit));
45392 + atomic_read_unchecked(&fscache_n_store_ops),
45393 + atomic_read_unchecked(&fscache_n_store_calls),
45394 + atomic_read_unchecked(&fscache_n_store_pages),
45395 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
45396 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
45397
45398 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
45399 - atomic_read(&fscache_n_store_vmscan_not_storing),
45400 - atomic_read(&fscache_n_store_vmscan_gone),
45401 - atomic_read(&fscache_n_store_vmscan_busy),
45402 - atomic_read(&fscache_n_store_vmscan_cancelled));
45403 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
45404 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
45405 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
45406 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
45407
45408 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
45409 - atomic_read(&fscache_n_op_pend),
45410 - atomic_read(&fscache_n_op_run),
45411 - atomic_read(&fscache_n_op_enqueue),
45412 - atomic_read(&fscache_n_op_cancelled),
45413 - atomic_read(&fscache_n_op_rejected));
45414 + atomic_read_unchecked(&fscache_n_op_pend),
45415 + atomic_read_unchecked(&fscache_n_op_run),
45416 + atomic_read_unchecked(&fscache_n_op_enqueue),
45417 + atomic_read_unchecked(&fscache_n_op_cancelled),
45418 + atomic_read_unchecked(&fscache_n_op_rejected));
45419 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
45420 - atomic_read(&fscache_n_op_deferred_release),
45421 - atomic_read(&fscache_n_op_release),
45422 - atomic_read(&fscache_n_op_gc));
45423 + atomic_read_unchecked(&fscache_n_op_deferred_release),
45424 + atomic_read_unchecked(&fscache_n_op_release),
45425 + atomic_read_unchecked(&fscache_n_op_gc));
45426
45427 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
45428 atomic_read(&fscache_n_cop_alloc_object),
45429 diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
45430 index 3426521..3b75162 100644
45431 --- a/fs/fuse/cuse.c
45432 +++ b/fs/fuse/cuse.c
45433 @@ -587,10 +587,12 @@ static int __init cuse_init(void)
45434 INIT_LIST_HEAD(&cuse_conntbl[i]);
45435
45436 /* inherit and extend fuse_dev_operations */
45437 - cuse_channel_fops = fuse_dev_operations;
45438 - cuse_channel_fops.owner = THIS_MODULE;
45439 - cuse_channel_fops.open = cuse_channel_open;
45440 - cuse_channel_fops.release = cuse_channel_release;
45441 + pax_open_kernel();
45442 + memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
45443 + *(void **)&cuse_channel_fops.owner = THIS_MODULE;
45444 + *(void **)&cuse_channel_fops.open = cuse_channel_open;
45445 + *(void **)&cuse_channel_fops.release = cuse_channel_release;
45446 + pax_close_kernel();
45447
45448 cuse_class = class_create(THIS_MODULE, "cuse");
45449 if (IS_ERR(cuse_class))
45450 diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
45451 index 5f3368a..8306426 100644
45452 --- a/fs/fuse/dev.c
45453 +++ b/fs/fuse/dev.c
45454 @@ -1242,7 +1242,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
45455 ret = 0;
45456 pipe_lock(pipe);
45457
45458 - if (!pipe->readers) {
45459 + if (!atomic_read(&pipe->readers)) {
45460 send_sig(SIGPIPE, current, 0);
45461 if (!ret)
45462 ret = -EPIPE;
45463 diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
45464 index 2066328..f5add3b 100644
45465 --- a/fs/fuse/dir.c
45466 +++ b/fs/fuse/dir.c
45467 @@ -1175,7 +1175,7 @@ static char *read_link(struct dentry *dentry)
45468 return link;
45469 }
45470
45471 -static void free_link(char *link)
45472 +static void free_link(const char *link)
45473 {
45474 if (!IS_ERR(link))
45475 free_page((unsigned long) link);
45476 diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
45477 index 5698746..6086012 100644
45478 --- a/fs/gfs2/inode.c
45479 +++ b/fs/gfs2/inode.c
45480 @@ -1487,7 +1487,7 @@ out:
45481
45482 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
45483 {
45484 - char *s = nd_get_link(nd);
45485 + const char *s = nd_get_link(nd);
45486 if (!IS_ERR(s))
45487 kfree(s);
45488 }
45489 diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
45490 index 1e85a7a..eb4218a 100644
45491 --- a/fs/hugetlbfs/inode.c
45492 +++ b/fs/hugetlbfs/inode.c
45493 @@ -921,7 +921,7 @@ static struct file_system_type hugetlbfs_fs_type = {
45494 .kill_sb = kill_litter_super,
45495 };
45496
45497 -static struct vfsmount *hugetlbfs_vfsmount;
45498 +struct vfsmount *hugetlbfs_vfsmount;
45499
45500 static int can_do_hugetlb_shm(void)
45501 {
45502 diff --git a/fs/inode.c b/fs/inode.c
45503 index 83ab215..8842101 100644
45504 --- a/fs/inode.c
45505 +++ b/fs/inode.c
45506 @@ -870,8 +870,8 @@ unsigned int get_next_ino(void)
45507
45508 #ifdef CONFIG_SMP
45509 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
45510 - static atomic_t shared_last_ino;
45511 - int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
45512 + static atomic_unchecked_t shared_last_ino;
45513 + int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
45514
45515 res = next - LAST_INO_BATCH;
45516 }
45517 diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
45518 index eafb8d3..f423d37 100644
45519 --- a/fs/jffs2/erase.c
45520 +++ b/fs/jffs2/erase.c
45521 @@ -438,7 +438,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
45522 struct jffs2_unknown_node marker = {
45523 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
45524 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
45525 - .totlen = cpu_to_je32(c->cleanmarker_size)
45526 + .totlen = cpu_to_je32(c->cleanmarker_size),
45527 + .hdr_crc = cpu_to_je32(0)
45528 };
45529
45530 jffs2_prealloc_raw_node_refs(c, jeb, 1);
45531 diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
45532 index 30e8f47..21f600c 100644
45533 --- a/fs/jffs2/wbuf.c
45534 +++ b/fs/jffs2/wbuf.c
45535 @@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
45536 {
45537 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
45538 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
45539 - .totlen = constant_cpu_to_je32(8)
45540 + .totlen = constant_cpu_to_je32(8),
45541 + .hdr_crc = constant_cpu_to_je32(0)
45542 };
45543
45544 /*
45545 diff --git a/fs/jfs/super.c b/fs/jfs/super.c
45546 index 682bca6..86b8e6e 100644
45547 --- a/fs/jfs/super.c
45548 +++ b/fs/jfs/super.c
45549 @@ -801,7 +801,7 @@ static int __init init_jfs_fs(void)
45550
45551 jfs_inode_cachep =
45552 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
45553 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
45554 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
45555 init_once);
45556 if (jfs_inode_cachep == NULL)
45557 return -ENOMEM;
45558 diff --git a/fs/libfs.c b/fs/libfs.c
45559 index 5b2dbb3..7442d54 100644
45560 --- a/fs/libfs.c
45561 +++ b/fs/libfs.c
45562 @@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
45563
45564 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
45565 struct dentry *next;
45566 + char d_name[sizeof(next->d_iname)];
45567 + const unsigned char *name;
45568 +
45569 next = list_entry(p, struct dentry, d_u.d_child);
45570 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
45571 if (!simple_positive(next)) {
45572 @@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
45573
45574 spin_unlock(&next->d_lock);
45575 spin_unlock(&dentry->d_lock);
45576 - if (filldir(dirent, next->d_name.name,
45577 + name = next->d_name.name;
45578 + if (name == next->d_iname) {
45579 + memcpy(d_name, name, next->d_name.len);
45580 + name = d_name;
45581 + }
45582 + if (filldir(dirent, name,
45583 next->d_name.len, filp->f_pos,
45584 next->d_inode->i_ino,
45585 dt_type(next->d_inode)) < 0)
45586 diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
45587 index 8392cb8..80d6193 100644
45588 --- a/fs/lockd/clntproc.c
45589 +++ b/fs/lockd/clntproc.c
45590 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
45591 /*
45592 * Cookie counter for NLM requests
45593 */
45594 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
45595 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
45596
45597 void nlmclnt_next_cookie(struct nlm_cookie *c)
45598 {
45599 - u32 cookie = atomic_inc_return(&nlm_cookie);
45600 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
45601
45602 memcpy(c->data, &cookie, 4);
45603 c->len=4;
45604 diff --git a/fs/locks.c b/fs/locks.c
45605 index 637694b..f84a121 100644
45606 --- a/fs/locks.c
45607 +++ b/fs/locks.c
45608 @@ -2074,16 +2074,16 @@ void locks_remove_flock(struct file *filp)
45609 return;
45610
45611 if (filp->f_op && filp->f_op->flock) {
45612 - struct file_lock fl = {
45613 + struct file_lock flock = {
45614 .fl_pid = current->tgid,
45615 .fl_file = filp,
45616 .fl_flags = FL_FLOCK,
45617 .fl_type = F_UNLCK,
45618 .fl_end = OFFSET_MAX,
45619 };
45620 - filp->f_op->flock(filp, F_SETLKW, &fl);
45621 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
45622 - fl.fl_ops->fl_release_private(&fl);
45623 + filp->f_op->flock(filp, F_SETLKW, &flock);
45624 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
45625 + flock.fl_ops->fl_release_private(&flock);
45626 }
45627
45628 lock_flocks();
45629 diff --git a/fs/namei.c b/fs/namei.c
45630 index 46ea9cc..c7cf3a3 100644
45631 --- a/fs/namei.c
45632 +++ b/fs/namei.c
45633 @@ -278,16 +278,32 @@ int generic_permission(struct inode *inode, int mask)
45634 if (ret != -EACCES)
45635 return ret;
45636
45637 +#ifdef CONFIG_GRKERNSEC
45638 + /* we'll block if we have to log due to a denied capability use */
45639 + if (mask & MAY_NOT_BLOCK)
45640 + return -ECHILD;
45641 +#endif
45642 +
45643 if (S_ISDIR(inode->i_mode)) {
45644 /* DACs are overridable for directories */
45645 - if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
45646 - return 0;
45647 if (!(mask & MAY_WRITE))
45648 - if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
45649 + if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
45650 + ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
45651 return 0;
45652 + if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
45653 + return 0;
45654 return -EACCES;
45655 }
45656 /*
45657 + * Searching includes executable on directories, else just read.
45658 + */
45659 + mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
45660 + if (mask == MAY_READ)
45661 + if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
45662 + ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
45663 + return 0;
45664 +
45665 + /*
45666 * Read/write DACs are always overridable.
45667 * Executable DACs are overridable when there is
45668 * at least one exec bit set.
45669 @@ -296,14 +312,6 @@ int generic_permission(struct inode *inode, int mask)
45670 if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
45671 return 0;
45672
45673 - /*
45674 - * Searching includes executable on directories, else just read.
45675 - */
45676 - mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
45677 - if (mask == MAY_READ)
45678 - if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
45679 - return 0;
45680 -
45681 return -EACCES;
45682 }
45683
45684 @@ -652,11 +660,19 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
45685 return error;
45686 }
45687
45688 + if (gr_handle_follow_link(dentry->d_parent->d_inode,
45689 + dentry->d_inode, dentry, nd->path.mnt)) {
45690 + error = -EACCES;
45691 + *p = ERR_PTR(error); /* no ->put_link(), please */
45692 + path_put(&nd->path);
45693 + return error;
45694 + }
45695 +
45696 nd->last_type = LAST_BIND;
45697 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
45698 error = PTR_ERR(*p);
45699 if (!IS_ERR(*p)) {
45700 - char *s = nd_get_link(nd);
45701 + const char *s = nd_get_link(nd);
45702 error = 0;
45703 if (s)
45704 error = __vfs_follow_link(nd, s);
45705 @@ -1650,6 +1666,21 @@ static int path_lookupat(int dfd, const char *name,
45706 if (!err)
45707 err = complete_walk(nd);
45708
45709 + if (!(nd->flags & LOOKUP_PARENT)) {
45710 +#ifdef CONFIG_GRKERNSEC
45711 + if (flags & LOOKUP_RCU) {
45712 + if (!err)
45713 + path_put(&nd->path);
45714 + err = -ECHILD;
45715 + } else
45716 +#endif
45717 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
45718 + if (!err)
45719 + path_put(&nd->path);
45720 + err = -ENOENT;
45721 + }
45722 + }
45723 +
45724 if (!err && nd->flags & LOOKUP_DIRECTORY) {
45725 if (!nd->inode->i_op->lookup) {
45726 path_put(&nd->path);
45727 @@ -1677,6 +1708,15 @@ static int do_path_lookup(int dfd, const char *name,
45728 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
45729
45730 if (likely(!retval)) {
45731 + if (*name != '/' && nd->path.dentry && nd->inode) {
45732 +#ifdef CONFIG_GRKERNSEC
45733 + if (flags & LOOKUP_RCU)
45734 + return -ECHILD;
45735 +#endif
45736 + if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
45737 + return -ENOENT;
45738 + }
45739 +
45740 if (unlikely(!audit_dummy_context())) {
45741 if (nd->path.dentry && nd->inode)
45742 audit_inode(name, nd->path.dentry);
45743 @@ -2071,6 +2111,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
45744 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
45745 return -EPERM;
45746
45747 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
45748 + return -EPERM;
45749 + if (gr_handle_rawio(inode))
45750 + return -EPERM;
45751 + if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
45752 + return -EACCES;
45753 +
45754 return 0;
45755 }
45756
45757 @@ -2132,6 +2179,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
45758 error = complete_walk(nd);
45759 if (error)
45760 return ERR_PTR(error);
45761 +#ifdef CONFIG_GRKERNSEC
45762 + if (nd->flags & LOOKUP_RCU) {
45763 + error = -ECHILD;
45764 + goto exit;
45765 + }
45766 +#endif
45767 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
45768 + error = -ENOENT;
45769 + goto exit;
45770 + }
45771 audit_inode(pathname, nd->path.dentry);
45772 if (open_flag & O_CREAT) {
45773 error = -EISDIR;
45774 @@ -2142,6 +2199,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
45775 error = complete_walk(nd);
45776 if (error)
45777 return ERR_PTR(error);
45778 +#ifdef CONFIG_GRKERNSEC
45779 + if (nd->flags & LOOKUP_RCU) {
45780 + error = -ECHILD;
45781 + goto exit;
45782 + }
45783 +#endif
45784 + if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
45785 + error = -ENOENT;
45786 + goto exit;
45787 + }
45788 audit_inode(pathname, dir);
45789 goto ok;
45790 }
45791 @@ -2163,6 +2230,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
45792 error = complete_walk(nd);
45793 if (error)
45794 return ERR_PTR(error);
45795 +#ifdef CONFIG_GRKERNSEC
45796 + if (nd->flags & LOOKUP_RCU) {
45797 + error = -ECHILD;
45798 + goto exit;
45799 + }
45800 +#endif
45801 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
45802 + error = -ENOENT;
45803 + goto exit;
45804 + }
45805
45806 error = -ENOTDIR;
45807 if (nd->flags & LOOKUP_DIRECTORY) {
45808 @@ -2203,6 +2280,12 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
45809 /* Negative dentry, just create the file */
45810 if (!dentry->d_inode) {
45811 umode_t mode = op->mode;
45812 +
45813 + if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, open_flag, acc_mode, mode)) {
45814 + error = -EACCES;
45815 + goto exit_mutex_unlock;
45816 + }
45817 +
45818 if (!IS_POSIXACL(dir->d_inode))
45819 mode &= ~current_umask();
45820 /*
45821 @@ -2226,6 +2309,8 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
45822 error = vfs_create(dir->d_inode, dentry, mode, nd);
45823 if (error)
45824 goto exit_mutex_unlock;
45825 + else
45826 + gr_handle_create(path->dentry, path->mnt);
45827 mutex_unlock(&dir->d_inode->i_mutex);
45828 dput(nd->path.dentry);
45829 nd->path.dentry = dentry;
45830 @@ -2235,6 +2320,19 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
45831 /*
45832 * It already exists.
45833 */
45834 +
45835 + if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
45836 + error = -ENOENT;
45837 + goto exit_mutex_unlock;
45838 + }
45839 +
45840 + /* only check if O_CREAT is specified, all other checks need to go
45841 + into may_open */
45842 + if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
45843 + error = -EACCES;
45844 + goto exit_mutex_unlock;
45845 + }
45846 +
45847 mutex_unlock(&dir->d_inode->i_mutex);
45848 audit_inode(pathname, path->dentry);
45849
45850 @@ -2447,6 +2545,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path
45851 *path = nd.path;
45852 return dentry;
45853 eexist:
45854 + if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
45855 + dput(dentry);
45856 + dentry = ERR_PTR(-ENOENT);
45857 + goto fail;
45858 + }
45859 dput(dentry);
45860 dentry = ERR_PTR(-EEXIST);
45861 fail:
45862 @@ -2469,6 +2572,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname, struct pat
45863 }
45864 EXPORT_SYMBOL(user_path_create);
45865
45866 +static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, char **to, int is_dir)
45867 +{
45868 + char *tmp = getname(pathname);
45869 + struct dentry *res;
45870 + if (IS_ERR(tmp))
45871 + return ERR_CAST(tmp);
45872 + res = kern_path_create(dfd, tmp, path, is_dir);
45873 + if (IS_ERR(res))
45874 + putname(tmp);
45875 + else
45876 + *to = tmp;
45877 + return res;
45878 +}
45879 +
45880 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
45881 {
45882 int error = may_create(dir, dentry);
45883 @@ -2536,6 +2653,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
45884 error = mnt_want_write(path.mnt);
45885 if (error)
45886 goto out_dput;
45887 +
45888 + if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
45889 + error = -EPERM;
45890 + goto out_drop_write;
45891 + }
45892 +
45893 + if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
45894 + error = -EACCES;
45895 + goto out_drop_write;
45896 + }
45897 +
45898 error = security_path_mknod(&path, dentry, mode, dev);
45899 if (error)
45900 goto out_drop_write;
45901 @@ -2553,6 +2681,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
45902 }
45903 out_drop_write:
45904 mnt_drop_write(path.mnt);
45905 +
45906 + if (!error)
45907 + gr_handle_create(dentry, path.mnt);
45908 out_dput:
45909 dput(dentry);
45910 mutex_unlock(&path.dentry->d_inode->i_mutex);
45911 @@ -2602,12 +2733,21 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode)
45912 error = mnt_want_write(path.mnt);
45913 if (error)
45914 goto out_dput;
45915 +
45916 + if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
45917 + error = -EACCES;
45918 + goto out_drop_write;
45919 + }
45920 +
45921 error = security_path_mkdir(&path, dentry, mode);
45922 if (error)
45923 goto out_drop_write;
45924 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
45925 out_drop_write:
45926 mnt_drop_write(path.mnt);
45927 +
45928 + if (!error)
45929 + gr_handle_create(dentry, path.mnt);
45930 out_dput:
45931 dput(dentry);
45932 mutex_unlock(&path.dentry->d_inode->i_mutex);
45933 @@ -2687,6 +2827,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
45934 char * name;
45935 struct dentry *dentry;
45936 struct nameidata nd;
45937 + ino_t saved_ino = 0;
45938 + dev_t saved_dev = 0;
45939
45940 error = user_path_parent(dfd, pathname, &nd, &name);
45941 if (error)
45942 @@ -2715,6 +2857,15 @@ static long do_rmdir(int dfd, const char __user *pathname)
45943 error = -ENOENT;
45944 goto exit3;
45945 }
45946 +
45947 + saved_ino = dentry->d_inode->i_ino;
45948 + saved_dev = gr_get_dev_from_dentry(dentry);
45949 +
45950 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
45951 + error = -EACCES;
45952 + goto exit3;
45953 + }
45954 +
45955 error = mnt_want_write(nd.path.mnt);
45956 if (error)
45957 goto exit3;
45958 @@ -2722,6 +2873,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
45959 if (error)
45960 goto exit4;
45961 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
45962 + if (!error && (saved_dev || saved_ino))
45963 + gr_handle_delete(saved_ino, saved_dev);
45964 exit4:
45965 mnt_drop_write(nd.path.mnt);
45966 exit3:
45967 @@ -2784,6 +2937,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
45968 struct dentry *dentry;
45969 struct nameidata nd;
45970 struct inode *inode = NULL;
45971 + ino_t saved_ino = 0;
45972 + dev_t saved_dev = 0;
45973
45974 error = user_path_parent(dfd, pathname, &nd, &name);
45975 if (error)
45976 @@ -2806,6 +2961,16 @@ static long do_unlinkat(int dfd, const char __user *pathname)
45977 if (!inode)
45978 goto slashes;
45979 ihold(inode);
45980 +
45981 + if (inode->i_nlink <= 1) {
45982 + saved_ino = inode->i_ino;
45983 + saved_dev = gr_get_dev_from_dentry(dentry);
45984 + }
45985 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
45986 + error = -EACCES;
45987 + goto exit2;
45988 + }
45989 +
45990 error = mnt_want_write(nd.path.mnt);
45991 if (error)
45992 goto exit2;
45993 @@ -2813,6 +2978,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
45994 if (error)
45995 goto exit3;
45996 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
45997 + if (!error && (saved_ino || saved_dev))
45998 + gr_handle_delete(saved_ino, saved_dev);
45999 exit3:
46000 mnt_drop_write(nd.path.mnt);
46001 exit2:
46002 @@ -2888,10 +3055,18 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
46003 error = mnt_want_write(path.mnt);
46004 if (error)
46005 goto out_dput;
46006 +
46007 + if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
46008 + error = -EACCES;
46009 + goto out_drop_write;
46010 + }
46011 +
46012 error = security_path_symlink(&path, dentry, from);
46013 if (error)
46014 goto out_drop_write;
46015 error = vfs_symlink(path.dentry->d_inode, dentry, from);
46016 + if (!error)
46017 + gr_handle_create(dentry, path.mnt);
46018 out_drop_write:
46019 mnt_drop_write(path.mnt);
46020 out_dput:
46021 @@ -2963,6 +3138,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
46022 {
46023 struct dentry *new_dentry;
46024 struct path old_path, new_path;
46025 + char *to = NULL;
46026 int how = 0;
46027 int error;
46028
46029 @@ -2986,7 +3162,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
46030 if (error)
46031 return error;
46032
46033 - new_dentry = user_path_create(newdfd, newname, &new_path, 0);
46034 + new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to, 0);
46035 error = PTR_ERR(new_dentry);
46036 if (IS_ERR(new_dentry))
46037 goto out;
46038 @@ -2997,13 +3173,30 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
46039 error = mnt_want_write(new_path.mnt);
46040 if (error)
46041 goto out_dput;
46042 +
46043 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
46044 + old_path.dentry->d_inode,
46045 + old_path.dentry->d_inode->i_mode, to)) {
46046 + error = -EACCES;
46047 + goto out_drop_write;
46048 + }
46049 +
46050 + if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
46051 + old_path.dentry, old_path.mnt, to)) {
46052 + error = -EACCES;
46053 + goto out_drop_write;
46054 + }
46055 +
46056 error = security_path_link(old_path.dentry, &new_path, new_dentry);
46057 if (error)
46058 goto out_drop_write;
46059 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
46060 + if (!error)
46061 + gr_handle_create(new_dentry, new_path.mnt);
46062 out_drop_write:
46063 mnt_drop_write(new_path.mnt);
46064 out_dput:
46065 + putname(to);
46066 dput(new_dentry);
46067 mutex_unlock(&new_path.dentry->d_inode->i_mutex);
46068 path_put(&new_path);
46069 @@ -3231,6 +3424,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
46070 if (new_dentry == trap)
46071 goto exit5;
46072
46073 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
46074 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
46075 + to);
46076 + if (error)
46077 + goto exit5;
46078 +
46079 error = mnt_want_write(oldnd.path.mnt);
46080 if (error)
46081 goto exit5;
46082 @@ -3240,6 +3439,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
46083 goto exit6;
46084 error = vfs_rename(old_dir->d_inode, old_dentry,
46085 new_dir->d_inode, new_dentry);
46086 + if (!error)
46087 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
46088 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
46089 exit6:
46090 mnt_drop_write(oldnd.path.mnt);
46091 exit5:
46092 @@ -3265,6 +3467,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
46093
46094 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
46095 {
46096 + char tmpbuf[64];
46097 + const char *newlink;
46098 int len;
46099
46100 len = PTR_ERR(link);
46101 @@ -3274,7 +3478,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
46102 len = strlen(link);
46103 if (len > (unsigned) buflen)
46104 len = buflen;
46105 - if (copy_to_user(buffer, link, len))
46106 +
46107 + if (len < sizeof(tmpbuf)) {
46108 + memcpy(tmpbuf, link, len);
46109 + newlink = tmpbuf;
46110 + } else
46111 + newlink = link;
46112 +
46113 + if (copy_to_user(buffer, newlink, len))
46114 len = -EFAULT;
46115 out:
46116 return len;
46117 diff --git a/fs/namespace.c b/fs/namespace.c
46118 index e608199..9609cb9 100644
46119 --- a/fs/namespace.c
46120 +++ b/fs/namespace.c
46121 @@ -1155,6 +1155,9 @@ static int do_umount(struct mount *mnt, int flags)
46122 if (!(sb->s_flags & MS_RDONLY))
46123 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
46124 up_write(&sb->s_umount);
46125 +
46126 + gr_log_remount(mnt->mnt_devname, retval);
46127 +
46128 return retval;
46129 }
46130
46131 @@ -1174,6 +1177,9 @@ static int do_umount(struct mount *mnt, int flags)
46132 br_write_unlock(vfsmount_lock);
46133 up_write(&namespace_sem);
46134 release_mounts(&umount_list);
46135 +
46136 + gr_log_unmount(mnt->mnt_devname, retval);
46137 +
46138 return retval;
46139 }
46140
46141 @@ -2175,6 +2181,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
46142 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
46143 MS_STRICTATIME);
46144
46145 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
46146 + retval = -EPERM;
46147 + goto dput_out;
46148 + }
46149 +
46150 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
46151 + retval = -EPERM;
46152 + goto dput_out;
46153 + }
46154 +
46155 if (flags & MS_REMOUNT)
46156 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
46157 data_page);
46158 @@ -2189,6 +2205,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
46159 dev_name, data_page);
46160 dput_out:
46161 path_put(&path);
46162 +
46163 + gr_log_mount(dev_name, dir_name, retval);
46164 +
46165 return retval;
46166 }
46167
46168 @@ -2470,6 +2489,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
46169 if (error)
46170 goto out2;
46171
46172 + if (gr_handle_chroot_pivot()) {
46173 + error = -EPERM;
46174 + goto out2;
46175 + }
46176 +
46177 get_fs_root(current->fs, &root);
46178 error = lock_mount(&old);
46179 if (error)
46180 diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
46181 index f649fba..236bf92 100644
46182 --- a/fs/nfs/inode.c
46183 +++ b/fs/nfs/inode.c
46184 @@ -151,7 +151,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
46185 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
46186 nfsi->attrtimeo_timestamp = jiffies;
46187
46188 - memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
46189 + memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
46190 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
46191 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
46192 else
46193 @@ -1003,16 +1003,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
46194 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
46195 }
46196
46197 -static atomic_long_t nfs_attr_generation_counter;
46198 +static atomic_long_unchecked_t nfs_attr_generation_counter;
46199
46200 static unsigned long nfs_read_attr_generation_counter(void)
46201 {
46202 - return atomic_long_read(&nfs_attr_generation_counter);
46203 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
46204 }
46205
46206 unsigned long nfs_inc_attr_generation_counter(void)
46207 {
46208 - return atomic_long_inc_return(&nfs_attr_generation_counter);
46209 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
46210 }
46211
46212 void nfs_fattr_init(struct nfs_fattr *fattr)
46213 diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
46214 index edf6d3e..bdd1da7 100644
46215 --- a/fs/nfsd/vfs.c
46216 +++ b/fs/nfsd/vfs.c
46217 @@ -925,7 +925,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
46218 } else {
46219 oldfs = get_fs();
46220 set_fs(KERNEL_DS);
46221 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
46222 + host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
46223 set_fs(oldfs);
46224 }
46225
46226 @@ -1029,7 +1029,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
46227
46228 /* Write the data. */
46229 oldfs = get_fs(); set_fs(KERNEL_DS);
46230 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
46231 + host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
46232 set_fs(oldfs);
46233 if (host_err < 0)
46234 goto out_nfserr;
46235 @@ -1564,7 +1564,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
46236 */
46237
46238 oldfs = get_fs(); set_fs(KERNEL_DS);
46239 - host_err = inode->i_op->readlink(dentry, buf, *lenp);
46240 + host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
46241 set_fs(oldfs);
46242
46243 if (host_err < 0)
46244 diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
46245 index 3568c8a..e0240d8 100644
46246 --- a/fs/notify/fanotify/fanotify_user.c
46247 +++ b/fs/notify/fanotify/fanotify_user.c
46248 @@ -278,7 +278,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
46249 goto out_close_fd;
46250
46251 ret = -EFAULT;
46252 - if (copy_to_user(buf, &fanotify_event_metadata,
46253 + if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
46254 + copy_to_user(buf, &fanotify_event_metadata,
46255 fanotify_event_metadata.event_len))
46256 goto out_kill_access_response;
46257
46258 diff --git a/fs/notify/notification.c b/fs/notify/notification.c
46259 index ee18815..7aa5d01 100644
46260 --- a/fs/notify/notification.c
46261 +++ b/fs/notify/notification.c
46262 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
46263 * get set to 0 so it will never get 'freed'
46264 */
46265 static struct fsnotify_event *q_overflow_event;
46266 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
46267 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
46268
46269 /**
46270 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
46271 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
46272 */
46273 u32 fsnotify_get_cookie(void)
46274 {
46275 - return atomic_inc_return(&fsnotify_sync_cookie);
46276 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
46277 }
46278 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
46279
46280 diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
46281 index 99e3610..02c1068 100644
46282 --- a/fs/ntfs/dir.c
46283 +++ b/fs/ntfs/dir.c
46284 @@ -1329,7 +1329,7 @@ find_next_index_buffer:
46285 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
46286 ~(s64)(ndir->itype.index.block_size - 1)));
46287 /* Bounds checks. */
46288 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
46289 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
46290 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
46291 "inode 0x%lx or driver bug.", vdir->i_ino);
46292 goto err_out;
46293 diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
46294 index c587e2d..3641eaa 100644
46295 --- a/fs/ntfs/file.c
46296 +++ b/fs/ntfs/file.c
46297 @@ -2229,6 +2229,6 @@ const struct inode_operations ntfs_file_inode_ops = {
46298 #endif /* NTFS_RW */
46299 };
46300
46301 -const struct file_operations ntfs_empty_file_ops = {};
46302 +const struct file_operations ntfs_empty_file_ops __read_only;
46303
46304 -const struct inode_operations ntfs_empty_inode_ops = {};
46305 +const struct inode_operations ntfs_empty_inode_ops __read_only;
46306 diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
46307 index 210c352..a174f83 100644
46308 --- a/fs/ocfs2/localalloc.c
46309 +++ b/fs/ocfs2/localalloc.c
46310 @@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
46311 goto bail;
46312 }
46313
46314 - atomic_inc(&osb->alloc_stats.moves);
46315 + atomic_inc_unchecked(&osb->alloc_stats.moves);
46316
46317 bail:
46318 if (handle)
46319 diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
46320 index d355e6e..578d905 100644
46321 --- a/fs/ocfs2/ocfs2.h
46322 +++ b/fs/ocfs2/ocfs2.h
46323 @@ -235,11 +235,11 @@ enum ocfs2_vol_state
46324
46325 struct ocfs2_alloc_stats
46326 {
46327 - atomic_t moves;
46328 - atomic_t local_data;
46329 - atomic_t bitmap_data;
46330 - atomic_t bg_allocs;
46331 - atomic_t bg_extends;
46332 + atomic_unchecked_t moves;
46333 + atomic_unchecked_t local_data;
46334 + atomic_unchecked_t bitmap_data;
46335 + atomic_unchecked_t bg_allocs;
46336 + atomic_unchecked_t bg_extends;
46337 };
46338
46339 enum ocfs2_local_alloc_state
46340 diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
46341 index ba5d97e..c77db25 100644
46342 --- a/fs/ocfs2/suballoc.c
46343 +++ b/fs/ocfs2/suballoc.c
46344 @@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
46345 mlog_errno(status);
46346 goto bail;
46347 }
46348 - atomic_inc(&osb->alloc_stats.bg_extends);
46349 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
46350
46351 /* You should never ask for this much metadata */
46352 BUG_ON(bits_wanted >
46353 @@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handle,
46354 mlog_errno(status);
46355 goto bail;
46356 }
46357 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
46358 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
46359
46360 *suballoc_loc = res.sr_bg_blkno;
46361 *suballoc_bit_start = res.sr_bit_offset;
46362 @@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
46363 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
46364 res->sr_bits);
46365
46366 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
46367 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
46368
46369 BUG_ON(res->sr_bits != 1);
46370
46371 @@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
46372 mlog_errno(status);
46373 goto bail;
46374 }
46375 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
46376 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
46377
46378 BUG_ON(res.sr_bits != 1);
46379
46380 @@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
46381 cluster_start,
46382 num_clusters);
46383 if (!status)
46384 - atomic_inc(&osb->alloc_stats.local_data);
46385 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
46386 } else {
46387 if (min_clusters > (osb->bitmap_cpg - 1)) {
46388 /* The only paths asking for contiguousness
46389 @@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
46390 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
46391 res.sr_bg_blkno,
46392 res.sr_bit_offset);
46393 - atomic_inc(&osb->alloc_stats.bitmap_data);
46394 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
46395 *num_clusters = res.sr_bits;
46396 }
46397 }
46398 diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
46399 index 604e12c..8426483 100644
46400 --- a/fs/ocfs2/super.c
46401 +++ b/fs/ocfs2/super.c
46402 @@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
46403 "%10s => GlobalAllocs: %d LocalAllocs: %d "
46404 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
46405 "Stats",
46406 - atomic_read(&osb->alloc_stats.bitmap_data),
46407 - atomic_read(&osb->alloc_stats.local_data),
46408 - atomic_read(&osb->alloc_stats.bg_allocs),
46409 - atomic_read(&osb->alloc_stats.moves),
46410 - atomic_read(&osb->alloc_stats.bg_extends));
46411 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
46412 + atomic_read_unchecked(&osb->alloc_stats.local_data),
46413 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
46414 + atomic_read_unchecked(&osb->alloc_stats.moves),
46415 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
46416
46417 out += snprintf(buf + out, len - out,
46418 "%10s => State: %u Descriptor: %llu Size: %u bits "
46419 @@ -2117,11 +2117,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
46420 spin_lock_init(&osb->osb_xattr_lock);
46421 ocfs2_init_steal_slots(osb);
46422
46423 - atomic_set(&osb->alloc_stats.moves, 0);
46424 - atomic_set(&osb->alloc_stats.local_data, 0);
46425 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
46426 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
46427 - atomic_set(&osb->alloc_stats.bg_extends, 0);
46428 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
46429 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
46430 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
46431 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
46432 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
46433
46434 /* Copy the blockcheck stats from the superblock probe */
46435 osb->osb_ecc_stats = *stats;
46436 diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
46437 index 5d22872..523db20 100644
46438 --- a/fs/ocfs2/symlink.c
46439 +++ b/fs/ocfs2/symlink.c
46440 @@ -142,7 +142,7 @@ bail:
46441
46442 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
46443 {
46444 - char *link = nd_get_link(nd);
46445 + const char *link = nd_get_link(nd);
46446 if (!IS_ERR(link))
46447 kfree(link);
46448 }
46449 diff --git a/fs/open.c b/fs/open.c
46450 index 77becc0..aad7bd9 100644
46451 --- a/fs/open.c
46452 +++ b/fs/open.c
46453 @@ -112,6 +112,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
46454 error = locks_verify_truncate(inode, NULL, length);
46455 if (!error)
46456 error = security_path_truncate(&path);
46457 +
46458 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
46459 + error = -EACCES;
46460 +
46461 if (!error)
46462 error = do_truncate(path.dentry, length, 0, NULL);
46463
46464 @@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
46465 if (__mnt_is_readonly(path.mnt))
46466 res = -EROFS;
46467
46468 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
46469 + res = -EACCES;
46470 +
46471 out_path_release:
46472 path_put(&path);
46473 out:
46474 @@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
46475 if (error)
46476 goto dput_and_out;
46477
46478 + gr_log_chdir(path.dentry, path.mnt);
46479 +
46480 set_fs_pwd(current->fs, &path);
46481
46482 dput_and_out:
46483 @@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
46484 goto out_putf;
46485
46486 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
46487 +
46488 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
46489 + error = -EPERM;
46490 +
46491 + if (!error)
46492 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
46493 +
46494 if (!error)
46495 set_fs_pwd(current->fs, &file->f_path);
46496 out_putf:
46497 @@ -438,7 +454,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
46498 if (error)
46499 goto dput_and_out;
46500
46501 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
46502 + goto dput_and_out;
46503 +
46504 set_fs_root(current->fs, &path);
46505 +
46506 + gr_handle_chroot_chdir(&path);
46507 +
46508 error = 0;
46509 dput_and_out:
46510 path_put(&path);
46511 @@ -456,6 +478,16 @@ static int chmod_common(struct path *path, umode_t mode)
46512 if (error)
46513 return error;
46514 mutex_lock(&inode->i_mutex);
46515 +
46516 + if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
46517 + error = -EACCES;
46518 + goto out_unlock;
46519 + }
46520 + if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
46521 + error = -EACCES;
46522 + goto out_unlock;
46523 + }
46524 +
46525 error = security_path_chmod(path, mode);
46526 if (error)
46527 goto out_unlock;
46528 @@ -506,6 +538,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
46529 int error;
46530 struct iattr newattrs;
46531
46532 + if (!gr_acl_handle_chown(path->dentry, path->mnt))
46533 + return -EACCES;
46534 +
46535 newattrs.ia_valid = ATTR_CTIME;
46536 if (user != (uid_t) -1) {
46537 newattrs.ia_valid |= ATTR_UID;
46538 diff --git a/fs/pipe.c b/fs/pipe.c
46539 index a932ced..6495412 100644
46540 --- a/fs/pipe.c
46541 +++ b/fs/pipe.c
46542 @@ -420,9 +420,9 @@ redo:
46543 }
46544 if (bufs) /* More to do? */
46545 continue;
46546 - if (!pipe->writers)
46547 + if (!atomic_read(&pipe->writers))
46548 break;
46549 - if (!pipe->waiting_writers) {
46550 + if (!atomic_read(&pipe->waiting_writers)) {
46551 /* syscall merging: Usually we must not sleep
46552 * if O_NONBLOCK is set, or if we got some data.
46553 * But if a writer sleeps in kernel space, then
46554 @@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
46555 mutex_lock(&inode->i_mutex);
46556 pipe = inode->i_pipe;
46557
46558 - if (!pipe->readers) {
46559 + if (!atomic_read(&pipe->readers)) {
46560 send_sig(SIGPIPE, current, 0);
46561 ret = -EPIPE;
46562 goto out;
46563 @@ -530,7 +530,7 @@ redo1:
46564 for (;;) {
46565 int bufs;
46566
46567 - if (!pipe->readers) {
46568 + if (!atomic_read(&pipe->readers)) {
46569 send_sig(SIGPIPE, current, 0);
46570 if (!ret)
46571 ret = -EPIPE;
46572 @@ -616,9 +616,9 @@ redo2:
46573 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
46574 do_wakeup = 0;
46575 }
46576 - pipe->waiting_writers++;
46577 + atomic_inc(&pipe->waiting_writers);
46578 pipe_wait(pipe);
46579 - pipe->waiting_writers--;
46580 + atomic_dec(&pipe->waiting_writers);
46581 }
46582 out:
46583 mutex_unlock(&inode->i_mutex);
46584 @@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table *wait)
46585 mask = 0;
46586 if (filp->f_mode & FMODE_READ) {
46587 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
46588 - if (!pipe->writers && filp->f_version != pipe->w_counter)
46589 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
46590 mask |= POLLHUP;
46591 }
46592
46593 @@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table *wait)
46594 * Most Unices do not set POLLERR for FIFOs but on Linux they
46595 * behave exactly like pipes for poll().
46596 */
46597 - if (!pipe->readers)
46598 + if (!atomic_read(&pipe->readers))
46599 mask |= POLLERR;
46600 }
46601
46602 @@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int decr, int decw)
46603
46604 mutex_lock(&inode->i_mutex);
46605 pipe = inode->i_pipe;
46606 - pipe->readers -= decr;
46607 - pipe->writers -= decw;
46608 + atomic_sub(decr, &pipe->readers);
46609 + atomic_sub(decw, &pipe->writers);
46610
46611 - if (!pipe->readers && !pipe->writers) {
46612 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
46613 free_pipe_info(inode);
46614 } else {
46615 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
46616 @@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
46617
46618 if (inode->i_pipe) {
46619 ret = 0;
46620 - inode->i_pipe->readers++;
46621 + atomic_inc(&inode->i_pipe->readers);
46622 }
46623
46624 mutex_unlock(&inode->i_mutex);
46625 @@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
46626
46627 if (inode->i_pipe) {
46628 ret = 0;
46629 - inode->i_pipe->writers++;
46630 + atomic_inc(&inode->i_pipe->writers);
46631 }
46632
46633 mutex_unlock(&inode->i_mutex);
46634 @@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
46635 if (inode->i_pipe) {
46636 ret = 0;
46637 if (filp->f_mode & FMODE_READ)
46638 - inode->i_pipe->readers++;
46639 + atomic_inc(&inode->i_pipe->readers);
46640 if (filp->f_mode & FMODE_WRITE)
46641 - inode->i_pipe->writers++;
46642 + atomic_inc(&inode->i_pipe->writers);
46643 }
46644
46645 mutex_unlock(&inode->i_mutex);
46646 @@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode)
46647 inode->i_pipe = NULL;
46648 }
46649
46650 -static struct vfsmount *pipe_mnt __read_mostly;
46651 +struct vfsmount *pipe_mnt __read_mostly;
46652
46653 /*
46654 * pipefs_dname() is called from d_path().
46655 @@ -961,7 +961,8 @@ static struct inode * get_pipe_inode(void)
46656 goto fail_iput;
46657 inode->i_pipe = pipe;
46658
46659 - pipe->readers = pipe->writers = 1;
46660 + atomic_set(&pipe->readers, 1);
46661 + atomic_set(&pipe->writers, 1);
46662 inode->i_fop = &rdwr_pipefifo_fops;
46663
46664 /*
46665 diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
46666 index 15af622..0e9f4467 100644
46667 --- a/fs/proc/Kconfig
46668 +++ b/fs/proc/Kconfig
46669 @@ -30,12 +30,12 @@ config PROC_FS
46670
46671 config PROC_KCORE
46672 bool "/proc/kcore support" if !ARM
46673 - depends on PROC_FS && MMU
46674 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
46675
46676 config PROC_VMCORE
46677 bool "/proc/vmcore support"
46678 - depends on PROC_FS && CRASH_DUMP
46679 - default y
46680 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
46681 + default n
46682 help
46683 Exports the dump image of crashed kernel in ELF format.
46684
46685 @@ -59,8 +59,8 @@ config PROC_SYSCTL
46686 limited in memory.
46687
46688 config PROC_PAGE_MONITOR
46689 - default y
46690 - depends on PROC_FS && MMU
46691 + default n
46692 + depends on PROC_FS && MMU && !GRKERNSEC
46693 bool "Enable /proc page monitoring" if EXPERT
46694 help
46695 Various /proc files exist to monitor process memory utilization:
46696 diff --git a/fs/proc/array.c b/fs/proc/array.c
46697 index c602b8d..a7de642 100644
46698 --- a/fs/proc/array.c
46699 +++ b/fs/proc/array.c
46700 @@ -60,6 +60,7 @@
46701 #include <linux/tty.h>
46702 #include <linux/string.h>
46703 #include <linux/mman.h>
46704 +#include <linux/grsecurity.h>
46705 #include <linux/proc_fs.h>
46706 #include <linux/ioport.h>
46707 #include <linux/uaccess.h>
46708 @@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
46709 seq_putc(m, '\n');
46710 }
46711
46712 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
46713 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
46714 +{
46715 + if (p->mm)
46716 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
46717 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
46718 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
46719 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
46720 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
46721 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
46722 + else
46723 + seq_printf(m, "PaX:\t-----\n");
46724 +}
46725 +#endif
46726 +
46727 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
46728 struct pid *pid, struct task_struct *task)
46729 {
46730 @@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
46731 task_cpus_allowed(m, task);
46732 cpuset_task_status_allowed(m, task);
46733 task_context_switch_counts(m, task);
46734 +
46735 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
46736 + task_pax(m, task);
46737 +#endif
46738 +
46739 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
46740 + task_grsec_rbac(m, task);
46741 +#endif
46742 +
46743 return 0;
46744 }
46745
46746 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46747 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
46748 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
46749 + _mm->pax_flags & MF_PAX_SEGMEXEC))
46750 +#endif
46751 +
46752 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
46753 struct pid *pid, struct task_struct *task, int whole)
46754 {
46755 @@ -378,6 +409,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
46756 char tcomm[sizeof(task->comm)];
46757 unsigned long flags;
46758
46759 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46760 + if (current->exec_id != m->exec_id) {
46761 + gr_log_badprocpid("stat");
46762 + return 0;
46763 + }
46764 +#endif
46765 +
46766 state = *get_task_state(task);
46767 vsize = eip = esp = 0;
46768 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
46769 @@ -449,6 +487,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
46770 gtime = task->gtime;
46771 }
46772
46773 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46774 + if (PAX_RAND_FLAGS(mm)) {
46775 + eip = 0;
46776 + esp = 0;
46777 + wchan = 0;
46778 + }
46779 +#endif
46780 +#ifdef CONFIG_GRKERNSEC_HIDESYM
46781 + wchan = 0;
46782 + eip =0;
46783 + esp =0;
46784 +#endif
46785 +
46786 /* scale priority and nice values from timeslices to -20..20 */
46787 /* to make it look like a "normal" Unix priority/nice value */
46788 priority = task_prio(task);
46789 @@ -489,9 +540,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
46790 vsize,
46791 mm ? get_mm_rss(mm) : 0,
46792 rsslim,
46793 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46794 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
46795 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
46796 + PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
46797 +#else
46798 mm ? (permitted ? mm->start_code : 1) : 0,
46799 mm ? (permitted ? mm->end_code : 1) : 0,
46800 (permitted && mm) ? mm->start_stack : 0,
46801 +#endif
46802 esp,
46803 eip,
46804 /* The signal information here is obsolete.
46805 @@ -536,8 +593,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
46806 struct pid *pid, struct task_struct *task)
46807 {
46808 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
46809 - struct mm_struct *mm = get_task_mm(task);
46810 + struct mm_struct *mm;
46811
46812 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46813 + if (current->exec_id != m->exec_id) {
46814 + gr_log_badprocpid("statm");
46815 + return 0;
46816 + }
46817 +#endif
46818 + mm = get_task_mm(task);
46819 if (mm) {
46820 size = task_statm(mm, &shared, &text, &data, &resident);
46821 mmput(mm);
46822 @@ -547,3 +611,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
46823
46824 return 0;
46825 }
46826 +
46827 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
46828 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
46829 +{
46830 + u32 curr_ip = 0;
46831 + unsigned long flags;
46832 +
46833 + if (lock_task_sighand(task, &flags)) {
46834 + curr_ip = task->signal->curr_ip;
46835 + unlock_task_sighand(task, &flags);
46836 + }
46837 +
46838 + return sprintf(buffer, "%pI4\n", &curr_ip);
46839 +}
46840 +#endif
46841 diff --git a/fs/proc/base.c b/fs/proc/base.c
46842 index d4548dd..d101f84 100644
46843 --- a/fs/proc/base.c
46844 +++ b/fs/proc/base.c
46845 @@ -109,6 +109,14 @@ struct pid_entry {
46846 union proc_op op;
46847 };
46848
46849 +struct getdents_callback {
46850 + struct linux_dirent __user * current_dir;
46851 + struct linux_dirent __user * previous;
46852 + struct file * file;
46853 + int count;
46854 + int error;
46855 +};
46856 +
46857 #define NOD(NAME, MODE, IOP, FOP, OP) { \
46858 .name = (NAME), \
46859 .len = sizeof(NAME) - 1, \
46860 @@ -213,6 +221,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
46861 if (!mm->arg_end)
46862 goto out_mm; /* Shh! No looking before we're done */
46863
46864 + if (gr_acl_handle_procpidmem(task))
46865 + goto out_mm;
46866 +
46867 len = mm->arg_end - mm->arg_start;
46868
46869 if (len > PAGE_SIZE)
46870 @@ -240,12 +251,28 @@ out:
46871 return res;
46872 }
46873
46874 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46875 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
46876 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
46877 + _mm->pax_flags & MF_PAX_SEGMEXEC))
46878 +#endif
46879 +
46880 static int proc_pid_auxv(struct task_struct *task, char *buffer)
46881 {
46882 struct mm_struct *mm = mm_for_maps(task);
46883 int res = PTR_ERR(mm);
46884 if (mm && !IS_ERR(mm)) {
46885 unsigned int nwords = 0;
46886 +
46887 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46888 + /* allow if we're currently ptracing this task */
46889 + if (PAX_RAND_FLAGS(mm) &&
46890 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
46891 + mmput(mm);
46892 + return 0;
46893 + }
46894 +#endif
46895 +
46896 do {
46897 nwords += 2;
46898 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
46899 @@ -259,7 +286,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
46900 }
46901
46902
46903 -#ifdef CONFIG_KALLSYMS
46904 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
46905 /*
46906 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
46907 * Returns the resolved symbol. If that fails, simply return the address.
46908 @@ -298,7 +325,7 @@ static void unlock_trace(struct task_struct *task)
46909 mutex_unlock(&task->signal->cred_guard_mutex);
46910 }
46911
46912 -#ifdef CONFIG_STACKTRACE
46913 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
46914
46915 #define MAX_STACK_TRACE_DEPTH 64
46916
46917 @@ -489,7 +516,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
46918 return count;
46919 }
46920
46921 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
46922 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
46923 static int proc_pid_syscall(struct task_struct *task, char *buffer)
46924 {
46925 long nr;
46926 @@ -518,7 +545,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
46927 /************************************************************************/
46928
46929 /* permission checks */
46930 -static int proc_fd_access_allowed(struct inode *inode)
46931 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
46932 {
46933 struct task_struct *task;
46934 int allowed = 0;
46935 @@ -528,7 +555,10 @@ static int proc_fd_access_allowed(struct inode *inode)
46936 */
46937 task = get_proc_task(inode);
46938 if (task) {
46939 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
46940 + if (log)
46941 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
46942 + else
46943 + allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
46944 put_task_struct(task);
46945 }
46946 return allowed;
46947 @@ -566,10 +596,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
46948 struct task_struct *task,
46949 int hide_pid_min)
46950 {
46951 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
46952 + return false;
46953 +
46954 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46955 + rcu_read_lock();
46956 + {
46957 + const struct cred *tmpcred = current_cred();
46958 + const struct cred *cred = __task_cred(task);
46959 +
46960 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
46961 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
46962 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
46963 +#endif
46964 + ) {
46965 + rcu_read_unlock();
46966 + return true;
46967 + }
46968 + }
46969 + rcu_read_unlock();
46970 +
46971 + if (!pid->hide_pid)
46972 + return false;
46973 +#endif
46974 +
46975 if (pid->hide_pid < hide_pid_min)
46976 return true;
46977 if (in_group_p(pid->pid_gid))
46978 return true;
46979 +
46980 return ptrace_may_access(task, PTRACE_MODE_READ);
46981 }
46982
46983 @@ -587,7 +642,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
46984 put_task_struct(task);
46985
46986 if (!has_perms) {
46987 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46988 + {
46989 +#else
46990 if (pid->hide_pid == 2) {
46991 +#endif
46992 /*
46993 * Let's make getdents(), stat(), and open()
46994 * consistent with each other. If a process
46995 @@ -702,6 +761,10 @@ static int mem_open(struct inode* inode, struct file* file)
46996 file->f_mode |= FMODE_UNSIGNED_OFFSET;
46997 file->private_data = mm;
46998
46999 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47000 + file->f_version = current->exec_id;
47001 +#endif
47002 +
47003 return 0;
47004 }
47005
47006 @@ -713,6 +776,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
47007 ssize_t copied;
47008 char *page;
47009
47010 +#ifdef CONFIG_GRKERNSEC
47011 + if (write)
47012 + return -EPERM;
47013 +#endif
47014 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47015 + if (file->f_version != current->exec_id) {
47016 + gr_log_badprocpid("mem");
47017 + return 0;
47018 + }
47019 +#endif
47020 +
47021 if (!mm)
47022 return 0;
47023
47024 @@ -813,6 +887,9 @@ static ssize_t environ_read(struct file *file, char __user *buf,
47025 if (!task)
47026 goto out_no_task;
47027
47028 + if (gr_acl_handle_procpidmem(task))
47029 + goto out;
47030 +
47031 ret = -ENOMEM;
47032 page = (char *)__get_free_page(GFP_TEMPORARY);
47033 if (!page)
47034 @@ -1434,7 +1511,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
47035 path_put(&nd->path);
47036
47037 /* Are we allowed to snoop on the tasks file descriptors? */
47038 - if (!proc_fd_access_allowed(inode))
47039 + if (!proc_fd_access_allowed(inode, 0))
47040 goto out;
47041
47042 error = PROC_I(inode)->op.proc_get_link(dentry, &nd->path);
47043 @@ -1473,8 +1550,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
47044 struct path path;
47045
47046 /* Are we allowed to snoop on the tasks file descriptors? */
47047 - if (!proc_fd_access_allowed(inode))
47048 - goto out;
47049 + /* logging this is needed for learning on chromium to work properly,
47050 + but we don't want to flood the logs from 'ps' which does a readlink
47051 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
47052 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
47053 + */
47054 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
47055 + if (!proc_fd_access_allowed(inode,0))
47056 + goto out;
47057 + } else {
47058 + if (!proc_fd_access_allowed(inode,1))
47059 + goto out;
47060 + }
47061
47062 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
47063 if (error)
47064 @@ -1539,7 +1626,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
47065 rcu_read_lock();
47066 cred = __task_cred(task);
47067 inode->i_uid = cred->euid;
47068 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47069 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
47070 +#else
47071 inode->i_gid = cred->egid;
47072 +#endif
47073 rcu_read_unlock();
47074 }
47075 security_task_to_inode(task, inode);
47076 @@ -1575,10 +1666,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
47077 return -ENOENT;
47078 }
47079 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
47080 +#ifdef CONFIG_GRKERNSEC_PROC_USER
47081 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
47082 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47083 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
47084 +#endif
47085 task_dumpable(task)) {
47086 cred = __task_cred(task);
47087 stat->uid = cred->euid;
47088 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47089 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
47090 +#else
47091 stat->gid = cred->egid;
47092 +#endif
47093 }
47094 }
47095 rcu_read_unlock();
47096 @@ -1616,11 +1716,20 @@ int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
47097
47098 if (task) {
47099 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
47100 +#ifdef CONFIG_GRKERNSEC_PROC_USER
47101 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
47102 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47103 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
47104 +#endif
47105 task_dumpable(task)) {
47106 rcu_read_lock();
47107 cred = __task_cred(task);
47108 inode->i_uid = cred->euid;
47109 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47110 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
47111 +#else
47112 inode->i_gid = cred->egid;
47113 +#endif
47114 rcu_read_unlock();
47115 } else {
47116 inode->i_uid = 0;
47117 @@ -1738,7 +1847,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
47118 int fd = proc_fd(inode);
47119
47120 if (task) {
47121 - files = get_files_struct(task);
47122 + if (!gr_acl_handle_procpidmem(task))
47123 + files = get_files_struct(task);
47124 put_task_struct(task);
47125 }
47126 if (files) {
47127 @@ -2355,11 +2465,21 @@ static const struct file_operations proc_map_files_operations = {
47128 */
47129 static int proc_fd_permission(struct inode *inode, int mask)
47130 {
47131 + struct task_struct *task;
47132 int rv = generic_permission(inode, mask);
47133 - if (rv == 0)
47134 - return 0;
47135 +
47136 if (task_pid(current) == proc_pid(inode))
47137 rv = 0;
47138 +
47139 + task = get_proc_task(inode);
47140 + if (task == NULL)
47141 + return rv;
47142 +
47143 + if (gr_acl_handle_procpidmem(task))
47144 + rv = -EACCES;
47145 +
47146 + put_task_struct(task);
47147 +
47148 return rv;
47149 }
47150
47151 @@ -2469,6 +2589,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
47152 if (!task)
47153 goto out_no_task;
47154
47155 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
47156 + goto out;
47157 +
47158 /*
47159 * Yes, it does not scale. And it should not. Don't add
47160 * new entries into /proc/<tgid>/ without very good reasons.
47161 @@ -2513,6 +2636,9 @@ static int proc_pident_readdir(struct file *filp,
47162 if (!task)
47163 goto out_no_task;
47164
47165 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
47166 + goto out;
47167 +
47168 ret = 0;
47169 i = filp->f_pos;
47170 switch (i) {
47171 @@ -2783,7 +2909,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
47172 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
47173 void *cookie)
47174 {
47175 - char *s = nd_get_link(nd);
47176 + const char *s = nd_get_link(nd);
47177 if (!IS_ERR(s))
47178 __putname(s);
47179 }
47180 @@ -2984,7 +3110,7 @@ static const struct pid_entry tgid_base_stuff[] = {
47181 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
47182 #endif
47183 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
47184 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
47185 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
47186 INF("syscall", S_IRUGO, proc_pid_syscall),
47187 #endif
47188 INF("cmdline", S_IRUGO, proc_pid_cmdline),
47189 @@ -3009,10 +3135,10 @@ static const struct pid_entry tgid_base_stuff[] = {
47190 #ifdef CONFIG_SECURITY
47191 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
47192 #endif
47193 -#ifdef CONFIG_KALLSYMS
47194 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47195 INF("wchan", S_IRUGO, proc_pid_wchan),
47196 #endif
47197 -#ifdef CONFIG_STACKTRACE
47198 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47199 ONE("stack", S_IRUGO, proc_pid_stack),
47200 #endif
47201 #ifdef CONFIG_SCHEDSTATS
47202 @@ -3046,6 +3172,9 @@ static const struct pid_entry tgid_base_stuff[] = {
47203 #ifdef CONFIG_HARDWALL
47204 INF("hardwall", S_IRUGO, proc_pid_hardwall),
47205 #endif
47206 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
47207 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
47208 +#endif
47209 };
47210
47211 static int proc_tgid_base_readdir(struct file * filp,
47212 @@ -3172,7 +3301,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
47213 if (!inode)
47214 goto out;
47215
47216 +#ifdef CONFIG_GRKERNSEC_PROC_USER
47217 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
47218 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47219 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
47220 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
47221 +#else
47222 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
47223 +#endif
47224 inode->i_op = &proc_tgid_base_inode_operations;
47225 inode->i_fop = &proc_tgid_base_operations;
47226 inode->i_flags|=S_IMMUTABLE;
47227 @@ -3214,7 +3350,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
47228 if (!task)
47229 goto out;
47230
47231 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
47232 + goto out_put_task;
47233 +
47234 result = proc_pid_instantiate(dir, dentry, task, NULL);
47235 +out_put_task:
47236 put_task_struct(task);
47237 out:
47238 return result;
47239 @@ -3277,6 +3417,8 @@ static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldi
47240 static int fake_filldir(void *buf, const char *name, int namelen,
47241 loff_t offset, u64 ino, unsigned d_type)
47242 {
47243 + struct getdents_callback * __buf = (struct getdents_callback *) buf;
47244 + __buf->error = -EINVAL;
47245 return 0;
47246 }
47247
47248 @@ -3343,7 +3485,7 @@ static const struct pid_entry tid_base_stuff[] = {
47249 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
47250 #endif
47251 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
47252 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
47253 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
47254 INF("syscall", S_IRUGO, proc_pid_syscall),
47255 #endif
47256 INF("cmdline", S_IRUGO, proc_pid_cmdline),
47257 @@ -3367,10 +3509,10 @@ static const struct pid_entry tid_base_stuff[] = {
47258 #ifdef CONFIG_SECURITY
47259 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
47260 #endif
47261 -#ifdef CONFIG_KALLSYMS
47262 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47263 INF("wchan", S_IRUGO, proc_pid_wchan),
47264 #endif
47265 -#ifdef CONFIG_STACKTRACE
47266 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47267 ONE("stack", S_IRUGO, proc_pid_stack),
47268 #endif
47269 #ifdef CONFIG_SCHEDSTATS
47270 diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
47271 index 82676e3..5f8518a 100644
47272 --- a/fs/proc/cmdline.c
47273 +++ b/fs/proc/cmdline.c
47274 @@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
47275
47276 static int __init proc_cmdline_init(void)
47277 {
47278 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
47279 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
47280 +#else
47281 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
47282 +#endif
47283 return 0;
47284 }
47285 module_init(proc_cmdline_init);
47286 diff --git a/fs/proc/devices.c b/fs/proc/devices.c
47287 index b143471..bb105e5 100644
47288 --- a/fs/proc/devices.c
47289 +++ b/fs/proc/devices.c
47290 @@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
47291
47292 static int __init proc_devices_init(void)
47293 {
47294 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
47295 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
47296 +#else
47297 proc_create("devices", 0, NULL, &proc_devinfo_operations);
47298 +#endif
47299 return 0;
47300 }
47301 module_init(proc_devices_init);
47302 diff --git a/fs/proc/inode.c b/fs/proc/inode.c
47303 index 84fd323..f698a32 100644
47304 --- a/fs/proc/inode.c
47305 +++ b/fs/proc/inode.c
47306 @@ -21,12 +21,18 @@
47307 #include <linux/seq_file.h>
47308 #include <linux/slab.h>
47309 #include <linux/mount.h>
47310 +#include <linux/grsecurity.h>
47311
47312 #include <asm/system.h>
47313 #include <asm/uaccess.h>
47314
47315 #include "internal.h"
47316
47317 +#ifdef CONFIG_PROC_SYSCTL
47318 +extern const struct inode_operations proc_sys_inode_operations;
47319 +extern const struct inode_operations proc_sys_dir_operations;
47320 +#endif
47321 +
47322 static void proc_evict_inode(struct inode *inode)
47323 {
47324 struct proc_dir_entry *de;
47325 @@ -52,6 +58,13 @@ static void proc_evict_inode(struct inode *inode)
47326 ns_ops = PROC_I(inode)->ns_ops;
47327 if (ns_ops && ns_ops->put)
47328 ns_ops->put(PROC_I(inode)->ns);
47329 +
47330 +#ifdef CONFIG_PROC_SYSCTL
47331 + if (inode->i_op == &proc_sys_inode_operations ||
47332 + inode->i_op == &proc_sys_dir_operations)
47333 + gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
47334 +#endif
47335 +
47336 }
47337
47338 static struct kmem_cache * proc_inode_cachep;
47339 @@ -457,7 +470,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
47340 if (de->mode) {
47341 inode->i_mode = de->mode;
47342 inode->i_uid = de->uid;
47343 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47344 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
47345 +#else
47346 inode->i_gid = de->gid;
47347 +#endif
47348 }
47349 if (de->size)
47350 inode->i_size = de->size;
47351 diff --git a/fs/proc/internal.h b/fs/proc/internal.h
47352 index 2925775..4f08fae 100644
47353 --- a/fs/proc/internal.h
47354 +++ b/fs/proc/internal.h
47355 @@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
47356 struct pid *pid, struct task_struct *task);
47357 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
47358 struct pid *pid, struct task_struct *task);
47359 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
47360 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
47361 +#endif
47362 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
47363
47364 extern const struct file_operations proc_maps_operations;
47365 diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
47366 index d245cb2..f4e8498 100644
47367 --- a/fs/proc/kcore.c
47368 +++ b/fs/proc/kcore.c
47369 @@ -478,9 +478,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
47370 * the addresses in the elf_phdr on our list.
47371 */
47372 start = kc_offset_to_vaddr(*fpos - elf_buflen);
47373 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
47374 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
47375 + if (tsz > buflen)
47376 tsz = buflen;
47377 -
47378 +
47379 while (buflen) {
47380 struct kcore_list *m;
47381
47382 @@ -509,20 +510,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
47383 kfree(elf_buf);
47384 } else {
47385 if (kern_addr_valid(start)) {
47386 - unsigned long n;
47387 + char *elf_buf;
47388 + mm_segment_t oldfs;
47389
47390 - n = copy_to_user(buffer, (char *)start, tsz);
47391 - /*
47392 - * We cannot distingush between fault on source
47393 - * and fault on destination. When this happens
47394 - * we clear too and hope it will trigger the
47395 - * EFAULT again.
47396 - */
47397 - if (n) {
47398 - if (clear_user(buffer + tsz - n,
47399 - n))
47400 + elf_buf = kmalloc(tsz, GFP_KERNEL);
47401 + if (!elf_buf)
47402 + return -ENOMEM;
47403 + oldfs = get_fs();
47404 + set_fs(KERNEL_DS);
47405 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
47406 + set_fs(oldfs);
47407 + if (copy_to_user(buffer, elf_buf, tsz)) {
47408 + kfree(elf_buf);
47409 return -EFAULT;
47410 + }
47411 }
47412 + set_fs(oldfs);
47413 + kfree(elf_buf);
47414 } else {
47415 if (clear_user(buffer, tsz))
47416 return -EFAULT;
47417 @@ -542,6 +546,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
47418
47419 static int open_kcore(struct inode *inode, struct file *filp)
47420 {
47421 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
47422 + return -EPERM;
47423 +#endif
47424 if (!capable(CAP_SYS_RAWIO))
47425 return -EPERM;
47426 if (kcore_need_update)
47427 diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
47428 index 80e4645..53e5fcf 100644
47429 --- a/fs/proc/meminfo.c
47430 +++ b/fs/proc/meminfo.c
47431 @@ -158,7 +158,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
47432 vmi.used >> 10,
47433 vmi.largest_chunk >> 10
47434 #ifdef CONFIG_MEMORY_FAILURE
47435 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
47436 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
47437 #endif
47438 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
47439 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
47440 diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
47441 index b1822dd..df622cb 100644
47442 --- a/fs/proc/nommu.c
47443 +++ b/fs/proc/nommu.c
47444 @@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
47445 if (len < 1)
47446 len = 1;
47447 seq_printf(m, "%*c", len, ' ');
47448 - seq_path(m, &file->f_path, "");
47449 + seq_path(m, &file->f_path, "\n\\");
47450 }
47451
47452 seq_putc(m, '\n');
47453 diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
47454 index 06e1cc1..177cd98 100644
47455 --- a/fs/proc/proc_net.c
47456 +++ b/fs/proc/proc_net.c
47457 @@ -105,6 +105,17 @@ static struct net *get_proc_task_net(struct inode *dir)
47458 struct task_struct *task;
47459 struct nsproxy *ns;
47460 struct net *net = NULL;
47461 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47462 + const struct cred *cred = current_cred();
47463 +#endif
47464 +
47465 +#ifdef CONFIG_GRKERNSEC_PROC_USER
47466 + if (cred->fsuid)
47467 + return net;
47468 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47469 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
47470 + return net;
47471 +#endif
47472
47473 rcu_read_lock();
47474 task = pid_task(proc_pid(dir), PIDTYPE_PID);
47475 diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
47476 index a6b6217..1e0579d 100644
47477 --- a/fs/proc/proc_sysctl.c
47478 +++ b/fs/proc/proc_sysctl.c
47479 @@ -9,11 +9,13 @@
47480 #include <linux/namei.h>
47481 #include "internal.h"
47482
47483 +extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
47484 +
47485 static const struct dentry_operations proc_sys_dentry_operations;
47486 static const struct file_operations proc_sys_file_operations;
47487 -static const struct inode_operations proc_sys_inode_operations;
47488 +const struct inode_operations proc_sys_inode_operations;
47489 static const struct file_operations proc_sys_dir_file_operations;
47490 -static const struct inode_operations proc_sys_dir_operations;
47491 +const struct inode_operations proc_sys_dir_operations;
47492
47493 void proc_sys_poll_notify(struct ctl_table_poll *poll)
47494 {
47495 @@ -131,8 +133,14 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
47496
47497 err = NULL;
47498 d_set_d_op(dentry, &proc_sys_dentry_operations);
47499 +
47500 + gr_handle_proc_create(dentry, inode);
47501 +
47502 d_add(dentry, inode);
47503
47504 + if (gr_handle_sysctl(p, MAY_EXEC))
47505 + err = ERR_PTR(-ENOENT);
47506 +
47507 out:
47508 sysctl_head_finish(head);
47509 return err;
47510 @@ -163,6 +171,12 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
47511 if (!table->proc_handler)
47512 goto out;
47513
47514 +#ifdef CONFIG_GRKERNSEC
47515 + error = -EPERM;
47516 + if (write && !capable(CAP_SYS_ADMIN))
47517 + goto out;
47518 +#endif
47519 +
47520 /* careful: calling conventions are nasty here */
47521 res = count;
47522 error = table->proc_handler(table, write, buf, &res, ppos);
47523 @@ -245,6 +259,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
47524 return -ENOMEM;
47525 } else {
47526 d_set_d_op(child, &proc_sys_dentry_operations);
47527 +
47528 + gr_handle_proc_create(child, inode);
47529 +
47530 d_add(child, inode);
47531 }
47532 } else {
47533 @@ -273,6 +290,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
47534 if (*pos < file->f_pos)
47535 continue;
47536
47537 + if (gr_handle_sysctl(table, 0))
47538 + continue;
47539 +
47540 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
47541 if (res)
47542 return res;
47543 @@ -398,6 +418,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
47544 if (IS_ERR(head))
47545 return PTR_ERR(head);
47546
47547 + if (table && gr_handle_sysctl(table, MAY_EXEC))
47548 + return -ENOENT;
47549 +
47550 generic_fillattr(inode, stat);
47551 if (table)
47552 stat->mode = (stat->mode & S_IFMT) | table->mode;
47553 @@ -420,13 +443,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
47554 .llseek = generic_file_llseek,
47555 };
47556
47557 -static const struct inode_operations proc_sys_inode_operations = {
47558 +const struct inode_operations proc_sys_inode_operations = {
47559 .permission = proc_sys_permission,
47560 .setattr = proc_sys_setattr,
47561 .getattr = proc_sys_getattr,
47562 };
47563
47564 -static const struct inode_operations proc_sys_dir_operations = {
47565 +const struct inode_operations proc_sys_dir_operations = {
47566 .lookup = proc_sys_lookup,
47567 .permission = proc_sys_permission,
47568 .setattr = proc_sys_setattr,
47569 diff --git a/fs/proc/root.c b/fs/proc/root.c
47570 index 46a15d8..335631a 100644
47571 --- a/fs/proc/root.c
47572 +++ b/fs/proc/root.c
47573 @@ -187,7 +187,15 @@ void __init proc_root_init(void)
47574 #ifdef CONFIG_PROC_DEVICETREE
47575 proc_device_tree_init();
47576 #endif
47577 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
47578 +#ifdef CONFIG_GRKERNSEC_PROC_USER
47579 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
47580 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47581 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
47582 +#endif
47583 +#else
47584 proc_mkdir("bus", NULL);
47585 +#endif
47586 proc_sys_init();
47587 }
47588
47589 diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
47590 index 7dcd2a2..b2f410e 100644
47591 --- a/fs/proc/task_mmu.c
47592 +++ b/fs/proc/task_mmu.c
47593 @@ -11,6 +11,7 @@
47594 #include <linux/rmap.h>
47595 #include <linux/swap.h>
47596 #include <linux/swapops.h>
47597 +#include <linux/grsecurity.h>
47598
47599 #include <asm/elf.h>
47600 #include <asm/uaccess.h>
47601 @@ -52,8 +53,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
47602 "VmExe:\t%8lu kB\n"
47603 "VmLib:\t%8lu kB\n"
47604 "VmPTE:\t%8lu kB\n"
47605 - "VmSwap:\t%8lu kB\n",
47606 - hiwater_vm << (PAGE_SHIFT-10),
47607 + "VmSwap:\t%8lu kB\n"
47608 +
47609 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
47610 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
47611 +#endif
47612 +
47613 + ,hiwater_vm << (PAGE_SHIFT-10),
47614 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
47615 mm->locked_vm << (PAGE_SHIFT-10),
47616 mm->pinned_vm << (PAGE_SHIFT-10),
47617 @@ -62,7 +68,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
47618 data << (PAGE_SHIFT-10),
47619 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
47620 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
47621 - swap << (PAGE_SHIFT-10));
47622 + swap << (PAGE_SHIFT-10)
47623 +
47624 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
47625 + , mm->context.user_cs_base, mm->context.user_cs_limit
47626 +#endif
47627 +
47628 + );
47629 }
47630
47631 unsigned long task_vsize(struct mm_struct *mm)
47632 @@ -209,6 +221,12 @@ static int do_maps_open(struct inode *inode, struct file *file,
47633 return ret;
47634 }
47635
47636 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47637 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
47638 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
47639 + _mm->pax_flags & MF_PAX_SEGMEXEC))
47640 +#endif
47641 +
47642 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
47643 {
47644 struct mm_struct *mm = vma->vm_mm;
47645 @@ -227,13 +245,13 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
47646 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
47647 }
47648
47649 - /* We don't show the stack guard page in /proc/maps */
47650 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47651 + start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
47652 + end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
47653 +#else
47654 start = vma->vm_start;
47655 - if (stack_guard_page_start(vma, start))
47656 - start += PAGE_SIZE;
47657 end = vma->vm_end;
47658 - if (stack_guard_page_end(vma, end))
47659 - end -= PAGE_SIZE;
47660 +#endif
47661
47662 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
47663 start,
47664 @@ -242,7 +260,11 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
47665 flags & VM_WRITE ? 'w' : '-',
47666 flags & VM_EXEC ? 'x' : '-',
47667 flags & VM_MAYSHARE ? 's' : 'p',
47668 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47669 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
47670 +#else
47671 pgoff,
47672 +#endif
47673 MAJOR(dev), MINOR(dev), ino, &len);
47674
47675 /*
47676 @@ -251,7 +273,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
47677 */
47678 if (file) {
47679 pad_len_spaces(m, len);
47680 - seq_path(m, &file->f_path, "\n");
47681 + seq_path(m, &file->f_path, "\n\\");
47682 } else {
47683 const char *name = arch_vma_name(vma);
47684 if (!name) {
47685 @@ -259,8 +281,9 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
47686 if (vma->vm_start <= mm->brk &&
47687 vma->vm_end >= mm->start_brk) {
47688 name = "[heap]";
47689 - } else if (vma->vm_start <= mm->start_stack &&
47690 - vma->vm_end >= mm->start_stack) {
47691 + } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
47692 + (vma->vm_start <= mm->start_stack &&
47693 + vma->vm_end >= mm->start_stack)) {
47694 name = "[stack]";
47695 }
47696 } else {
47697 @@ -281,6 +304,13 @@ static int show_map(struct seq_file *m, void *v)
47698 struct proc_maps_private *priv = m->private;
47699 struct task_struct *task = priv->task;
47700
47701 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47702 + if (current->exec_id != m->exec_id) {
47703 + gr_log_badprocpid("maps");
47704 + return 0;
47705 + }
47706 +#endif
47707 +
47708 show_map_vma(m, vma);
47709
47710 if (m->count < m->size) /* vma is copied successfully */
47711 @@ -434,12 +464,23 @@ static int show_smap(struct seq_file *m, void *v)
47712 .private = &mss,
47713 };
47714
47715 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47716 + if (current->exec_id != m->exec_id) {
47717 + gr_log_badprocpid("smaps");
47718 + return 0;
47719 + }
47720 +#endif
47721 memset(&mss, 0, sizeof mss);
47722 - mss.vma = vma;
47723 - /* mmap_sem is held in m_start */
47724 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
47725 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
47726 -
47727 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47728 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
47729 +#endif
47730 + mss.vma = vma;
47731 + /* mmap_sem is held in m_start */
47732 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
47733 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
47734 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47735 + }
47736 +#endif
47737 show_map_vma(m, vma);
47738
47739 seq_printf(m,
47740 @@ -457,7 +498,11 @@ static int show_smap(struct seq_file *m, void *v)
47741 "KernelPageSize: %8lu kB\n"
47742 "MMUPageSize: %8lu kB\n"
47743 "Locked: %8lu kB\n",
47744 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47745 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
47746 +#else
47747 (vma->vm_end - vma->vm_start) >> 10,
47748 +#endif
47749 mss.resident >> 10,
47750 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
47751 mss.shared_clean >> 10,
47752 @@ -1015,6 +1060,13 @@ static int show_numa_map(struct seq_file *m, void *v)
47753 int n;
47754 char buffer[50];
47755
47756 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47757 + if (current->exec_id != m->exec_id) {
47758 + gr_log_badprocpid("numa_maps");
47759 + return 0;
47760 + }
47761 +#endif
47762 +
47763 if (!mm)
47764 return 0;
47765
47766 @@ -1032,11 +1084,15 @@ static int show_numa_map(struct seq_file *m, void *v)
47767 mpol_to_str(buffer, sizeof(buffer), pol, 0);
47768 mpol_cond_put(pol);
47769
47770 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47771 + seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
47772 +#else
47773 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
47774 +#endif
47775
47776 if (file) {
47777 seq_printf(m, " file=");
47778 - seq_path(m, &file->f_path, "\n\t= ");
47779 + seq_path(m, &file->f_path, "\n\t\\= ");
47780 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
47781 seq_printf(m, " heap");
47782 } else if (vma->vm_start <= mm->start_stack &&
47783 diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
47784 index 980de54..2a4db5f 100644
47785 --- a/fs/proc/task_nommu.c
47786 +++ b/fs/proc/task_nommu.c
47787 @@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
47788 else
47789 bytes += kobjsize(mm);
47790
47791 - if (current->fs && current->fs->users > 1)
47792 + if (current->fs && atomic_read(&current->fs->users) > 1)
47793 sbytes += kobjsize(current->fs);
47794 else
47795 bytes += kobjsize(current->fs);
47796 @@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
47797
47798 if (file) {
47799 pad_len_spaces(m, len);
47800 - seq_path(m, &file->f_path, "");
47801 + seq_path(m, &file->f_path, "\n\\");
47802 } else if (mm) {
47803 if (vma->vm_start <= mm->start_stack &&
47804 vma->vm_end >= mm->start_stack) {
47805 diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
47806 index d67908b..d13f6a6 100644
47807 --- a/fs/quota/netlink.c
47808 +++ b/fs/quota/netlink.c
47809 @@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
47810 void quota_send_warning(short type, unsigned int id, dev_t dev,
47811 const char warntype)
47812 {
47813 - static atomic_t seq;
47814 + static atomic_unchecked_t seq;
47815 struct sk_buff *skb;
47816 void *msg_head;
47817 int ret;
47818 @@ -49,7 +49,7 @@ void quota_send_warning(short type, unsigned int id, dev_t dev,
47819 "VFS: Not enough memory to send quota warning.\n");
47820 return;
47821 }
47822 - msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
47823 + msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
47824 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
47825 if (!msg_head) {
47826 printk(KERN_ERR
47827 diff --git a/fs/readdir.c b/fs/readdir.c
47828 index 356f715..c918d38 100644
47829 --- a/fs/readdir.c
47830 +++ b/fs/readdir.c
47831 @@ -17,6 +17,7 @@
47832 #include <linux/security.h>
47833 #include <linux/syscalls.h>
47834 #include <linux/unistd.h>
47835 +#include <linux/namei.h>
47836
47837 #include <asm/uaccess.h>
47838
47839 @@ -67,6 +68,7 @@ struct old_linux_dirent {
47840
47841 struct readdir_callback {
47842 struct old_linux_dirent __user * dirent;
47843 + struct file * file;
47844 int result;
47845 };
47846
47847 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
47848 buf->result = -EOVERFLOW;
47849 return -EOVERFLOW;
47850 }
47851 +
47852 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
47853 + return 0;
47854 +
47855 buf->result++;
47856 dirent = buf->dirent;
47857 if (!access_ok(VERIFY_WRITE, dirent,
47858 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
47859
47860 buf.result = 0;
47861 buf.dirent = dirent;
47862 + buf.file = file;
47863
47864 error = vfs_readdir(file, fillonedir, &buf);
47865 if (buf.result)
47866 @@ -142,6 +149,7 @@ struct linux_dirent {
47867 struct getdents_callback {
47868 struct linux_dirent __user * current_dir;
47869 struct linux_dirent __user * previous;
47870 + struct file * file;
47871 int count;
47872 int error;
47873 };
47874 @@ -163,6 +171,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
47875 buf->error = -EOVERFLOW;
47876 return -EOVERFLOW;
47877 }
47878 +
47879 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
47880 + return 0;
47881 +
47882 dirent = buf->previous;
47883 if (dirent) {
47884 if (__put_user(offset, &dirent->d_off))
47885 @@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
47886 buf.previous = NULL;
47887 buf.count = count;
47888 buf.error = 0;
47889 + buf.file = file;
47890
47891 error = vfs_readdir(file, filldir, &buf);
47892 if (error >= 0)
47893 @@ -229,6 +242,7 @@ out:
47894 struct getdents_callback64 {
47895 struct linux_dirent64 __user * current_dir;
47896 struct linux_dirent64 __user * previous;
47897 + struct file *file;
47898 int count;
47899 int error;
47900 };
47901 @@ -244,6 +258,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
47902 buf->error = -EINVAL; /* only used if we fail.. */
47903 if (reclen > buf->count)
47904 return -EINVAL;
47905 +
47906 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
47907 + return 0;
47908 +
47909 dirent = buf->previous;
47910 if (dirent) {
47911 if (__put_user(offset, &dirent->d_off))
47912 @@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
47913
47914 buf.current_dir = dirent;
47915 buf.previous = NULL;
47916 + buf.file = file;
47917 buf.count = count;
47918 buf.error = 0;
47919
47920 @@ -299,7 +318,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
47921 error = buf.error;
47922 lastdirent = buf.previous;
47923 if (lastdirent) {
47924 - typeof(lastdirent->d_off) d_off = file->f_pos;
47925 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
47926 if (__put_user(d_off, &lastdirent->d_off))
47927 error = -EFAULT;
47928 else
47929 diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
47930 index 60c0804..d814f98 100644
47931 --- a/fs/reiserfs/do_balan.c
47932 +++ b/fs/reiserfs/do_balan.c
47933 @@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
47934 return;
47935 }
47936
47937 - atomic_inc(&(fs_generation(tb->tb_sb)));
47938 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
47939 do_balance_starts(tb);
47940
47941 /* balance leaf returns 0 except if combining L R and S into
47942 diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
47943 index 7a99811..a7c96c4 100644
47944 --- a/fs/reiserfs/procfs.c
47945 +++ b/fs/reiserfs/procfs.c
47946 @@ -113,7 +113,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
47947 "SMALL_TAILS " : "NO_TAILS ",
47948 replay_only(sb) ? "REPLAY_ONLY " : "",
47949 convert_reiserfs(sb) ? "CONV " : "",
47950 - atomic_read(&r->s_generation_counter),
47951 + atomic_read_unchecked(&r->s_generation_counter),
47952 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
47953 SF(s_do_balance), SF(s_unneeded_left_neighbor),
47954 SF(s_good_search_by_key_reada), SF(s_bmaps),
47955 diff --git a/fs/select.c b/fs/select.c
47956 index e782258..3b4b44c 100644
47957 --- a/fs/select.c
47958 +++ b/fs/select.c
47959 @@ -20,6 +20,7 @@
47960 #include <linux/module.h>
47961 #include <linux/slab.h>
47962 #include <linux/poll.h>
47963 +#include <linux/security.h>
47964 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
47965 #include <linux/file.h>
47966 #include <linux/fdtable.h>
47967 @@ -837,6 +838,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
47968 struct poll_list *walk = head;
47969 unsigned long todo = nfds;
47970
47971 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
47972 if (nfds > rlimit(RLIMIT_NOFILE))
47973 return -EINVAL;
47974
47975 diff --git a/fs/seq_file.c b/fs/seq_file.c
47976 index 4023d6b..53b39c5 100644
47977 --- a/fs/seq_file.c
47978 +++ b/fs/seq_file.c
47979 @@ -9,6 +9,7 @@
47980 #include <linux/module.h>
47981 #include <linux/seq_file.h>
47982 #include <linux/slab.h>
47983 +#include <linux/sched.h>
47984
47985 #include <asm/uaccess.h>
47986 #include <asm/page.h>
47987 @@ -40,6 +41,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
47988 memset(p, 0, sizeof(*p));
47989 mutex_init(&p->lock);
47990 p->op = op;
47991 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47992 + p->exec_id = current->exec_id;
47993 +#endif
47994
47995 /*
47996 * Wrappers around seq_open(e.g. swaps_open) need to be
47997 @@ -76,7 +80,8 @@ static int traverse(struct seq_file *m, loff_t offset)
47998 return 0;
47999 }
48000 if (!m->buf) {
48001 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
48002 + m->size = PAGE_SIZE;
48003 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
48004 if (!m->buf)
48005 return -ENOMEM;
48006 }
48007 @@ -116,7 +121,8 @@ static int traverse(struct seq_file *m, loff_t offset)
48008 Eoverflow:
48009 m->op->stop(m, p);
48010 kfree(m->buf);
48011 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
48012 + m->size <<= 1;
48013 + m->buf = kmalloc(m->size, GFP_KERNEL);
48014 return !m->buf ? -ENOMEM : -EAGAIN;
48015 }
48016
48017 @@ -169,7 +175,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
48018 m->version = file->f_version;
48019 /* grab buffer if we didn't have one */
48020 if (!m->buf) {
48021 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
48022 + m->size = PAGE_SIZE;
48023 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
48024 if (!m->buf)
48025 goto Enomem;
48026 }
48027 @@ -210,7 +217,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
48028 goto Fill;
48029 m->op->stop(m, p);
48030 kfree(m->buf);
48031 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
48032 + m->size <<= 1;
48033 + m->buf = kmalloc(m->size, GFP_KERNEL);
48034 if (!m->buf)
48035 goto Enomem;
48036 m->count = 0;
48037 @@ -549,7 +557,7 @@ static void single_stop(struct seq_file *p, void *v)
48038 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
48039 void *data)
48040 {
48041 - struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
48042 + seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
48043 int res = -ENOMEM;
48044
48045 if (op) {
48046 diff --git a/fs/splice.c b/fs/splice.c
48047 index 1ec0493..d6ab5c2 100644
48048 --- a/fs/splice.c
48049 +++ b/fs/splice.c
48050 @@ -193,7 +193,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
48051 pipe_lock(pipe);
48052
48053 for (;;) {
48054 - if (!pipe->readers) {
48055 + if (!atomic_read(&pipe->readers)) {
48056 send_sig(SIGPIPE, current, 0);
48057 if (!ret)
48058 ret = -EPIPE;
48059 @@ -247,9 +247,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
48060 do_wakeup = 0;
48061 }
48062
48063 - pipe->waiting_writers++;
48064 + atomic_inc(&pipe->waiting_writers);
48065 pipe_wait(pipe);
48066 - pipe->waiting_writers--;
48067 + atomic_dec(&pipe->waiting_writers);
48068 }
48069
48070 pipe_unlock(pipe);
48071 @@ -559,7 +559,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
48072 old_fs = get_fs();
48073 set_fs(get_ds());
48074 /* The cast to a user pointer is valid due to the set_fs() */
48075 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
48076 + res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
48077 set_fs(old_fs);
48078
48079 return res;
48080 @@ -574,7 +574,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
48081 old_fs = get_fs();
48082 set_fs(get_ds());
48083 /* The cast to a user pointer is valid due to the set_fs() */
48084 - res = vfs_write(file, (const char __user *)buf, count, &pos);
48085 + res = vfs_write(file, (const char __force_user *)buf, count, &pos);
48086 set_fs(old_fs);
48087
48088 return res;
48089 @@ -625,7 +625,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
48090 goto err;
48091
48092 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
48093 - vec[i].iov_base = (void __user *) page_address(page);
48094 + vec[i].iov_base = (void __force_user *) page_address(page);
48095 vec[i].iov_len = this_len;
48096 spd.pages[i] = page;
48097 spd.nr_pages++;
48098 @@ -845,10 +845,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
48099 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
48100 {
48101 while (!pipe->nrbufs) {
48102 - if (!pipe->writers)
48103 + if (!atomic_read(&pipe->writers))
48104 return 0;
48105
48106 - if (!pipe->waiting_writers && sd->num_spliced)
48107 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
48108 return 0;
48109
48110 if (sd->flags & SPLICE_F_NONBLOCK)
48111 @@ -1181,7 +1181,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
48112 * out of the pipe right after the splice_to_pipe(). So set
48113 * PIPE_READERS appropriately.
48114 */
48115 - pipe->readers = 1;
48116 + atomic_set(&pipe->readers, 1);
48117
48118 current->splice_pipe = pipe;
48119 }
48120 @@ -1733,9 +1733,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
48121 ret = -ERESTARTSYS;
48122 break;
48123 }
48124 - if (!pipe->writers)
48125 + if (!atomic_read(&pipe->writers))
48126 break;
48127 - if (!pipe->waiting_writers) {
48128 + if (!atomic_read(&pipe->waiting_writers)) {
48129 if (flags & SPLICE_F_NONBLOCK) {
48130 ret = -EAGAIN;
48131 break;
48132 @@ -1767,7 +1767,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
48133 pipe_lock(pipe);
48134
48135 while (pipe->nrbufs >= pipe->buffers) {
48136 - if (!pipe->readers) {
48137 + if (!atomic_read(&pipe->readers)) {
48138 send_sig(SIGPIPE, current, 0);
48139 ret = -EPIPE;
48140 break;
48141 @@ -1780,9 +1780,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
48142 ret = -ERESTARTSYS;
48143 break;
48144 }
48145 - pipe->waiting_writers++;
48146 + atomic_inc(&pipe->waiting_writers);
48147 pipe_wait(pipe);
48148 - pipe->waiting_writers--;
48149 + atomic_dec(&pipe->waiting_writers);
48150 }
48151
48152 pipe_unlock(pipe);
48153 @@ -1818,14 +1818,14 @@ retry:
48154 pipe_double_lock(ipipe, opipe);
48155
48156 do {
48157 - if (!opipe->readers) {
48158 + if (!atomic_read(&opipe->readers)) {
48159 send_sig(SIGPIPE, current, 0);
48160 if (!ret)
48161 ret = -EPIPE;
48162 break;
48163 }
48164
48165 - if (!ipipe->nrbufs && !ipipe->writers)
48166 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
48167 break;
48168
48169 /*
48170 @@ -1922,7 +1922,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
48171 pipe_double_lock(ipipe, opipe);
48172
48173 do {
48174 - if (!opipe->readers) {
48175 + if (!atomic_read(&opipe->readers)) {
48176 send_sig(SIGPIPE, current, 0);
48177 if (!ret)
48178 ret = -EPIPE;
48179 @@ -1967,7 +1967,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
48180 * return EAGAIN if we have the potential of some data in the
48181 * future, otherwise just return 0
48182 */
48183 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
48184 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
48185 ret = -EAGAIN;
48186
48187 pipe_unlock(ipipe);
48188 diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
48189 index 7fdf6a7..e6cd8ad 100644
48190 --- a/fs/sysfs/dir.c
48191 +++ b/fs/sysfs/dir.c
48192 @@ -642,6 +642,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
48193 struct sysfs_dirent *sd;
48194 int rc;
48195
48196 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
48197 + const char *parent_name = parent_sd->s_name;
48198 +
48199 + mode = S_IFDIR | S_IRWXU;
48200 +
48201 + if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
48202 + (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
48203 + (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
48204 + (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
48205 + mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
48206 +#endif
48207 +
48208 /* allocate */
48209 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
48210 if (!sd)
48211 diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
48212 index 00012e3..8392349 100644
48213 --- a/fs/sysfs/file.c
48214 +++ b/fs/sysfs/file.c
48215 @@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
48216
48217 struct sysfs_open_dirent {
48218 atomic_t refcnt;
48219 - atomic_t event;
48220 + atomic_unchecked_t event;
48221 wait_queue_head_t poll;
48222 struct list_head buffers; /* goes through sysfs_buffer.list */
48223 };
48224 @@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
48225 if (!sysfs_get_active(attr_sd))
48226 return -ENODEV;
48227
48228 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
48229 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
48230 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
48231
48232 sysfs_put_active(attr_sd);
48233 @@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
48234 return -ENOMEM;
48235
48236 atomic_set(&new_od->refcnt, 0);
48237 - atomic_set(&new_od->event, 1);
48238 + atomic_set_unchecked(&new_od->event, 1);
48239 init_waitqueue_head(&new_od->poll);
48240 INIT_LIST_HEAD(&new_od->buffers);
48241 goto retry;
48242 @@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
48243
48244 sysfs_put_active(attr_sd);
48245
48246 - if (buffer->event != atomic_read(&od->event))
48247 + if (buffer->event != atomic_read_unchecked(&od->event))
48248 goto trigger;
48249
48250 return DEFAULT_POLLMASK;
48251 @@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
48252
48253 od = sd->s_attr.open;
48254 if (od) {
48255 - atomic_inc(&od->event);
48256 + atomic_inc_unchecked(&od->event);
48257 wake_up_interruptible(&od->poll);
48258 }
48259
48260 diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
48261 index a7ac78f..02158e1 100644
48262 --- a/fs/sysfs/symlink.c
48263 +++ b/fs/sysfs/symlink.c
48264 @@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
48265
48266 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
48267 {
48268 - char *page = nd_get_link(nd);
48269 + const char *page = nd_get_link(nd);
48270 if (!IS_ERR(page))
48271 free_page((unsigned long)page);
48272 }
48273 diff --git a/fs/udf/misc.c b/fs/udf/misc.c
48274 index c175b4d..8f36a16 100644
48275 --- a/fs/udf/misc.c
48276 +++ b/fs/udf/misc.c
48277 @@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
48278
48279 u8 udf_tag_checksum(const struct tag *t)
48280 {
48281 - u8 *data = (u8 *)t;
48282 + const u8 *data = (const u8 *)t;
48283 u8 checksum = 0;
48284 int i;
48285 for (i = 0; i < sizeof(struct tag); ++i)
48286 diff --git a/fs/utimes.c b/fs/utimes.c
48287 index ba653f3..06ea4b1 100644
48288 --- a/fs/utimes.c
48289 +++ b/fs/utimes.c
48290 @@ -1,6 +1,7 @@
48291 #include <linux/compiler.h>
48292 #include <linux/file.h>
48293 #include <linux/fs.h>
48294 +#include <linux/security.h>
48295 #include <linux/linkage.h>
48296 #include <linux/mount.h>
48297 #include <linux/namei.h>
48298 @@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
48299 goto mnt_drop_write_and_out;
48300 }
48301 }
48302 +
48303 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
48304 + error = -EACCES;
48305 + goto mnt_drop_write_and_out;
48306 + }
48307 +
48308 mutex_lock(&inode->i_mutex);
48309 error = notify_change(path->dentry, &newattrs);
48310 mutex_unlock(&inode->i_mutex);
48311 diff --git a/fs/xattr.c b/fs/xattr.c
48312 index 82f4337..236473c 100644
48313 --- a/fs/xattr.c
48314 +++ b/fs/xattr.c
48315 @@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
48316 * Extended attribute SET operations
48317 */
48318 static long
48319 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
48320 +setxattr(struct path *path, const char __user *name, const void __user *value,
48321 size_t size, int flags)
48322 {
48323 int error;
48324 @@ -339,7 +339,13 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
48325 return PTR_ERR(kvalue);
48326 }
48327
48328 - error = vfs_setxattr(d, kname, kvalue, size, flags);
48329 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
48330 + error = -EACCES;
48331 + goto out;
48332 + }
48333 +
48334 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
48335 +out:
48336 kfree(kvalue);
48337 return error;
48338 }
48339 @@ -356,7 +362,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
48340 return error;
48341 error = mnt_want_write(path.mnt);
48342 if (!error) {
48343 - error = setxattr(path.dentry, name, value, size, flags);
48344 + error = setxattr(&path, name, value, size, flags);
48345 mnt_drop_write(path.mnt);
48346 }
48347 path_put(&path);
48348 @@ -375,7 +381,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
48349 return error;
48350 error = mnt_want_write(path.mnt);
48351 if (!error) {
48352 - error = setxattr(path.dentry, name, value, size, flags);
48353 + error = setxattr(&path, name, value, size, flags);
48354 mnt_drop_write(path.mnt);
48355 }
48356 path_put(&path);
48357 @@ -386,17 +392,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
48358 const void __user *,value, size_t, size, int, flags)
48359 {
48360 struct file *f;
48361 - struct dentry *dentry;
48362 int error = -EBADF;
48363
48364 f = fget(fd);
48365 if (!f)
48366 return error;
48367 - dentry = f->f_path.dentry;
48368 - audit_inode(NULL, dentry);
48369 + audit_inode(NULL, f->f_path.dentry);
48370 error = mnt_want_write_file(f);
48371 if (!error) {
48372 - error = setxattr(dentry, name, value, size, flags);
48373 + error = setxattr(&f->f_path, name, value, size, flags);
48374 mnt_drop_write_file(f);
48375 }
48376 fput(f);
48377 diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
48378 index 8d5a506..7f62712 100644
48379 --- a/fs/xattr_acl.c
48380 +++ b/fs/xattr_acl.c
48381 @@ -17,8 +17,8 @@
48382 struct posix_acl *
48383 posix_acl_from_xattr(const void *value, size_t size)
48384 {
48385 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
48386 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
48387 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
48388 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
48389 int count;
48390 struct posix_acl *acl;
48391 struct posix_acl_entry *acl_e;
48392 diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
48393 index 188ef2f..adcf864 100644
48394 --- a/fs/xfs/xfs_bmap.c
48395 +++ b/fs/xfs/xfs_bmap.c
48396 @@ -190,7 +190,7 @@ xfs_bmap_validate_ret(
48397 int nmap,
48398 int ret_nmap);
48399 #else
48400 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
48401 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
48402 #endif /* DEBUG */
48403
48404 STATIC int
48405 diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
48406 index 79d05e8..e3e5861 100644
48407 --- a/fs/xfs/xfs_dir2_sf.c
48408 +++ b/fs/xfs/xfs_dir2_sf.c
48409 @@ -852,7 +852,15 @@ xfs_dir2_sf_getdents(
48410 }
48411
48412 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
48413 - if (filldir(dirent, (char *)sfep->name, sfep->namelen,
48414 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
48415 + char name[sfep->namelen];
48416 + memcpy(name, sfep->name, sfep->namelen);
48417 + if (filldir(dirent, name, sfep->namelen,
48418 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
48419 + *offset = off & 0x7fffffff;
48420 + return 0;
48421 + }
48422 + } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
48423 off & 0x7fffffff, ino, DT_UNKNOWN)) {
48424 *offset = off & 0x7fffffff;
48425 return 0;
48426 diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
48427 index 76f3ca5..f57f712 100644
48428 --- a/fs/xfs/xfs_ioctl.c
48429 +++ b/fs/xfs/xfs_ioctl.c
48430 @@ -128,7 +128,7 @@ xfs_find_handle(
48431 }
48432
48433 error = -EFAULT;
48434 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
48435 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
48436 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
48437 goto out_put;
48438
48439 diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
48440 index ab30253..4d86958 100644
48441 --- a/fs/xfs/xfs_iops.c
48442 +++ b/fs/xfs/xfs_iops.c
48443 @@ -447,7 +447,7 @@ xfs_vn_put_link(
48444 struct nameidata *nd,
48445 void *p)
48446 {
48447 - char *s = nd_get_link(nd);
48448 + const char *s = nd_get_link(nd);
48449
48450 if (!IS_ERR(s))
48451 kfree(s);
48452 diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
48453 new file mode 100644
48454 index 0000000..4089e05
48455 --- /dev/null
48456 +++ b/grsecurity/Kconfig
48457 @@ -0,0 +1,1078 @@
48458 +#
47459 +# grsecurity configuration
48460 +#
48461 +
48462 +menu "Grsecurity"
48463 +
48464 +config GRKERNSEC
48465 + bool "Grsecurity"
48466 + select CRYPTO
48467 + select CRYPTO_SHA256
48468 + help
48469 + If you say Y here, you will be able to configure many features
48470 + that will enhance the security of your system. It is highly
48471 + recommended that you say Y here and read through the help
48472 + for each option so that you fully understand the features and
48473 + can evaluate their usefulness for your machine.
48474 +
48475 +choice
48476 + prompt "Security Level"
48477 + depends on GRKERNSEC
48478 + default GRKERNSEC_CUSTOM
48479 +
48480 +config GRKERNSEC_LOW
48481 + bool "Low"
48482 + select GRKERNSEC_LINK
48483 + select GRKERNSEC_FIFO
48484 + select GRKERNSEC_RANDNET
48485 + select GRKERNSEC_DMESG
48486 + select GRKERNSEC_CHROOT
48487 + select GRKERNSEC_CHROOT_CHDIR
48488 +
48489 + help
48490 + If you choose this option, several of the grsecurity options will
48491 + be enabled that will give you greater protection against a number
48492 + of attacks, while assuring that none of your software will have any
48493 + conflicts with the additional security measures. If you run a lot
48494 + of unusual software, or you are having problems with the higher
48495 + security levels, you should say Y here. With this option, the
48496 + following features are enabled:
48497 +
48498 + - Linking restrictions
48499 + - FIFO restrictions
48500 + - Restricted dmesg
48501 + - Enforced chdir("/") on chroot
48502 + - Runtime module disabling
48503 +
48504 +config GRKERNSEC_MEDIUM
48505 + bool "Medium"
48506 + select PAX
48507 + select PAX_EI_PAX
48508 + select PAX_PT_PAX_FLAGS
48509 + select PAX_HAVE_ACL_FLAGS
48510 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
48511 + select GRKERNSEC_CHROOT
48512 + select GRKERNSEC_CHROOT_SYSCTL
48513 + select GRKERNSEC_LINK
48514 + select GRKERNSEC_FIFO
48515 + select GRKERNSEC_DMESG
48516 + select GRKERNSEC_RANDNET
48517 + select GRKERNSEC_FORKFAIL
48518 + select GRKERNSEC_TIME
48519 + select GRKERNSEC_SIGNAL
48520 + select GRKERNSEC_CHROOT
48521 + select GRKERNSEC_CHROOT_UNIX
48522 + select GRKERNSEC_CHROOT_MOUNT
48523 + select GRKERNSEC_CHROOT_PIVOT
48524 + select GRKERNSEC_CHROOT_DOUBLE
48525 + select GRKERNSEC_CHROOT_CHDIR
48526 + select GRKERNSEC_CHROOT_MKNOD
48527 + select GRKERNSEC_PROC
48528 + select GRKERNSEC_PROC_USERGROUP
48529 + select PAX_RANDUSTACK
48530 + select PAX_ASLR
48531 + select PAX_RANDMMAP
48532 + select PAX_REFCOUNT if (X86 || SPARC64)
48533 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
48534 +
48535 + help
48536 + If you say Y here, several features in addition to those included
48537 + in the low additional security level will be enabled. These
48538 + features provide even more security to your system, though in rare
48539 + cases they may be incompatible with very old or poorly written
48540 + software. If you enable this option, make sure that your auth
48541 + service (identd) is running as gid 1001. With this option,
48542 + the following features (in addition to those provided in the
48543 + low additional security level) will be enabled:
48544 +
48545 + - Failed fork logging
48546 + - Time change logging
48547 + - Signal logging
48548 + - Deny mounts in chroot
48549 + - Deny double chrooting
48550 + - Deny sysctl writes in chroot
48551 + - Deny mknod in chroot
48552 + - Deny access to abstract AF_UNIX sockets out of chroot
48553 + - Deny pivot_root in chroot
48554 + - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
48555 + - /proc restrictions with special GID set to 10 (usually wheel)
48556 + - Address Space Layout Randomization (ASLR)
48557 + - Prevent exploitation of most refcount overflows
48558 + - Bounds checking of copying between the kernel and userland
48559 +
48560 +config GRKERNSEC_HIGH
48561 + bool "High"
48562 + select GRKERNSEC_LINK
48563 + select GRKERNSEC_FIFO
48564 + select GRKERNSEC_DMESG
48565 + select GRKERNSEC_FORKFAIL
48566 + select GRKERNSEC_TIME
48567 + select GRKERNSEC_SIGNAL
48568 + select GRKERNSEC_CHROOT
48569 + select GRKERNSEC_CHROOT_SHMAT
48570 + select GRKERNSEC_CHROOT_UNIX
48571 + select GRKERNSEC_CHROOT_MOUNT
48572 + select GRKERNSEC_CHROOT_FCHDIR
48573 + select GRKERNSEC_CHROOT_PIVOT
48574 + select GRKERNSEC_CHROOT_DOUBLE
48575 + select GRKERNSEC_CHROOT_CHDIR
48576 + select GRKERNSEC_CHROOT_MKNOD
48577 + select GRKERNSEC_CHROOT_CAPS
48578 + select GRKERNSEC_CHROOT_SYSCTL
48579 + select GRKERNSEC_CHROOT_FINDTASK
48580 + select GRKERNSEC_SYSFS_RESTRICT
48581 + select GRKERNSEC_PROC
48582 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
48583 + select GRKERNSEC_HIDESYM
48584 + select GRKERNSEC_BRUTE
48585 + select GRKERNSEC_PROC_USERGROUP
48586 + select GRKERNSEC_KMEM
48587 + select GRKERNSEC_RESLOG
48588 + select GRKERNSEC_RANDNET
48589 + select GRKERNSEC_PROC_ADD
48590 + select GRKERNSEC_CHROOT_CHMOD
48591 + select GRKERNSEC_CHROOT_NICE
48592 + select GRKERNSEC_SETXID
48593 + select GRKERNSEC_AUDIT_MOUNT
48594 + select GRKERNSEC_MODHARDEN if (MODULES)
48595 + select GRKERNSEC_HARDEN_PTRACE
48596 + select GRKERNSEC_PTRACE_READEXEC
48597 + select GRKERNSEC_VM86 if (X86_32)
48598 + select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
48599 + select PAX
48600 + select PAX_RANDUSTACK
48601 + select PAX_ASLR
48602 + select PAX_RANDMMAP
48603 + select PAX_NOEXEC
48604 + select PAX_MPROTECT
48605 + select PAX_EI_PAX
48606 + select PAX_PT_PAX_FLAGS
48607 + select PAX_HAVE_ACL_FLAGS
48608 + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
48609 + select PAX_MEMORY_UDEREF if (X86 && !XEN)
48610 + select PAX_RANDKSTACK if (X86_TSC && X86)
48611 + select PAX_SEGMEXEC if (X86_32)
48612 + select PAX_PAGEEXEC
48613 + select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
48614 + select PAX_EMUTRAMP if (PARISC)
48615 + select PAX_EMUSIGRT if (PARISC)
48616 + select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
48617 + select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
48618 + select PAX_REFCOUNT if (X86 || SPARC64)
48619 + select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
48620 + help
48621 + If you say Y here, many of the features of grsecurity will be
48622 + enabled, which will protect you against many kinds of attacks
48623 + against your system. The heightened security comes at a cost
48624 + of an increased chance of incompatibilities with rare software
48625 + on your machine. Since this security level enables PaX, you should
48626 + view <http://pax.grsecurity.net> and read about the PaX
48627 + project. While you are there, download chpax and run it on
48628 + binaries that cause problems with PaX. Also remember that
48629 + since the /proc restrictions are enabled, you must run your
48630 + identd as gid 1001. This security level enables the following
48631 + features in addition to those listed in the low and medium
48632 + security levels:
48633 +
48634 + - Additional /proc restrictions
48635 + - Chmod restrictions in chroot
48636 + - No signals, ptrace, or viewing of processes outside of chroot
48637 + - Capability restrictions in chroot
48638 + - Deny fchdir out of chroot
48639 + - Priority restrictions in chroot
48640 + - Segmentation-based implementation of PaX
48641 + - Mprotect restrictions
48642 + - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
48643 + - Kernel stack randomization
48644 + - Mount/unmount/remount logging
48645 + - Kernel symbol hiding
48646 + - Hardening of module auto-loading
48647 + - Ptrace restrictions
48648 + - Restricted vm86 mode
48649 + - Restricted sysfs/debugfs
48650 + - Active kernel exploit response
48651 +
48652 +config GRKERNSEC_CUSTOM
48653 + bool "Custom"
48654 + help
48655 + If you say Y here, you will be able to configure every grsecurity
48656 + option, which allows you to enable many more features that aren't
48657 + covered in the basic security levels. These additional features
48658 + include TPE, socket restrictions, and the sysctl system for
48659 + grsecurity. It is advised that you read through the help for
48660 + each option to determine its usefulness in your situation.
48661 +
48662 +endchoice
48663 +
48664 +menu "Memory Protections"
48665 +depends on GRKERNSEC
48666 +
48667 +config GRKERNSEC_KMEM
48668 + bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
48669 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
48670 + help
48671 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
48672 + be written to or read from to modify or leak the contents of the running
48673 + kernel. /dev/port will also not be allowed to be opened. If you have module
48674 + support disabled, enabling this will close up four ways that are
48675 + currently used to insert malicious code into the running kernel.
48676 + Even with all these features enabled, we still highly recommend that
48677 + you use the RBAC system, as it is still possible for an attacker to
48678 + modify the running kernel through privileged I/O granted by ioperm/iopl.
48679 + If you are not using XFree86, you may be able to stop this additional
48680 + case by enabling the 'Disable privileged I/O' option. Though nothing
48681 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
48682 + but only to video memory, which is the only writing we allow in this
48683 + case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
48684 + not be allowed to mprotect it with PROT_WRITE later.
48685 + It is highly recommended that you say Y here if you meet all the
48686 + conditions above.
48687 +
48688 +config GRKERNSEC_VM86
48689 + bool "Restrict VM86 mode"
48690 + depends on X86_32
48691 +
48692 + help
48693 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
48694 + make use of a special execution mode on 32bit x86 processors called
48695 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
48696 + video cards and will still work with this option enabled. The purpose
48697 + of the option is to prevent exploitation of emulation errors in
48698 + virtualization of vm86 mode like the one discovered in VMWare in 2009.
48699 + Nearly all users should be able to enable this option.
48700 +
48701 +config GRKERNSEC_IO
48702 + bool "Disable privileged I/O"
48703 + depends on X86
48704 + select RTC_CLASS
48705 + select RTC_INTF_DEV
48706 + select RTC_DRV_CMOS
48707 +
48708 + help
48709 + If you say Y here, all ioperm and iopl calls will return an error.
48710 + Ioperm and iopl can be used to modify the running kernel.
48711 + Unfortunately, some programs need this access to operate properly,
48712 + the most notable of which are XFree86 and hwclock. hwclock can be
48713 + remedied by having RTC support in the kernel, so real-time
48714 + clock support is enabled if this option is enabled, to ensure
48715 + that hwclock operates correctly. XFree86 still will not
48716 + operate correctly with this option enabled, so DO NOT CHOOSE Y
48717 + IF YOU USE XFree86. If you use XFree86 and you still want to
48718 + protect your kernel against modification, use the RBAC system.
48719 +
48720 +config GRKERNSEC_PROC_MEMMAP
48721 + bool "Harden ASLR against information leaks and entropy reduction"
48722 + default y if (PAX_NOEXEC || PAX_ASLR)
48723 + depends on PAX_NOEXEC || PAX_ASLR
48724 + help
48725 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
48726 + give no information about the addresses of its mappings if
48727 + PaX features that rely on random addresses are enabled on the task.
48728 + In addition to sanitizing this information and disabling other
48729 + dangerous sources of information, this option causes reads of sensitive
48730 + /proc/<pid> entries where the file descriptor was opened in a different
48731 + task than the one performing the read. Such attempts are logged.
48732 + This option also limits argv/env strings for suid/sgid binaries
48733 + to 512KB to prevent a complete exhaustion of the stack entropy provided
48734 + by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
48735 + binaries to prevent alternative mmap layouts from being abused.
48736 +
48737 + If you use PaX it is essential that you say Y here as it closes up
48738 + several holes that make full ASLR useless locally.
48739 +
48740 +config GRKERNSEC_BRUTE
48741 + bool "Deter exploit bruteforcing"
48742 + help
48743 + If you say Y here, attempts to bruteforce exploits against forking
48744 + daemons such as apache or sshd, as well as against suid/sgid binaries
48745 + will be deterred. When a child of a forking daemon is killed by PaX
48746 + or crashes due to an illegal instruction or other suspicious signal,
48747 + the parent process will be delayed 30 seconds upon every subsequent
48748 + fork until the administrator is able to assess the situation and
48749 + restart the daemon.
48750 + In the suid/sgid case, the attempt is logged, the user has all their
48751 + processes terminated, and they are prevented from executing any further
48752 + processes for 15 minutes.
48753 + It is recommended that you also enable signal logging in the auditing
48754 + section so that logs are generated when a process triggers a suspicious
48755 + signal.
48756 + If the sysctl option is enabled, a sysctl option with name
48757 + "deter_bruteforce" is created.
48758 +
48759 +
48760 +config GRKERNSEC_MODHARDEN
48761 + bool "Harden module auto-loading"
48762 + depends on MODULES
48763 + help
48764 + If you say Y here, module auto-loading in response to use of some
48765 + feature implemented by an unloaded module will be restricted to
48766 + root users. Enabling this option helps defend against attacks
48767 + by unprivileged users who abuse the auto-loading behavior to
48768 + cause a vulnerable module to load that is then exploited.
48769 +
48770 + If this option prevents a legitimate use of auto-loading for a
48771 + non-root user, the administrator can execute modprobe manually
48772 + with the exact name of the module mentioned in the alert log.
48773 + Alternatively, the administrator can add the module to the list
48774 + of modules loaded at boot by modifying init scripts.
48775 +
48776 + Modification of init scripts will most likely be needed on
48777 + Ubuntu servers with encrypted home directory support enabled,
48778 + as the first non-root user logging in will cause the ecb(aes),
48779 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
48780 +
48781 +config GRKERNSEC_HIDESYM
48782 + bool "Hide kernel symbols"
48783 + help
48784 + If you say Y here, getting information on loaded modules, and
48785 + displaying all kernel symbols through a syscall will be restricted
48786 + to users with CAP_SYS_MODULE. For software compatibility reasons,
48787 + /proc/kallsyms will be restricted to the root user. The RBAC
48788 + system can hide that entry even from root.
48789 +
48790 + This option also prevents leaking of kernel addresses through
48791 + several /proc entries.
48792 +
48793 + Note that this option is only effective provided the following
48794 + conditions are met:
48795 + 1) The kernel using grsecurity is not precompiled by some distribution
48796 + 2) You have also enabled GRKERNSEC_DMESG
48797 + 3) You are using the RBAC system and hiding other files such as your
48798 + kernel image and System.map. Alternatively, enabling this option
48799 + causes the permissions on /boot, /lib/modules, and the kernel
48800 + source directory to change at compile time to prevent
48801 + reading by non-root users.
48802 + If the above conditions are met, this option will aid in providing a
48803 + useful protection against local kernel exploitation of overflows
48804 + and arbitrary read/write vulnerabilities.
48805 +
48806 +config GRKERNSEC_KERN_LOCKOUT
48807 + bool "Active kernel exploit response"
48808 + depends on X86 || ARM || PPC || SPARC
48809 + help
48810 + If you say Y here, when a PaX alert is triggered due to suspicious
48811 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
48812 + or an OOPs occurs due to bad memory accesses, instead of just
48813 + terminating the offending process (and potentially allowing
48814 + a subsequent exploit from the same user), we will take one of two
48815 + actions:
48816 + If the user was root, we will panic the system
48817 + If the user was non-root, we will log the attempt, terminate
48818 + all processes owned by the user, then prevent them from creating
48819 + any new processes until the system is restarted
48820 + This deters repeated kernel exploitation/bruteforcing attempts
48821 + and is useful for later forensics.
48822 +
48823 +endmenu
48824 +menu "Role Based Access Control Options"
48825 +depends on GRKERNSEC
48826 +
48827 +config GRKERNSEC_RBAC_DEBUG
48828 + bool
48829 +
48830 +config GRKERNSEC_NO_RBAC
48831 + bool "Disable RBAC system"
48832 + help
48833 + If you say Y here, the /dev/grsec device will be removed from the kernel,
48834 + preventing the RBAC system from being enabled. You should only say Y
48835 + here if you have no intention of using the RBAC system, so as to prevent
48836 + an attacker with root access from misusing the RBAC system to hide files
48837 + and processes when loadable module support and /dev/[k]mem have been
48838 + locked down.
48839 +
48840 +config GRKERNSEC_ACL_HIDEKERN
48841 + bool "Hide kernel processes"
48842 + help
48843 + If you say Y here, all kernel threads will be hidden to all
48844 + processes but those whose subject has the "view hidden processes"
48845 + flag.
48846 +
48847 +config GRKERNSEC_ACL_MAXTRIES
48848 + int "Maximum tries before password lockout"
48849 + default 3
48850 + help
48851 + This option enforces the maximum number of times a user can attempt
48852 + to authorize themselves with the grsecurity RBAC system before being
48853 + denied the ability to attempt authorization again for a specified time.
48854 + The lower the number, the harder it will be to brute-force a password.
48855 +
48856 +config GRKERNSEC_ACL_TIMEOUT
48857 + int "Time to wait after max password tries, in seconds"
48858 + default 30
48859 + help
48860 + This option specifies the time the user must wait after attempting to
48861 + authorize to the RBAC system with the maximum number of invalid
48862 + passwords. The higher the number, the harder it will be to brute-force
48863 + a password.
48864 +
48865 +endmenu
48866 +menu "Filesystem Protections"
48867 +depends on GRKERNSEC
48868 +
48869 +config GRKERNSEC_PROC
48870 + bool "Proc restrictions"
48871 + help
48872 + If you say Y here, the permissions of the /proc filesystem
48873 + will be altered to enhance system security and privacy. You MUST
48874 + choose either a user only restriction or a user and group restriction.
48875 + Depending upon the option you choose, you can either restrict users to
48876 + see only the processes they themselves run, or choose a group that can
48877 + view all processes and files normally restricted to root if you choose
48878 + the "restrict to user only" option. NOTE: If you're running identd or
48879 + ntpd as a non-root user, you will have to run it as the group you
48880 + specify here.
48881 +
48882 +config GRKERNSEC_PROC_USER
48883 + bool "Restrict /proc to user only"
48884 + depends on GRKERNSEC_PROC
48885 + help
48886 + If you say Y here, non-root users will only be able to view their own
48887 + processes, and restricts them from viewing network-related information,
48888 + and viewing kernel symbol and module information.
48889 +
48890 +config GRKERNSEC_PROC_USERGROUP
48891 + bool "Allow special group"
48892 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
48893 + help
48894 + If you say Y here, you will be able to select a group that will be
48895 + able to view all processes and network-related information. If you've
48896 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
48897 + remain hidden. This option is useful if you want to run identd as
48898 + a non-root user.
48899 +
48900 +config GRKERNSEC_PROC_GID
48901 + int "GID for special group"
48902 + depends on GRKERNSEC_PROC_USERGROUP
48903 + default 1001
48904 +
48905 +config GRKERNSEC_PROC_ADD
48906 + bool "Additional restrictions"
48907 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
48908 + help
48909 + If you say Y here, additional restrictions will be placed on
48910 + /proc that keep normal users from viewing device information and
48911 + slabinfo information that could be useful for exploits.
48912 +
48913 +config GRKERNSEC_LINK
48914 + bool "Linking restrictions"
48915 + help
48916 + If you say Y here, /tmp race exploits will be prevented, since users
48917 + will no longer be able to follow symlinks owned by other users in
48918 + world-writable +t directories (e.g. /tmp), unless the owner of the
48919 + symlink is the owner of the directory. Users will also not be
48920 + able to hardlink to files they do not own. If the sysctl option is
48921 + enabled, a sysctl option with name "linking_restrictions" is created.
48922 +
48923 +config GRKERNSEC_FIFO
48924 + bool "FIFO restrictions"
48925 + help
48926 + If you say Y here, users will not be able to write to FIFOs they don't
48927 + own in world-writable +t directories (e.g. /tmp), unless the owner of
48928 + the FIFO is the same owner of the directory it's held in. If the sysctl
48929 + option is enabled, a sysctl option with name "fifo_restrictions" is
48930 + created.
48931 +
48932 +config GRKERNSEC_SYSFS_RESTRICT
48933 + bool "Sysfs/debugfs restriction"
48934 + depends on SYSFS
48935 + help
48936 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
48937 + any filesystem normally mounted under it (e.g. debugfs) will be
48938 + mostly accessible only by root. These filesystems generally provide access
48939 + to hardware and debug information that isn't appropriate for unprivileged
48940 + users of the system. Sysfs and debugfs have also become a large source
48941 + of new vulnerabilities, ranging from infoleaks to local compromise.
48942 + There has been very little oversight with an eye toward security involved
48943 + in adding new exporters of information to these filesystems, so their
48944 + use is discouraged.
48945 + For reasons of compatibility, a few directories have been whitelisted
48946 + for access by non-root users:
48947 + /sys/fs/selinux
48948 + /sys/fs/fuse
48949 + /sys/devices/system/cpu
48950 +
48951 +config GRKERNSEC_ROFS
48952 + bool "Runtime read-only mount protection"
48953 + help
48954 + If you say Y here, a sysctl option with name "romount_protect" will
48955 + be created. By setting this option to 1 at runtime, filesystems
48956 + will be protected in the following ways:
48957 + * No new writable mounts will be allowed
48958 + * Existing read-only mounts won't be able to be remounted read/write
48959 + * Write operations will be denied on all block devices
48960 + This option acts independently of grsec_lock: once it is set to 1,
48961 + it cannot be turned off. Therefore, please be mindful of the resulting
48962 + behavior if this option is enabled in an init script on a read-only
48963 + filesystem. This feature is mainly intended for secure embedded systems.
48964 +
48965 +config GRKERNSEC_CHROOT
48966 + bool "Chroot jail restrictions"
48967 + help
48968 + If you say Y here, you will be able to choose several options that will
48969 + make breaking out of a chrooted jail much more difficult. If you
48970 + encounter no software incompatibilities with the following options, it
48971 + is recommended that you enable each one.
48972 +
48973 +config GRKERNSEC_CHROOT_MOUNT
48974 + bool "Deny mounts"
48975 + depends on GRKERNSEC_CHROOT
48976 + help
48977 + If you say Y here, processes inside a chroot will not be able to
48978 + mount or remount filesystems. If the sysctl option is enabled, a
48979 + sysctl option with name "chroot_deny_mount" is created.
48980 +
48981 +config GRKERNSEC_CHROOT_DOUBLE
48982 + bool "Deny double-chroots"
48983 + depends on GRKERNSEC_CHROOT
48984 + help
48985 + If you say Y here, processes inside a chroot will not be able to chroot
48986 + again outside the chroot. This is a widely used method of breaking
48987 + out of a chroot jail and should not be allowed. If the sysctl
48988 + option is enabled, a sysctl option with name
48989 + "chroot_deny_chroot" is created.
48990 +
48991 +config GRKERNSEC_CHROOT_PIVOT
48992 + bool "Deny pivot_root in chroot"
48993 + depends on GRKERNSEC_CHROOT
48994 + help
48995 + If you say Y here, processes inside a chroot will not be able to use
48996 + a function called pivot_root() that was introduced in Linux 2.3.41. It
48997 + works similar to chroot in that it changes the root filesystem. This
48998 + function could be misused in a chrooted process to attempt to break out
48999 + of the chroot, and therefore should not be allowed. If the sysctl
49000 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
49001 + created.
49002 +
49003 +config GRKERNSEC_CHROOT_CHDIR
49004 + bool "Enforce chdir(\"/\") on all chroots"
49005 + depends on GRKERNSEC_CHROOT
49006 + help
49007 + If you say Y here, the current working directory of all newly-chrooted
49008 + applications will be set to the root directory of the chroot.
49009 + The man page on chroot(2) states:
49010 + Note that this call does not change the current working
49011 + directory, so that `.' can be outside the tree rooted at
49012 + `/'. In particular, the super-user can escape from a
49013 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
49014 +
49015 + It is recommended that you say Y here, since it's not known to break
49016 + any software. If the sysctl option is enabled, a sysctl option with
49017 + name "chroot_enforce_chdir" is created.
49018 +
49019 +config GRKERNSEC_CHROOT_CHMOD
49020 + bool "Deny (f)chmod +s"
49021 + depends on GRKERNSEC_CHROOT
49022 + help
49023 + If you say Y here, processes inside a chroot will not be able to chmod
49024 + or fchmod files to make them have suid or sgid bits. This protects
49025 + against another published method of breaking a chroot. If the sysctl
49026 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
49027 + created.
49028 +
49029 +config GRKERNSEC_CHROOT_FCHDIR
49030 + bool "Deny fchdir out of chroot"
49031 + depends on GRKERNSEC_CHROOT
49032 + help
49033 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
49034 + to a file descriptor of the chrooting process that points to a directory
49035 + outside the filesystem will be stopped. If the sysctl option
49036 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
49037 +
49038 +config GRKERNSEC_CHROOT_MKNOD
49039 + bool "Deny mknod"
49040 + depends on GRKERNSEC_CHROOT
49041 + help
49042 + If you say Y here, processes inside a chroot will not be allowed to
49043 + mknod. The problem with using mknod inside a chroot is that it
49044 + would allow an attacker to create a device entry that is the same
49045 + as one on the physical root of your system, which could range from
49046 + anything from the console device to a device for your harddrive (which
49047 + they could then use to wipe the drive or steal data). It is recommended
49048 + that you say Y here, unless you run into software incompatibilities.
49049 + If the sysctl option is enabled, a sysctl option with name
49050 + "chroot_deny_mknod" is created.
49051 +
49052 +config GRKERNSEC_CHROOT_SHMAT
49053 + bool "Deny shmat() out of chroot"
49054 + depends on GRKERNSEC_CHROOT
49055 + help
49056 + If you say Y here, processes inside a chroot will not be able to attach
49057 + to shared memory segments that were created outside of the chroot jail.
49058 + It is recommended that you say Y here. If the sysctl option is enabled,
49059 + a sysctl option with name "chroot_deny_shmat" is created.
49060 +
49061 +config GRKERNSEC_CHROOT_UNIX
49062 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
49063 + depends on GRKERNSEC_CHROOT
49064 + help
49065 + If you say Y here, processes inside a chroot will not be able to
49066 + connect to abstract (meaning not belonging to a filesystem) Unix
49067 + domain sockets that were bound outside of a chroot. It is recommended
49068 + that you say Y here. If the sysctl option is enabled, a sysctl option
49069 + with name "chroot_deny_unix" is created.
49070 +
49071 +config GRKERNSEC_CHROOT_FINDTASK
49072 + bool "Protect outside processes"
49073 + depends on GRKERNSEC_CHROOT
49074 + help
49075 + If you say Y here, processes inside a chroot will not be able to
49076 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
49077 + getsid, or view any process outside of the chroot. If the sysctl
49078 + option is enabled, a sysctl option with name "chroot_findtask" is
49079 + created.
49080 +
49081 +config GRKERNSEC_CHROOT_NICE
49082 + bool "Restrict priority changes"
49083 + depends on GRKERNSEC_CHROOT
49084 + help
49085 + If you say Y here, processes inside a chroot will not be able to raise
49086 + the priority of processes in the chroot, or alter the priority of
49087 + processes outside the chroot. This provides more security than simply
49088 + removing CAP_SYS_NICE from the process' capability set. If the
49089 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
49090 + is created.
49091 +
49092 +config GRKERNSEC_CHROOT_SYSCTL
49093 + bool "Deny sysctl writes"
49094 + depends on GRKERNSEC_CHROOT
49095 + help
49096 + If you say Y here, an attacker in a chroot will not be able to
49097 + write to sysctl entries, either by sysctl(2) or through a /proc
49098 + interface. It is strongly recommended that you say Y here. If the
49099 + sysctl option is enabled, a sysctl option with name
49100 + "chroot_deny_sysctl" is created.
49101 +
49102 +config GRKERNSEC_CHROOT_CAPS
49103 + bool "Capability restrictions"
49104 + depends on GRKERNSEC_CHROOT
49105 + help
49106 + If you say Y here, the capabilities on all processes within a
49107 + chroot jail will be lowered to stop module insertion, raw i/o,
49108 + system and net admin tasks, rebooting the system, modifying immutable
49109 + files, modifying IPC owned by another, and changing the system time.
49110 + This is left an option because it can break some apps. Disable this
49111 + if your chrooted apps are having problems performing those kinds of
49112 + tasks. If the sysctl option is enabled, a sysctl option with
49113 + name "chroot_caps" is created.
49114 +
49115 +endmenu
49116 +menu "Kernel Auditing"
49117 +depends on GRKERNSEC
49118 +
49119 +config GRKERNSEC_AUDIT_GROUP
49120 + bool "Single group for auditing"
49121 + help
49122 + If you say Y here, the exec, chdir, and (un)mount logging features
49123 + will only operate on a group you specify. This option is recommended
49124 + if you only want to watch certain users instead of having a large
49125 + amount of logs from the entire system. If the sysctl option is enabled,
49126 + a sysctl option with name "audit_group" is created.
49127 +
49128 +config GRKERNSEC_AUDIT_GID
49129 + int "GID for auditing"
49130 + depends on GRKERNSEC_AUDIT_GROUP
49131 + default 1007
49132 +
49133 +config GRKERNSEC_EXECLOG
49134 + bool "Exec logging"
49135 + help
49136 + If you say Y here, all execve() calls will be logged (since the
49137 + other exec*() calls are frontends to execve(), all execution
49138 + will be logged). Useful for shell-servers that like to keep track
49139 + of their users. If the sysctl option is enabled, a sysctl option with
49140 + name "exec_logging" is created.
49141 + WARNING: This option when enabled will produce a LOT of logs, especially
49142 + on an active system.
49143 +
49144 +config GRKERNSEC_RESLOG
49145 + bool "Resource logging"
49146 + help
49147 + If you say Y here, all attempts to overstep resource limits will
49148 + be logged with the resource name, the requested size, and the current
49149 + limit. It is highly recommended that you say Y here. If the sysctl
49150 + option is enabled, a sysctl option with name "resource_logging" is
49151 + created. If the RBAC system is enabled, the sysctl value is ignored.
49152 +
49153 +config GRKERNSEC_CHROOT_EXECLOG
49154 + bool "Log execs within chroot"
49155 + help
49156 + If you say Y here, all executions inside a chroot jail will be logged
49157 + to syslog. This can cause a large amount of logs if certain
49158 + applications (eg. djb's daemontools) are installed on the system, and
49159 + is therefore left as an option. If the sysctl option is enabled, a
49160 + sysctl option with name "chroot_execlog" is created.
49161 +
49162 +config GRKERNSEC_AUDIT_PTRACE
49163 + bool "Ptrace logging"
49164 + help
49165 + If you say Y here, all attempts to attach to a process via ptrace
49166 + will be logged. If the sysctl option is enabled, a sysctl option
49167 + with name "audit_ptrace" is created.
49168 +
49169 +config GRKERNSEC_AUDIT_CHDIR
49170 + bool "Chdir logging"
49171 + help
49172 + If you say Y here, all chdir() calls will be logged. If the sysctl
49173 + option is enabled, a sysctl option with name "audit_chdir" is created.
49174 +
49175 +config GRKERNSEC_AUDIT_MOUNT
49176 + bool "(Un)Mount logging"
49177 + help
49178 + If you say Y here, all mounts and unmounts will be logged. If the
49179 + sysctl option is enabled, a sysctl option with name "audit_mount" is
49180 + created.
49181 +
49182 +config GRKERNSEC_SIGNAL
49183 + bool "Signal logging"
49184 + help
49185 + If you say Y here, certain important signals will be logged, such as
49186 + SIGSEGV, which will as a result inform you of when a error in a program
49187 + occurred, which in some cases could mean a possible exploit attempt.
49188 + If the sysctl option is enabled, a sysctl option with name
49189 + "signal_logging" is created.
49190 +
49191 +config GRKERNSEC_FORKFAIL
49192 + bool "Fork failure logging"
49193 + help
49194 + If you say Y here, all failed fork() attempts will be logged.
49195 + This could suggest a fork bomb, or someone attempting to overstep
49196 + their process limit. If the sysctl option is enabled, a sysctl option
49197 + with name "forkfail_logging" is created.
49198 +
49199 +config GRKERNSEC_TIME
49200 + bool "Time change logging"
49201 + help
49202 + If you say Y here, any changes of the system clock will be logged.
49203 + If the sysctl option is enabled, a sysctl option with name
49204 + "timechange_logging" is created.
49205 +
49206 +config GRKERNSEC_PROC_IPADDR
49207 + bool "/proc/<pid>/ipaddr support"
49208 + help
49209 + If you say Y here, a new entry will be added to each /proc/<pid>
49210 + directory that contains the IP address of the person using the task.
49211 + The IP is carried across local TCP and AF_UNIX stream sockets.
49212 + This information can be useful for IDS/IPSes to perform remote response
49213 + to a local attack. The entry is readable by only the owner of the
49214 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
49215 + the RBAC system), and thus does not create privacy concerns.
49216 +
49217 +config GRKERNSEC_RWXMAP_LOG
49218 + bool 'Denied RWX mmap/mprotect logging'
49219 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
49220 + help
49221 + If you say Y here, calls to mmap() and mprotect() with explicit
49222 + usage of PROT_WRITE and PROT_EXEC together will be logged when
49223 + denied by the PAX_MPROTECT feature. If the sysctl option is
49224 + enabled, a sysctl option with name "rwxmap_logging" is created.
49225 +
49226 +config GRKERNSEC_AUDIT_TEXTREL
49227 + bool 'ELF text relocations logging (READ HELP)'
49228 + depends on PAX_MPROTECT
49229 + help
49230 + If you say Y here, text relocations will be logged with the filename
49231 + of the offending library or binary. The purpose of the feature is
49232 + to help Linux distribution developers get rid of libraries and
49233 + binaries that need text relocations which hinder the future progress
49234 + of PaX. Only Linux distribution developers should say Y here, and
49235 + never on a production machine, as this option creates an information
49236 + leak that could aid an attacker in defeating the randomization of
49237 + a single memory region. If the sysctl option is enabled, a sysctl
49238 + option with name "audit_textrel" is created.
49239 +
49240 +endmenu
49241 +
49242 +menu "Executable Protections"
49243 +depends on GRKERNSEC
49244 +
49245 +config GRKERNSEC_DMESG
49246 + bool "Dmesg(8) restriction"
49247 + help
49248 + If you say Y here, non-root users will not be able to use dmesg(8)
49249 + to view up to the last 4kb of messages in the kernel's log buffer.
49250 + The kernel's log buffer often contains kernel addresses and other
49251 + identifying information useful to an attacker in fingerprinting a
49252 + system for a targeted exploit.
49253 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
49254 + created.
49255 +
49256 +config GRKERNSEC_HARDEN_PTRACE
49257 + bool "Deter ptrace-based process snooping"
49258 + help
49259 + If you say Y here, TTY sniffers and other malicious monitoring
49260 + programs implemented through ptrace will be defeated. If you
49261 + have been using the RBAC system, this option has already been
49262 + enabled for several years for all users, with the ability to make
49263 + fine-grained exceptions.
49264 +
49265 + This option only affects the ability of non-root users to ptrace
49266 + processes that are not a descendent of the ptracing process.
49267 + This means that strace ./binary and gdb ./binary will still work,
49268 + but attaching to arbitrary processes will not. If the sysctl
49269 + option is enabled, a sysctl option with name "harden_ptrace" is
49270 + created.
49271 +
49272 +config GRKERNSEC_PTRACE_READEXEC
49273 + bool "Require read access to ptrace sensitive binaries"
49274 + help
49275 + If you say Y here, unprivileged users will not be able to ptrace unreadable
49276 + binaries. This option is useful in environments that
49277 + remove the read bits (e.g. file mode 4711) from suid binaries to
49278 + prevent infoleaking of their contents. This option adds
49279 + consistency to the use of that file mode, as the binary could normally
49280 + be read out when run without privileges while ptracing.
49281 +
49282 + If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
49283 + is created.
49284 +
49285 +config GRKERNSEC_SETXID
49286 + bool "Enforce consistent multithreaded privileges"
49287 + help
49288 + If you say Y here, a change from a root uid to a non-root uid
49289 + in a multithreaded application will cause the resulting uids,
49290 + gids, supplementary groups, and capabilities in that thread
49291 + to be propagated to the other threads of the process. In most
49292 + cases this is unnecessary, as glibc will emulate this behavior
49293 + on behalf of the application. Other libcs do not act in the
49294 + same way, allowing the other threads of the process to continue
49295 + running with root privileges. If the sysctl option is enabled,
49296 + a sysctl option with name "consistent_setxid" is created.
49297 +
49298 +config GRKERNSEC_TPE
49299 + bool "Trusted Path Execution (TPE)"
49300 + help
49301 + If you say Y here, you will be able to choose a gid to add to the
49302 + supplementary groups of users you want to mark as "untrusted."
49303 + These users will not be able to execute any files that are not in
49304 + root-owned directories writable only by root. If the sysctl option
49305 + is enabled, a sysctl option with name "tpe" is created.
49306 +
49307 +config GRKERNSEC_TPE_ALL
49308 + bool "Partially restrict all non-root users"
49309 + depends on GRKERNSEC_TPE
49310 + help
49311 + If you say Y here, all non-root users will be covered under
49312 + a weaker TPE restriction. This is separate from, and in addition to,
49313 + the main TPE options that you have selected elsewhere. Thus, if a
49314 + "trusted" GID is chosen, this restriction applies to even that GID.
49315 + Under this restriction, all non-root users will only be allowed to
49316 + execute files in directories they own that are not group or
49317 + world-writable, or in directories owned by root and writable only by
49318 + root. If the sysctl option is enabled, a sysctl option with name
49319 + "tpe_restrict_all" is created.
49320 +
49321 +config GRKERNSEC_TPE_INVERT
49322 + bool "Invert GID option"
49323 + depends on GRKERNSEC_TPE
49324 + help
49325 + If you say Y here, the group you specify in the TPE configuration will
49326 + decide what group TPE restrictions will be *disabled* for. This
49327 + option is useful if you want TPE restrictions to be applied to most
49328 + users on the system. If the sysctl option is enabled, a sysctl option
49329 + with name "tpe_invert" is created. Unlike other sysctl options, this
49330 + entry will default to on for backward-compatibility.
49331 +
49332 +config GRKERNSEC_TPE_GID
49333 + int "GID for untrusted users"
49334 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
49335 + default 1005
49336 + help
49337 + Setting this GID determines what group TPE restrictions will be
49338 + *enabled* for. If the sysctl option is enabled, a sysctl option
49339 + with name "tpe_gid" is created.
49340 +
49341 +config GRKERNSEC_TPE_GID
49342 + int "GID for trusted users"
49343 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
49344 + default 1005
49345 + help
49346 + Setting this GID determines what group TPE restrictions will be
49347 + *disabled* for. If the sysctl option is enabled, a sysctl option
49348 + with name "tpe_gid" is created.
49349 +
49350 +endmenu
49351 +menu "Network Protections"
49352 +depends on GRKERNSEC
49353 +
49354 +config GRKERNSEC_RANDNET
49355 + bool "Larger entropy pools"
49356 + help
49357 + If you say Y here, the entropy pools used for many features of Linux
49358 + and grsecurity will be doubled in size. Since several grsecurity
49359 + features use additional randomness, it is recommended that you say Y
49360 + here. Saying Y here has a similar effect as modifying
49361 + /proc/sys/kernel/random/poolsize.
49362 +
49363 +config GRKERNSEC_BLACKHOLE
49364 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
49365 + depends on NET
49366 + help
49367 + If you say Y here, neither TCP resets nor ICMP
49368 + destination-unreachable packets will be sent in response to packets
49369 + sent to ports for which no associated listening process exists.
49370 + This feature supports both IPV4 and IPV6 and exempts the
49371 + loopback interface from blackholing. Enabling this feature
49372 + makes a host more resilient to DoS attacks and reduces network
49373 + visibility against scanners.
49374 +
49375 + The blackhole feature as-implemented is equivalent to the FreeBSD
49376 + blackhole feature, as it prevents RST responses to all packets, not
49377 + just SYNs. Under most application behavior this causes no
49378 + problems, but applications (like haproxy) may not close certain
49379 + connections in a way that cleanly terminates them on the remote
49380 + end, leaving the remote host in LAST_ACK state. Because of this
49381 + side-effect and to prevent intentional LAST_ACK DoSes, this
49382 + feature also adds automatic mitigation against such attacks.
49383 + The mitigation drastically reduces the amount of time a socket
49384 + can spend in LAST_ACK state. If you're using haproxy and not
49385 + all servers it connects to have this option enabled, consider
49386 + disabling this feature on the haproxy host.
49387 +
49388 + If the sysctl option is enabled, two sysctl options with names
49389 + "ip_blackhole" and "lastack_retries" will be created.
49390 + While "ip_blackhole" takes the standard zero/non-zero on/off
49391 + toggle, "lastack_retries" uses the same kinds of values as
49392 + "tcp_retries1" and "tcp_retries2". The default value of 4
49393 + prevents a socket from lasting more than 45 seconds in LAST_ACK
49394 + state.
49395 +
49396 +config GRKERNSEC_SOCKET
49397 + bool "Socket restrictions"
49398 + depends on NET
49399 + help
49400 + If you say Y here, you will be able to choose from several options.
49401 + If you assign a GID on your system and add it to the supplementary
49402 + groups of users you want to restrict socket access to, this patch
49403 + will perform up to three things, based on the option(s) you choose.
49404 +
49405 +config GRKERNSEC_SOCKET_ALL
49406 + bool "Deny any sockets to group"
49407 + depends on GRKERNSEC_SOCKET
49408 + help
49409 + If you say Y here, you will be able to choose a GID of whose users will
49410 + be unable to connect to other hosts from your machine or run server
49411 + applications from your machine. If the sysctl option is enabled, a
49412 + sysctl option with name "socket_all" is created.
49413 +
49414 +config GRKERNSEC_SOCKET_ALL_GID
49415 + int "GID to deny all sockets for"
49416 + depends on GRKERNSEC_SOCKET_ALL
49417 + default 1004
49418 + help
49419 + Here you can choose the GID to disable socket access for. Remember to
49420 + add the users you want socket access disabled for to the GID
49421 + specified here. If the sysctl option is enabled, a sysctl option
49422 + with name "socket_all_gid" is created.
49423 +
49424 +config GRKERNSEC_SOCKET_CLIENT
49425 + bool "Deny client sockets to group"
49426 + depends on GRKERNSEC_SOCKET
49427 + help
49428 + If you say Y here, you will be able to choose a GID of whose users will
49429 + be unable to connect to other hosts from your machine, but will be
49430 + able to run servers. If this option is enabled, all users in the group
49431 + you specify will have to use passive mode when initiating ftp transfers
49432 + from the shell on your machine. If the sysctl option is enabled, a
49433 + sysctl option with name "socket_client" is created.
49434 +
49435 +config GRKERNSEC_SOCKET_CLIENT_GID
49436 + int "GID to deny client sockets for"
49437 + depends on GRKERNSEC_SOCKET_CLIENT
49438 + default 1003
49439 + help
49440 + Here you can choose the GID to disable client socket access for.
49441 + Remember to add the users you want client socket access disabled for to
49442 + the GID specified here. If the sysctl option is enabled, a sysctl
49443 + option with name "socket_client_gid" is created.
49444 +
49445 +config GRKERNSEC_SOCKET_SERVER
49446 + bool "Deny server sockets to group"
49447 + depends on GRKERNSEC_SOCKET
49448 + help
49449 + If you say Y here, you will be able to choose a GID of whose users will
49450 + be unable to run server applications from your machine. If the sysctl
49451 + option is enabled, a sysctl option with name "socket_server" is created.
49452 +
49453 +config GRKERNSEC_SOCKET_SERVER_GID
49454 + int "GID to deny server sockets for"
49455 + depends on GRKERNSEC_SOCKET_SERVER
49456 + default 1002
49457 + help
49458 + Here you can choose the GID to disable server socket access for.
49459 + Remember to add the users you want server socket access disabled for to
49460 + the GID specified here. If the sysctl option is enabled, a sysctl
49461 + option with name "socket_server_gid" is created.
49462 +
49463 +endmenu
49464 +menu "Sysctl support"
49465 +depends on GRKERNSEC && SYSCTL
49466 +
49467 +config GRKERNSEC_SYSCTL
49468 + bool "Sysctl support"
49469 + help
49470 + If you say Y here, you will be able to change the options that
49471 + grsecurity runs with at bootup, without having to recompile your
49472 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
49473 + to enable (1) or disable (0) various features. All the sysctl entries
49474 + are mutable until the "grsec_lock" entry is set to a non-zero value.
49475 + All features enabled in the kernel configuration are disabled at boot
49476 + if you do not say Y to the "Turn on features by default" option.
49477 + All options should be set at startup, and the grsec_lock entry should
49478 + be set to a non-zero value after all the options are set.
49479 + *THIS IS EXTREMELY IMPORTANT*
49480 +
49481 +config GRKERNSEC_SYSCTL_DISTRO
49482 + bool "Extra sysctl support for distro makers (READ HELP)"
49483 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
49484 + help
49485 + If you say Y here, additional sysctl options will be created
49486 + for features that affect processes running as root. Therefore,
49487 + it is critical when using this option that the grsec_lock entry be
49488 + enabled after boot. Only distros with prebuilt kernel packages
49489 + with this option enabled that can ensure grsec_lock is enabled
49490 + after boot should use this option.
49491 + *Failure to set grsec_lock after boot makes all grsec features
49492 + this option covers useless*
49493 +
49494 + Currently this option creates the following sysctl entries:
49495 + "Disable Privileged I/O": "disable_priv_io"
49496 +
49497 +config GRKERNSEC_SYSCTL_ON
49498 + bool "Turn on features by default"
49499 + depends on GRKERNSEC_SYSCTL
49500 + help
49501 + If you say Y here, instead of having all features enabled in the
49502 + kernel configuration disabled at boot time, the features will be
49503 + enabled at boot time. It is recommended you say Y here unless
49504 + there is some reason you would want all sysctl-tunable features to
49505 + be disabled by default. As mentioned elsewhere, it is important
49506 + to enable the grsec_lock entry once you have finished modifying
49507 + the sysctl entries.
49508 +
49509 +endmenu
49510 +menu "Logging Options"
49511 +depends on GRKERNSEC
49512 +
49513 +config GRKERNSEC_FLOODTIME
49514 + int "Seconds in between log messages (minimum)"
49515 + default 10
49516 + help
49517 + This option allows you to enforce the number of seconds between
49518 + grsecurity log messages. The default should be suitable for most
49519 + people, however, if you choose to change it, choose a value small enough
49520 + to allow informative logs to be produced, but large enough to
49521 + prevent flooding.
49522 +
49523 +config GRKERNSEC_FLOODBURST
49524 + int "Number of messages in a burst (maximum)"
49525 + default 6
49526 + help
49527 + This option allows you to choose the maximum number of messages allowed
49528 + within the flood time interval you chose in a separate option. The
49529 + default should be suitable for most people, however if you find that
49530 + many of your logs are being interpreted as flooding, you may want to
49531 + raise this value.
49532 +
49533 +endmenu
49534 +
49535 +endmenu
49536 diff --git a/grsecurity/Makefile b/grsecurity/Makefile
49537 new file mode 100644
49538 index 0000000..1b9afa9
49539 --- /dev/null
49540 +++ b/grsecurity/Makefile
49541 @@ -0,0 +1,38 @@
49542 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
49543 +# during 2001-2009 it has been completely redesigned by Brad Spengler
49544 +# into an RBAC system
49545 +#
49546 +# All code in this directory and various hooks inserted throughout the kernel
49547 +# are copyright Brad Spengler - Open Source Security, Inc., and released
49548 +# under the GPL v2 or higher
49549 +
49550 +KBUILD_CFLAGS += -Werror
49551 +
49552 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
49553 + grsec_mount.o grsec_sig.o grsec_sysctl.o \
49554 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
49555 +
49556 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
49557 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
49558 + gracl_learn.o grsec_log.o
49559 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
49560 +
49561 +ifdef CONFIG_NET
49562 +obj-y += grsec_sock.o
49563 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
49564 +endif
49565 +
49566 +ifndef CONFIG_GRKERNSEC
49567 +obj-y += grsec_disabled.o
49568 +endif
49569 +
49570 +ifdef CONFIG_GRKERNSEC_HIDESYM
49571 +extra-y := grsec_hidesym.o
49572 +$(obj)/grsec_hidesym.o:
49573 + @-chmod -f 500 /boot
49574 + @-chmod -f 500 /lib/modules
49575 + @-chmod -f 500 /lib64/modules
49576 + @-chmod -f 500 /lib32/modules
49577 + @-chmod -f 700 .
49578 + @echo ' grsec: protected kernel image paths'
49579 +endif
49580 diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
49581 new file mode 100644
49582 index 0000000..e8c5d41
49583 --- /dev/null
49584 +++ b/grsecurity/gracl.c
49585 @@ -0,0 +1,4179 @@
49586 +#include <linux/kernel.h>
49587 +#include <linux/module.h>
49588 +#include <linux/sched.h>
49589 +#include <linux/mm.h>
49590 +#include <linux/file.h>
49591 +#include <linux/fs.h>
49592 +#include <linux/namei.h>
49593 +#include <linux/mount.h>
49594 +#include <linux/tty.h>
49595 +#include <linux/proc_fs.h>
49596 +#include <linux/lglock.h>
49597 +#include <linux/slab.h>
49598 +#include <linux/vmalloc.h>
49599 +#include <linux/types.h>
49600 +#include <linux/sysctl.h>
49601 +#include <linux/netdevice.h>
49602 +#include <linux/ptrace.h>
49603 +#include <linux/gracl.h>
49604 +#include <linux/gralloc.h>
49605 +#include <linux/security.h>
49606 +#include <linux/grinternal.h>
49607 +#include <linux/pid_namespace.h>
49608 +#include <linux/fdtable.h>
49609 +#include <linux/percpu.h>
49610 +#include "../fs/mount.h"
49611 +
49612 +#include <asm/uaccess.h>
49613 +#include <asm/errno.h>
49614 +#include <asm/mman.h>
49615 +
49616 +static struct acl_role_db acl_role_set;
49617 +static struct name_db name_set;
49618 +static struct inodev_db inodev_set;
49619 +
49620 +/* for keeping track of userspace pointers used for subjects, so we
49621 + can share references in the kernel as well
49622 +*/
49623 +
49624 +static struct path real_root;
49625 +
49626 +static struct acl_subj_map_db subj_map_set;
49627 +
49628 +static struct acl_role_label *default_role;
49629 +
49630 +static struct acl_role_label *role_list;
49631 +
49632 +static u16 acl_sp_role_value;
49633 +
49634 +extern char *gr_shared_page[4];
49635 +static DEFINE_MUTEX(gr_dev_mutex);
49636 +DEFINE_RWLOCK(gr_inode_lock);
49637 +
49638 +struct gr_arg *gr_usermode;
49639 +
49640 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
49641 +
49642 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
49643 +extern void gr_clear_learn_entries(void);
49644 +
49645 +#ifdef CONFIG_GRKERNSEC_RESLOG
49646 +extern void gr_log_resource(const struct task_struct *task,
49647 + const int res, const unsigned long wanted, const int gt);
49648 +#endif
49649 +
49650 +unsigned char *gr_system_salt;
49651 +unsigned char *gr_system_sum;
49652 +
49653 +static struct sprole_pw **acl_special_roles = NULL;
49654 +static __u16 num_sprole_pws = 0;
49655 +
49656 +static struct acl_role_label *kernel_role = NULL;
49657 +
49658 +static unsigned int gr_auth_attempts = 0;
49659 +static unsigned long gr_auth_expires = 0UL;
49660 +
49661 +#ifdef CONFIG_NET
49662 +extern struct vfsmount *sock_mnt;
49663 +#endif
49664 +
49665 +extern struct vfsmount *pipe_mnt;
49666 +extern struct vfsmount *shm_mnt;
49667 +#ifdef CONFIG_HUGETLBFS
49668 +extern struct vfsmount *hugetlbfs_vfsmount;
49669 +#endif
49670 +
49671 +static struct acl_object_label *fakefs_obj_rw;
49672 +static struct acl_object_label *fakefs_obj_rwx;
49673 +
49674 +extern int gr_init_uidset(void);
49675 +extern void gr_free_uidset(void);
49676 +extern void gr_remove_uid(uid_t uid);
49677 +extern int gr_find_uid(uid_t uid);
49678 +
49679 +DECLARE_BRLOCK(vfsmount_lock);
49680 +
49681 +__inline__ int
49682 +gr_acl_is_enabled(void)
49683 +{
49684 + return (gr_status & GR_READY);
49685 +}
49686 +
49687 +#ifdef CONFIG_BTRFS_FS
49688 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
49689 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
49690 +#endif
49691 +
49692 +static inline dev_t __get_dev(const struct dentry *dentry)
49693 +{
49694 +#ifdef CONFIG_BTRFS_FS
49695 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
49696 + return get_btrfs_dev_from_inode(dentry->d_inode);
49697 + else
49698 +#endif
49699 + return dentry->d_inode->i_sb->s_dev;
49700 +}
49701 +
49702 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
49703 +{
49704 + return __get_dev(dentry);
49705 +}
49706 +
49707 +static char gr_task_roletype_to_char(struct task_struct *task)
49708 +{
49709 + switch (task->role->roletype &
49710 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
49711 + GR_ROLE_SPECIAL)) {
49712 + case GR_ROLE_DEFAULT:
49713 + return 'D';
49714 + case GR_ROLE_USER:
49715 + return 'U';
49716 + case GR_ROLE_GROUP:
49717 + return 'G';
49718 + case GR_ROLE_SPECIAL:
49719 + return 'S';
49720 + }
49721 +
49722 + return 'X';
49723 +}
49724 +
49725 +char gr_roletype_to_char(void)
49726 +{
49727 + return gr_task_roletype_to_char(current);
49728 +}
49729 +
49730 +__inline__ int
49731 +gr_acl_tpe_check(void)
49732 +{
49733 + if (unlikely(!(gr_status & GR_READY)))
49734 + return 0;
49735 + if (current->role->roletype & GR_ROLE_TPE)
49736 + return 1;
49737 + else
49738 + return 0;
49739 +}
49740 +
49741 +int
49742 +gr_handle_rawio(const struct inode *inode)
49743 +{
49744 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
49745 + if (inode && S_ISBLK(inode->i_mode) &&
49746 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
49747 + !capable(CAP_SYS_RAWIO))
49748 + return 1;
49749 +#endif
49750 + return 0;
49751 +}
49752 +
49753 +static int
49754 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
49755 +{
49756 + if (likely(lena != lenb))
49757 + return 0;
49758 +
49759 + return !memcmp(a, b, lena);
49760 +}
49761 +
49762 +static int prepend(char **buffer, int *buflen, const char *str, int namelen)
49763 +{
49764 + *buflen -= namelen;
49765 + if (*buflen < 0)
49766 + return -ENAMETOOLONG;
49767 + *buffer -= namelen;
49768 + memcpy(*buffer, str, namelen);
49769 + return 0;
49770 +}
49771 +
49772 +static int prepend_name(char **buffer, int *buflen, struct qstr *name)
49773 +{
49774 + return prepend(buffer, buflen, name->name, name->len);
49775 +}
49776 +
49777 +static int prepend_path(const struct path *path, struct path *root,
49778 + char **buffer, int *buflen)
49779 +{
49780 + struct dentry *dentry = path->dentry;
49781 + struct vfsmount *vfsmnt = path->mnt;
49782 + struct mount *mnt = real_mount(vfsmnt);
49783 + bool slash = false;
49784 + int error = 0;
49785 +
49786 + while (dentry != root->dentry || vfsmnt != root->mnt) {
49787 + struct dentry * parent;
49788 +
49789 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
49790 + /* Global root? */
49791 + if (!mnt_has_parent(mnt)) {
49792 + goto out;
49793 + }
49794 + dentry = mnt->mnt_mountpoint;
49795 + mnt = mnt->mnt_parent;
49796 + vfsmnt = &mnt->mnt;
49797 + continue;
49798 + }
49799 + parent = dentry->d_parent;
49800 + prefetch(parent);
49801 + spin_lock(&dentry->d_lock);
49802 + error = prepend_name(buffer, buflen, &dentry->d_name);
49803 + spin_unlock(&dentry->d_lock);
49804 + if (!error)
49805 + error = prepend(buffer, buflen, "/", 1);
49806 + if (error)
49807 + break;
49808 +
49809 + slash = true;
49810 + dentry = parent;
49811 + }
49812 +
49813 +out:
49814 + if (!error && !slash)
49815 + error = prepend(buffer, buflen, "/", 1);
49816 +
49817 + return error;
49818 +}
49819 +
49820 +/* this must be called with vfsmount_lock and rename_lock held */
49821 +
49822 +static char *__our_d_path(const struct path *path, struct path *root,
49823 + char *buf, int buflen)
49824 +{
49825 + char *res = buf + buflen;
49826 + int error;
49827 +
49828 + prepend(&res, &buflen, "\0", 1);
49829 + error = prepend_path(path, root, &res, &buflen);
49830 + if (error)
49831 + return ERR_PTR(error);
49832 +
49833 + return res;
49834 +}
49835 +
49836 +static char *
49837 +gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
49838 +{
49839 + char *retval;
49840 +
49841 + retval = __our_d_path(path, root, buf, buflen);
49842 + if (unlikely(IS_ERR(retval)))
49843 + retval = strcpy(buf, "<path too long>");
49844 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
49845 + retval[1] = '\0';
49846 +
49847 + return retval;
49848 +}
49849 +
49850 +static char *
49851 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
49852 + char *buf, int buflen)
49853 +{
49854 + struct path path;
49855 + char *res;
49856 +
49857 + path.dentry = (struct dentry *)dentry;
49858 + path.mnt = (struct vfsmount *)vfsmnt;
49859 +
49860 + /* we can use real_root.dentry, real_root.mnt, because this is only called
49861 + by the RBAC system */
49862 + res = gen_full_path(&path, &real_root, buf, buflen);
49863 +
49864 + return res;
49865 +}
49866 +
49867 +static char *
49868 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
49869 + char *buf, int buflen)
49870 +{
49871 + char *res;
49872 + struct path path;
49873 + struct path root;
49874 + struct task_struct *reaper = &init_task;
49875 +
49876 + path.dentry = (struct dentry *)dentry;
49877 + path.mnt = (struct vfsmount *)vfsmnt;
49878 +
49879 + /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
49880 + get_fs_root(reaper->fs, &root);
49881 +
49882 + write_seqlock(&rename_lock);
49883 + br_read_lock(vfsmount_lock);
49884 + res = gen_full_path(&path, &root, buf, buflen);
49885 + br_read_unlock(vfsmount_lock);
49886 + write_sequnlock(&rename_lock);
49887 +
49888 + path_put(&root);
49889 + return res;
49890 +}
49891 +
49892 +static char *
49893 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
49894 +{
49895 + char *ret;
49896 + write_seqlock(&rename_lock);
49897 + br_read_lock(vfsmount_lock);
49898 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
49899 + PAGE_SIZE);
49900 + br_read_unlock(vfsmount_lock);
49901 + write_sequnlock(&rename_lock);
49902 + return ret;
49903 +}
49904 +
49905 +static char *
49906 +gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
49907 +{
49908 + char *ret;
49909 + char *buf;
49910 + int buflen;
49911 +
49912 + write_seqlock(&rename_lock);
49913 + br_read_lock(vfsmount_lock);
49914 + buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
49915 + ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
49916 + buflen = (int)(ret - buf);
49917 + if (buflen >= 5)
49918 + prepend(&ret, &buflen, "/proc", 5);
49919 + else
49920 + ret = strcpy(buf, "<path too long>");
49921 + br_read_unlock(vfsmount_lock);
49922 + write_sequnlock(&rename_lock);
49923 + return ret;
49924 +}
49925 +
49926 +char *
49927 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
49928 +{
49929 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
49930 + PAGE_SIZE);
49931 +}
49932 +
49933 +char *
49934 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
49935 +{
49936 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
49937 + PAGE_SIZE);
49938 +}
49939 +
49940 +char *
49941 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
49942 +{
49943 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
49944 + PAGE_SIZE);
49945 +}
49946 +
49947 +char *
49948 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
49949 +{
49950 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
49951 + PAGE_SIZE);
49952 +}
49953 +
49954 +char *
49955 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
49956 +{
49957 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
49958 + PAGE_SIZE);
49959 +}
49960 +
49961 +__inline__ __u32
49962 +to_gr_audit(const __u32 reqmode)
49963 +{
49964 + /* masks off auditable permission flags, then shifts them to create
49965 + auditing flags, and adds the special case of append auditing if
49966 + we're requesting write */
49967 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
49968 +}
49969 +
49970 +struct acl_subject_label *
49971 +lookup_subject_map(const struct acl_subject_label *userp)
49972 +{
49973 + unsigned int index = shash(userp, subj_map_set.s_size);
49974 + struct subject_map *match;
49975 +
49976 + match = subj_map_set.s_hash[index];
49977 +
49978 + while (match && match->user != userp)
49979 + match = match->next;
49980 +
49981 + if (match != NULL)
49982 + return match->kernel;
49983 + else
49984 + return NULL;
49985 +}
49986 +
49987 +static void
49988 +insert_subj_map_entry(struct subject_map *subjmap)
49989 +{
49990 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
49991 + struct subject_map **curr;
49992 +
49993 + subjmap->prev = NULL;
49994 +
49995 + curr = &subj_map_set.s_hash[index];
49996 + if (*curr != NULL)
49997 + (*curr)->prev = subjmap;
49998 +
49999 + subjmap->next = *curr;
50000 + *curr = subjmap;
50001 +
50002 + return;
50003 +}
50004 +
50005 +static struct acl_role_label *
50006 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
50007 + const gid_t gid)
50008 +{
50009 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
50010 + struct acl_role_label *match;
50011 + struct role_allowed_ip *ipp;
50012 + unsigned int x;
50013 + u32 curr_ip = task->signal->curr_ip;
50014 +
50015 + task->signal->saved_ip = curr_ip;
50016 +
50017 + match = acl_role_set.r_hash[index];
50018 +
50019 + while (match) {
50020 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
50021 + for (x = 0; x < match->domain_child_num; x++) {
50022 + if (match->domain_children[x] == uid)
50023 + goto found;
50024 + }
50025 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
50026 + break;
50027 + match = match->next;
50028 + }
50029 +found:
50030 + if (match == NULL) {
50031 + try_group:
50032 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
50033 + match = acl_role_set.r_hash[index];
50034 +
50035 + while (match) {
50036 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
50037 + for (x = 0; x < match->domain_child_num; x++) {
50038 + if (match->domain_children[x] == gid)
50039 + goto found2;
50040 + }
50041 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
50042 + break;
50043 + match = match->next;
50044 + }
50045 +found2:
50046 + if (match == NULL)
50047 + match = default_role;
50048 + if (match->allowed_ips == NULL)
50049 + return match;
50050 + else {
50051 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
50052 + if (likely
50053 + ((ntohl(curr_ip) & ipp->netmask) ==
50054 + (ntohl(ipp->addr) & ipp->netmask)))
50055 + return match;
50056 + }
50057 + match = default_role;
50058 + }
50059 + } else if (match->allowed_ips == NULL) {
50060 + return match;
50061 + } else {
50062 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
50063 + if (likely
50064 + ((ntohl(curr_ip) & ipp->netmask) ==
50065 + (ntohl(ipp->addr) & ipp->netmask)))
50066 + return match;
50067 + }
50068 + goto try_group;
50069 + }
50070 +
50071 + return match;
50072 +}
50073 +
50074 +struct acl_subject_label *
50075 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
50076 + const struct acl_role_label *role)
50077 +{
50078 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
50079 + struct acl_subject_label *match;
50080 +
50081 + match = role->subj_hash[index];
50082 +
50083 + while (match && (match->inode != ino || match->device != dev ||
50084 + (match->mode & GR_DELETED))) {
50085 + match = match->next;
50086 + }
50087 +
50088 + if (match && !(match->mode & GR_DELETED))
50089 + return match;
50090 + else
50091 + return NULL;
50092 +}
50093 +
50094 +struct acl_subject_label *
50095 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
50096 + const struct acl_role_label *role)
50097 +{
50098 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
50099 + struct acl_subject_label *match;
50100 +
50101 + match = role->subj_hash[index];
50102 +
50103 + while (match && (match->inode != ino || match->device != dev ||
50104 + !(match->mode & GR_DELETED))) {
50105 + match = match->next;
50106 + }
50107 +
50108 + if (match && (match->mode & GR_DELETED))
50109 + return match;
50110 + else
50111 + return NULL;
50112 +}
50113 +
50114 +static struct acl_object_label *
50115 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
50116 + const struct acl_subject_label *subj)
50117 +{
50118 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
50119 + struct acl_object_label *match;
50120 +
50121 + match = subj->obj_hash[index];
50122 +
50123 + while (match && (match->inode != ino || match->device != dev ||
50124 + (match->mode & GR_DELETED))) {
50125 + match = match->next;
50126 + }
50127 +
50128 + if (match && !(match->mode & GR_DELETED))
50129 + return match;
50130 + else
50131 + return NULL;
50132 +}
50133 +
50134 +static struct acl_object_label *
50135 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
50136 + const struct acl_subject_label *subj)
50137 +{
50138 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
50139 + struct acl_object_label *match;
50140 +
50141 + match = subj->obj_hash[index];
50142 +
50143 + while (match && (match->inode != ino || match->device != dev ||
50144 + !(match->mode & GR_DELETED))) {
50145 + match = match->next;
50146 + }
50147 +
50148 + if (match && (match->mode & GR_DELETED))
50149 + return match;
50150 +
50151 + match = subj->obj_hash[index];
50152 +
50153 + while (match && (match->inode != ino || match->device != dev ||
50154 + (match->mode & GR_DELETED))) {
50155 + match = match->next;
50156 + }
50157 +
50158 + if (match && !(match->mode & GR_DELETED))
50159 + return match;
50160 + else
50161 + return NULL;
50162 +}
50163 +
50164 +static struct name_entry *
50165 +lookup_name_entry(const char *name)
50166 +{
50167 + unsigned int len = strlen(name);
50168 + unsigned int key = full_name_hash(name, len);
50169 + unsigned int index = key % name_set.n_size;
50170 + struct name_entry *match;
50171 +
50172 + match = name_set.n_hash[index];
50173 +
50174 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
50175 + match = match->next;
50176 +
50177 + return match;
50178 +}
50179 +
50180 +static struct name_entry *
50181 +lookup_name_entry_create(const char *name)
50182 +{
50183 + unsigned int len = strlen(name);
50184 + unsigned int key = full_name_hash(name, len);
50185 + unsigned int index = key % name_set.n_size;
50186 + struct name_entry *match;
50187 +
50188 + match = name_set.n_hash[index];
50189 +
50190 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
50191 + !match->deleted))
50192 + match = match->next;
50193 +
50194 + if (match && match->deleted)
50195 + return match;
50196 +
50197 + match = name_set.n_hash[index];
50198 +
50199 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
50200 + match->deleted))
50201 + match = match->next;
50202 +
50203 + if (match && !match->deleted)
50204 + return match;
50205 + else
50206 + return NULL;
50207 +}
50208 +
50209 +static struct inodev_entry *
50210 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
50211 +{
50212 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
50213 + struct inodev_entry *match;
50214 +
50215 + match = inodev_set.i_hash[index];
50216 +
50217 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
50218 + match = match->next;
50219 +
50220 + return match;
50221 +}
50222 +
50223 +static void
50224 +insert_inodev_entry(struct inodev_entry *entry)
50225 +{
50226 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
50227 + inodev_set.i_size);
50228 + struct inodev_entry **curr;
50229 +
50230 + entry->prev = NULL;
50231 +
50232 + curr = &inodev_set.i_hash[index];
50233 + if (*curr != NULL)
50234 + (*curr)->prev = entry;
50235 +
50236 + entry->next = *curr;
50237 + *curr = entry;
50238 +
50239 + return;
50240 +}
50241 +
50242 +static void
50243 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
50244 +{
50245 + unsigned int index =
50246 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
50247 + struct acl_role_label **curr;
50248 + struct acl_role_label *tmp, *tmp2;
50249 +
50250 + curr = &acl_role_set.r_hash[index];
50251 +
50252 + /* simple case, slot is empty, just set it to our role */
50253 + if (*curr == NULL) {
50254 + *curr = role;
50255 + } else {
50256 + /* example:
50257 + 1 -> 2 -> 3 (adding 2 -> 3 to here)
50258 + 2 -> 3
50259 + */
50260 + /* first check to see if we can already be reached via this slot */
50261 + tmp = *curr;
50262 + while (tmp && tmp != role)
50263 + tmp = tmp->next;
50264 + if (tmp == role) {
50265 + /* we don't need to add ourselves to this slot's chain */
50266 + return;
50267 + }
50268 + /* we need to add ourselves to this chain, two cases */
50269 + if (role->next == NULL) {
50270 + /* simple case, append the current chain to our role */
50271 + role->next = *curr;
50272 + *curr = role;
50273 + } else {
50274 + /* 1 -> 2 -> 3 -> 4
50275 + 2 -> 3 -> 4
50276 + 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
50277 + */
50278 + /* trickier case: walk our role's chain until we find
50279 + the role for the start of the current slot's chain */
50280 + tmp = role;
50281 + tmp2 = *curr;
50282 + while (tmp->next && tmp->next != tmp2)
50283 + tmp = tmp->next;
50284 + if (tmp->next == tmp2) {
50285 + /* from example above, we found 3, so just
50286 + replace this slot's chain with ours */
50287 + *curr = role;
50288 + } else {
50289 + /* we didn't find a subset of our role's chain
50290 + in the current slot's chain, so append their
50291 + chain to ours, and set us as the first role in
50292 + the slot's chain
50293 +
50294 + we could fold this case with the case above,
50295 + but making it explicit for clarity
50296 + */
50297 + tmp->next = tmp2;
50298 + *curr = role;
50299 + }
50300 + }
50301 + }
50302 +
50303 + return;
50304 +}
50305 +
50306 +static void
50307 +insert_acl_role_label(struct acl_role_label *role)
50308 +{
50309 + int i;
50310 +
50311 + if (role_list == NULL) {
50312 + role_list = role;
50313 + role->prev = NULL;
50314 + } else {
50315 + role->prev = role_list;
50316 + role_list = role;
50317 + }
50318 +
50319 + /* used for hash chains */
50320 + role->next = NULL;
50321 +
50322 + if (role->roletype & GR_ROLE_DOMAIN) {
50323 + for (i = 0; i < role->domain_child_num; i++)
50324 + __insert_acl_role_label(role, role->domain_children[i]);
50325 + } else
50326 + __insert_acl_role_label(role, role->uidgid);
50327 +}
50328 +
50329 +static int
50330 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
50331 +{
50332 + struct name_entry **curr, *nentry;
50333 + struct inodev_entry *ientry;
50334 + unsigned int len = strlen(name);
50335 + unsigned int key = full_name_hash(name, len);
50336 + unsigned int index = key % name_set.n_size;
50337 +
50338 + curr = &name_set.n_hash[index];
50339 +
50340 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
50341 + curr = &((*curr)->next);
50342 +
50343 + if (*curr != NULL)
50344 + return 1;
50345 +
50346 + nentry = acl_alloc(sizeof (struct name_entry));
50347 + if (nentry == NULL)
50348 + return 0;
50349 + ientry = acl_alloc(sizeof (struct inodev_entry));
50350 + if (ientry == NULL)
50351 + return 0;
50352 + ientry->nentry = nentry;
50353 +
50354 + nentry->key = key;
50355 + nentry->name = name;
50356 + nentry->inode = inode;
50357 + nentry->device = device;
50358 + nentry->len = len;
50359 + nentry->deleted = deleted;
50360 +
50361 + nentry->prev = NULL;
50362 + curr = &name_set.n_hash[index];
50363 + if (*curr != NULL)
50364 + (*curr)->prev = nentry;
50365 + nentry->next = *curr;
50366 + *curr = nentry;
50367 +
50368 + /* insert us into the table searchable by inode/dev */
50369 + insert_inodev_entry(ientry);
50370 +
50371 + return 1;
50372 +}
50373 +
50374 +static void
50375 +insert_acl_obj_label(struct acl_object_label *obj,
50376 + struct acl_subject_label *subj)
50377 +{
50378 + unsigned int index =
50379 + fhash(obj->inode, obj->device, subj->obj_hash_size);
50380 + struct acl_object_label **curr;
50381 +
50382 +
50383 + obj->prev = NULL;
50384 +
50385 + curr = &subj->obj_hash[index];
50386 + if (*curr != NULL)
50387 + (*curr)->prev = obj;
50388 +
50389 + obj->next = *curr;
50390 + *curr = obj;
50391 +
50392 + return;
50393 +}
50394 +
50395 +static void
50396 +insert_acl_subj_label(struct acl_subject_label *obj,
50397 + struct acl_role_label *role)
50398 +{
50399 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
50400 + struct acl_subject_label **curr;
50401 +
50402 + obj->prev = NULL;
50403 +
50404 + curr = &role->subj_hash[index];
50405 + if (*curr != NULL)
50406 + (*curr)->prev = obj;
50407 +
50408 + obj->next = *curr;
50409 + *curr = obj;
50410 +
50411 + return;
50412 +}
50413 +
50414 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
50415 +
50416 +static void *
50417 +create_table(__u32 * len, int elementsize)
50418 +{
50419 + unsigned int table_sizes[] = {
50420 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
50421 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
50422 + 4194301, 8388593, 16777213, 33554393, 67108859
50423 + };
50424 + void *newtable = NULL;
50425 + unsigned int pwr = 0;
50426 +
50427 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
50428 + table_sizes[pwr] <= *len)
50429 + pwr++;
50430 +
50431 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
50432 + return newtable;
50433 +
50434 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
50435 + newtable =
50436 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
50437 + else
50438 + newtable = vmalloc(table_sizes[pwr] * elementsize);
50439 +
50440 + *len = table_sizes[pwr];
50441 +
50442 + return newtable;
50443 +}
50444 +
50445 +static int
50446 +init_variables(const struct gr_arg *arg)
50447 +{
50448 + struct task_struct *reaper = &init_task;
50449 + unsigned int stacksize;
50450 +
50451 + subj_map_set.s_size = arg->role_db.num_subjects;
50452 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
50453 + name_set.n_size = arg->role_db.num_objects;
50454 + inodev_set.i_size = arg->role_db.num_objects;
50455 +
50456 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
50457 + !name_set.n_size || !inodev_set.i_size)
50458 + return 1;
50459 +
50460 + if (!gr_init_uidset())
50461 + return 1;
50462 +
50463 + /* set up the stack that holds allocation info */
50464 +
50465 + stacksize = arg->role_db.num_pointers + 5;
50466 +
50467 + if (!acl_alloc_stack_init(stacksize))
50468 + return 1;
50469 +
50470 + /* grab reference for the real root dentry and vfsmount */
50471 + get_fs_root(reaper->fs, &real_root);
50472 +
50473 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
50474 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
50475 +#endif
50476 +
50477 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
50478 + if (fakefs_obj_rw == NULL)
50479 + return 1;
50480 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
50481 +
50482 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
50483 + if (fakefs_obj_rwx == NULL)
50484 + return 1;
50485 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
50486 +
50487 + subj_map_set.s_hash =
50488 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
50489 + acl_role_set.r_hash =
50490 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
50491 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
50492 + inodev_set.i_hash =
50493 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
50494 +
50495 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
50496 + !name_set.n_hash || !inodev_set.i_hash)
50497 + return 1;
50498 +
50499 + memset(subj_map_set.s_hash, 0,
50500 + sizeof(struct subject_map *) * subj_map_set.s_size);
50501 + memset(acl_role_set.r_hash, 0,
50502 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
50503 + memset(name_set.n_hash, 0,
50504 + sizeof (struct name_entry *) * name_set.n_size);
50505 + memset(inodev_set.i_hash, 0,
50506 + sizeof (struct inodev_entry *) * inodev_set.i_size);
50507 +
50508 + return 0;
50509 +}
50510 +
50511 +/* free information not needed after startup
50512 + currently contains user->kernel pointer mappings for subjects
50513 +*/
50514 +
50515 +static void
50516 +free_init_variables(void)
50517 +{
50518 + __u32 i;
50519 +
50520 + if (subj_map_set.s_hash) {
50521 + for (i = 0; i < subj_map_set.s_size; i++) {
50522 + if (subj_map_set.s_hash[i]) {
50523 + kfree(subj_map_set.s_hash[i]);
50524 + subj_map_set.s_hash[i] = NULL;
50525 + }
50526 + }
50527 +
50528 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
50529 + PAGE_SIZE)
50530 + kfree(subj_map_set.s_hash);
50531 + else
50532 + vfree(subj_map_set.s_hash);
50533 + }
50534 +
50535 + return;
50536 +}
50537 +
50538 +static void
50539 +free_variables(void)
50540 +{
50541 + struct acl_subject_label *s;
50542 + struct acl_role_label *r;
50543 + struct task_struct *task, *task2;
50544 + unsigned int x;
50545 +
50546 + gr_clear_learn_entries();
50547 +
50548 + read_lock(&tasklist_lock);
50549 + do_each_thread(task2, task) {
50550 + task->acl_sp_role = 0;
50551 + task->acl_role_id = 0;
50552 + task->acl = NULL;
50553 + task->role = NULL;
50554 + } while_each_thread(task2, task);
50555 + read_unlock(&tasklist_lock);
50556 +
50557 + /* release the reference to the real root dentry and vfsmount */
50558 + path_put(&real_root);
50559 + memset(&real_root, 0, sizeof(real_root));
50560 +
50561 + /* free all object hash tables */
50562 +
50563 + FOR_EACH_ROLE_START(r)
50564 + if (r->subj_hash == NULL)
50565 + goto next_role;
50566 + FOR_EACH_SUBJECT_START(r, s, x)
50567 + if (s->obj_hash == NULL)
50568 + break;
50569 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
50570 + kfree(s->obj_hash);
50571 + else
50572 + vfree(s->obj_hash);
50573 + FOR_EACH_SUBJECT_END(s, x)
50574 + FOR_EACH_NESTED_SUBJECT_START(r, s)
50575 + if (s->obj_hash == NULL)
50576 + break;
50577 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
50578 + kfree(s->obj_hash);
50579 + else
50580 + vfree(s->obj_hash);
50581 + FOR_EACH_NESTED_SUBJECT_END(s)
50582 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
50583 + kfree(r->subj_hash);
50584 + else
50585 + vfree(r->subj_hash);
50586 + r->subj_hash = NULL;
50587 +next_role:
50588 + FOR_EACH_ROLE_END(r)
50589 +
50590 + acl_free_all();
50591 +
50592 + if (acl_role_set.r_hash) {
50593 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
50594 + PAGE_SIZE)
50595 + kfree(acl_role_set.r_hash);
50596 + else
50597 + vfree(acl_role_set.r_hash);
50598 + }
50599 + if (name_set.n_hash) {
50600 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
50601 + PAGE_SIZE)
50602 + kfree(name_set.n_hash);
50603 + else
50604 + vfree(name_set.n_hash);
50605 + }
50606 +
50607 + if (inodev_set.i_hash) {
50608 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
50609 + PAGE_SIZE)
50610 + kfree(inodev_set.i_hash);
50611 + else
50612 + vfree(inodev_set.i_hash);
50613 + }
50614 +
50615 + gr_free_uidset();
50616 +
50617 + memset(&name_set, 0, sizeof (struct name_db));
50618 + memset(&inodev_set, 0, sizeof (struct inodev_db));
50619 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
50620 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
50621 +
50622 + default_role = NULL;
50623 + kernel_role = NULL;
50624 + role_list = NULL;
50625 +
50626 + return;
50627 +}
50628 +
50629 +static __u32
50630 +count_user_objs(struct acl_object_label *userp)
50631 +{
50632 + struct acl_object_label o_tmp;
50633 + __u32 num = 0;
50634 +
50635 + while (userp) {
50636 + if (copy_from_user(&o_tmp, userp,
50637 + sizeof (struct acl_object_label)))
50638 + break;
50639 +
50640 + userp = o_tmp.prev;
50641 + num++;
50642 + }
50643 +
50644 + return num;
50645 +}
50646 +
50647 +static struct acl_subject_label *
50648 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
50649 +
50650 +static int
50651 +copy_user_glob(struct acl_object_label *obj)
50652 +{
50653 + struct acl_object_label *g_tmp, **guser;
50654 + unsigned int len;
50655 + char *tmp;
50656 +
50657 + if (obj->globbed == NULL)
50658 + return 0;
50659 +
50660 + guser = &obj->globbed;
50661 + while (*guser) {
50662 + g_tmp = (struct acl_object_label *)
50663 + acl_alloc(sizeof (struct acl_object_label));
50664 + if (g_tmp == NULL)
50665 + return -ENOMEM;
50666 +
50667 + if (copy_from_user(g_tmp, *guser,
50668 + sizeof (struct acl_object_label)))
50669 + return -EFAULT;
50670 +
50671 + len = strnlen_user(g_tmp->filename, PATH_MAX);
50672 +
50673 + if (!len || len >= PATH_MAX)
50674 + return -EINVAL;
50675 +
50676 + if ((tmp = (char *) acl_alloc(len)) == NULL)
50677 + return -ENOMEM;
50678 +
50679 + if (copy_from_user(tmp, g_tmp->filename, len))
50680 + return -EFAULT;
50681 + tmp[len-1] = '\0';
50682 + g_tmp->filename = tmp;
50683 +
50684 + *guser = g_tmp;
50685 + guser = &(g_tmp->next);
50686 + }
50687 +
50688 + return 0;
50689 +}
50690 +
50691 +static int
50692 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
50693 + struct acl_role_label *role)
50694 +{
50695 + struct acl_object_label *o_tmp;
50696 + unsigned int len;
50697 + int ret;
50698 + char *tmp;
50699 +
50700 + while (userp) {
50701 + if ((o_tmp = (struct acl_object_label *)
50702 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
50703 + return -ENOMEM;
50704 +
50705 + if (copy_from_user(o_tmp, userp,
50706 + sizeof (struct acl_object_label)))
50707 + return -EFAULT;
50708 +
50709 + userp = o_tmp->prev;
50710 +
50711 + len = strnlen_user(o_tmp->filename, PATH_MAX);
50712 +
50713 + if (!len || len >= PATH_MAX)
50714 + return -EINVAL;
50715 +
50716 + if ((tmp = (char *) acl_alloc(len)) == NULL)
50717 + return -ENOMEM;
50718 +
50719 + if (copy_from_user(tmp, o_tmp->filename, len))
50720 + return -EFAULT;
50721 + tmp[len-1] = '\0';
50722 + o_tmp->filename = tmp;
50723 +
50724 + insert_acl_obj_label(o_tmp, subj);
50725 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
50726 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
50727 + return -ENOMEM;
50728 +
50729 + ret = copy_user_glob(o_tmp);
50730 + if (ret)
50731 + return ret;
50732 +
50733 + if (o_tmp->nested) {
50734 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
50735 + if (IS_ERR(o_tmp->nested))
50736 + return PTR_ERR(o_tmp->nested);
50737 +
50738 + /* insert into nested subject list */
50739 + o_tmp->nested->next = role->hash->first;
50740 + role->hash->first = o_tmp->nested;
50741 + }
50742 + }
50743 +
50744 + return 0;
50745 +}
50746 +
50747 +static __u32
50748 +count_user_subjs(struct acl_subject_label *userp)
50749 +{
50750 + struct acl_subject_label s_tmp;
50751 + __u32 num = 0;
50752 +
50753 + while (userp) {
50754 + if (copy_from_user(&s_tmp, userp,
50755 + sizeof (struct acl_subject_label)))
50756 + break;
50757 +
50758 + userp = s_tmp.prev;
50759 + /* do not count nested subjects against this count, since
50760 + they are not included in the hash table, but are
50761 + attached to objects. We have already counted
50762 + the subjects in userspace for the allocation
50763 + stack
50764 + */
50765 + if (!(s_tmp.mode & GR_NESTED))
50766 + num++;
50767 + }
50768 +
50769 + return num;
50770 +}
50771 +
50772 +static int
50773 +copy_user_allowedips(struct acl_role_label *rolep)
50774 +{
50775 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
50776 +
50777 + ruserip = rolep->allowed_ips;
50778 +
50779 + while (ruserip) {
50780 + rlast = rtmp;
50781 +
50782 + if ((rtmp = (struct role_allowed_ip *)
50783 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
50784 + return -ENOMEM;
50785 +
50786 + if (copy_from_user(rtmp, ruserip,
50787 + sizeof (struct role_allowed_ip)))
50788 + return -EFAULT;
50789 +
50790 + ruserip = rtmp->prev;
50791 +
50792 + if (!rlast) {
50793 + rtmp->prev = NULL;
50794 + rolep->allowed_ips = rtmp;
50795 + } else {
50796 + rlast->next = rtmp;
50797 + rtmp->prev = rlast;
50798 + }
50799 +
50800 + if (!ruserip)
50801 + rtmp->next = NULL;
50802 + }
50803 +
50804 + return 0;
50805 +}
50806 +
50807 +static int
50808 +copy_user_transitions(struct acl_role_label *rolep)
50809 +{
50810 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
50811 +
50812 + unsigned int len;
50813 + char *tmp;
50814 +
50815 + rusertp = rolep->transitions;
50816 +
50817 + while (rusertp) {
50818 + rlast = rtmp;
50819 +
50820 + if ((rtmp = (struct role_transition *)
50821 + acl_alloc(sizeof (struct role_transition))) == NULL)
50822 + return -ENOMEM;
50823 +
50824 + if (copy_from_user(rtmp, rusertp,
50825 + sizeof (struct role_transition)))
50826 + return -EFAULT;
50827 +
50828 + rusertp = rtmp->prev;
50829 +
50830 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
50831 +
50832 + if (!len || len >= GR_SPROLE_LEN)
50833 + return -EINVAL;
50834 +
50835 + if ((tmp = (char *) acl_alloc(len)) == NULL)
50836 + return -ENOMEM;
50837 +
50838 + if (copy_from_user(tmp, rtmp->rolename, len))
50839 + return -EFAULT;
50840 + tmp[len-1] = '\0';
50841 + rtmp->rolename = tmp;
50842 +
50843 + if (!rlast) {
50844 + rtmp->prev = NULL;
50845 + rolep->transitions = rtmp;
50846 + } else {
50847 + rlast->next = rtmp;
50848 + rtmp->prev = rlast;
50849 + }
50850 +
50851 + if (!rusertp)
50852 + rtmp->next = NULL;
50853 + }
50854 +
50855 + return 0;
50856 +}
50857 +
50858 +static struct acl_subject_label *
50859 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
50860 +{
50861 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
50862 + unsigned int len;
50863 + char *tmp;
50864 + __u32 num_objs;
50865 + struct acl_ip_label **i_tmp, *i_utmp2;
50866 + struct gr_hash_struct ghash;
50867 + struct subject_map *subjmap;
50868 + unsigned int i_num;
50869 + int err;
50870 +
50871 + s_tmp = lookup_subject_map(userp);
50872 +
50873 + /* we've already copied this subject into the kernel, just return
50874 + the reference to it, and don't copy it over again
50875 + */
50876 + if (s_tmp)
50877 + return(s_tmp);
50878 +
50879 + if ((s_tmp = (struct acl_subject_label *)
50880 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
50881 + return ERR_PTR(-ENOMEM);
50882 +
50883 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
50884 + if (subjmap == NULL)
50885 + return ERR_PTR(-ENOMEM);
50886 +
50887 + subjmap->user = userp;
50888 + subjmap->kernel = s_tmp;
50889 + insert_subj_map_entry(subjmap);
50890 +
50891 + if (copy_from_user(s_tmp, userp,
50892 + sizeof (struct acl_subject_label)))
50893 + return ERR_PTR(-EFAULT);
50894 +
50895 + len = strnlen_user(s_tmp->filename, PATH_MAX);
50896 +
50897 + if (!len || len >= PATH_MAX)
50898 + return ERR_PTR(-EINVAL);
50899 +
50900 + if ((tmp = (char *) acl_alloc(len)) == NULL)
50901 + return ERR_PTR(-ENOMEM);
50902 +
50903 + if (copy_from_user(tmp, s_tmp->filename, len))
50904 + return ERR_PTR(-EFAULT);
50905 + tmp[len-1] = '\0';
50906 + s_tmp->filename = tmp;
50907 +
50908 + if (!strcmp(s_tmp->filename, "/"))
50909 + role->root_label = s_tmp;
50910 +
50911 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
50912 + return ERR_PTR(-EFAULT);
50913 +
50914 + /* copy user and group transition tables */
50915 +
50916 + if (s_tmp->user_trans_num) {
50917 + uid_t *uidlist;
50918 +
50919 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
50920 + if (uidlist == NULL)
50921 + return ERR_PTR(-ENOMEM);
50922 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
50923 + return ERR_PTR(-EFAULT);
50924 +
50925 + s_tmp->user_transitions = uidlist;
50926 + }
50927 +
50928 + if (s_tmp->group_trans_num) {
50929 + gid_t *gidlist;
50930 +
50931 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
50932 + if (gidlist == NULL)
50933 + return ERR_PTR(-ENOMEM);
50934 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
50935 + return ERR_PTR(-EFAULT);
50936 +
50937 + s_tmp->group_transitions = gidlist;
50938 + }
50939 +
50940 + /* set up object hash table */
50941 + num_objs = count_user_objs(ghash.first);
50942 +
50943 + s_tmp->obj_hash_size = num_objs;
50944 + s_tmp->obj_hash =
50945 + (struct acl_object_label **)
50946 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
50947 +
50948 + if (!s_tmp->obj_hash)
50949 + return ERR_PTR(-ENOMEM);
50950 +
50951 + memset(s_tmp->obj_hash, 0,
50952 + s_tmp->obj_hash_size *
50953 + sizeof (struct acl_object_label *));
50954 +
50955 + /* add in objects */
50956 + err = copy_user_objs(ghash.first, s_tmp, role);
50957 +
50958 + if (err)
50959 + return ERR_PTR(err);
50960 +
50961 + /* set pointer for parent subject */
50962 + if (s_tmp->parent_subject) {
50963 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
50964 +
50965 + if (IS_ERR(s_tmp2))
50966 + return s_tmp2;
50967 +
50968 + s_tmp->parent_subject = s_tmp2;
50969 + }
50970 +
50971 + /* add in ip acls */
50972 +
50973 + if (!s_tmp->ip_num) {
50974 + s_tmp->ips = NULL;
50975 + goto insert;
50976 + }
50977 +
50978 + i_tmp =
50979 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
50980 + sizeof (struct acl_ip_label *));
50981 +
50982 + if (!i_tmp)
50983 + return ERR_PTR(-ENOMEM);
50984 +
50985 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
50986 + *(i_tmp + i_num) =
50987 + (struct acl_ip_label *)
50988 + acl_alloc(sizeof (struct acl_ip_label));
50989 + if (!*(i_tmp + i_num))
50990 + return ERR_PTR(-ENOMEM);
50991 +
50992 + if (copy_from_user
50993 + (&i_utmp2, s_tmp->ips + i_num,
50994 + sizeof (struct acl_ip_label *)))
50995 + return ERR_PTR(-EFAULT);
50996 +
50997 + if (copy_from_user
50998 + (*(i_tmp + i_num), i_utmp2,
50999 + sizeof (struct acl_ip_label)))
51000 + return ERR_PTR(-EFAULT);
51001 +
51002 + if ((*(i_tmp + i_num))->iface == NULL)
51003 + continue;
51004 +
51005 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
51006 + if (!len || len >= IFNAMSIZ)
51007 + return ERR_PTR(-EINVAL);
51008 + tmp = acl_alloc(len);
51009 + if (tmp == NULL)
51010 + return ERR_PTR(-ENOMEM);
51011 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
51012 + return ERR_PTR(-EFAULT);
51013 + (*(i_tmp + i_num))->iface = tmp;
51014 + }
51015 +
51016 + s_tmp->ips = i_tmp;
51017 +
51018 +insert:
51019 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
51020 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
51021 + return ERR_PTR(-ENOMEM);
51022 +
51023 + return s_tmp;
51024 +}
51025 +
51026 +static int
51027 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
51028 +{
51029 + struct acl_subject_label s_pre;
51030 + struct acl_subject_label * ret;
51031 + int err;
51032 +
51033 + while (userp) {
51034 + if (copy_from_user(&s_pre, userp,
51035 + sizeof (struct acl_subject_label)))
51036 + return -EFAULT;
51037 +
51038 + /* do not add nested subjects here, add
51039 + while parsing objects
51040 + */
51041 +
51042 + if (s_pre.mode & GR_NESTED) {
51043 + userp = s_pre.prev;
51044 + continue;
51045 + }
51046 +
51047 + ret = do_copy_user_subj(userp, role);
51048 +
51049 + err = PTR_ERR(ret);
51050 + if (IS_ERR(ret))
51051 + return err;
51052 +
51053 + insert_acl_subj_label(ret, role);
51054 +
51055 + userp = s_pre.prev;
51056 + }
51057 +
51058 + return 0;
51059 +}
51060 +
51061 +static int
51062 +copy_user_acl(struct gr_arg *arg)
51063 +{
51064 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
51065 + struct sprole_pw *sptmp;
51066 + struct gr_hash_struct *ghash;
51067 + uid_t *domainlist;
51068 + unsigned int r_num;
51069 + unsigned int len;
51070 + char *tmp;
51071 + int err = 0;
51072 + __u16 i;
51073 + __u32 num_subjs;
51074 +
51075 + /* we need a default and kernel role */
51076 + if (arg->role_db.num_roles < 2)
51077 + return -EINVAL;
51078 +
51079 + /* copy special role authentication info from userspace */
51080 +
51081 + num_sprole_pws = arg->num_sprole_pws;
51082 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
51083 +
51084 + if (!acl_special_roles && num_sprole_pws)
51085 + return -ENOMEM;
51086 +
51087 + for (i = 0; i < num_sprole_pws; i++) {
51088 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
51089 + if (!sptmp)
51090 + return -ENOMEM;
51091 + if (copy_from_user(sptmp, arg->sprole_pws + i,
51092 + sizeof (struct sprole_pw)))
51093 + return -EFAULT;
51094 +
51095 + len = strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
51096 +
51097 + if (!len || len >= GR_SPROLE_LEN)
51098 + return -EINVAL;
51099 +
51100 + if ((tmp = (char *) acl_alloc(len)) == NULL)
51101 + return -ENOMEM;
51102 +
51103 + if (copy_from_user(tmp, sptmp->rolename, len))
51104 + return -EFAULT;
51105 +
51106 + tmp[len-1] = '\0';
51107 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51108 + printk(KERN_ALERT "Copying special role %s\n", tmp);
51109 +#endif
51110 + sptmp->rolename = tmp;
51111 + acl_special_roles[i] = sptmp;
51112 + }
51113 +
51114 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
51115 +
51116 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
51117 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
51118 +
51119 + if (!r_tmp)
51120 + return -ENOMEM;
51121 +
51122 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
51123 + sizeof (struct acl_role_label *)))
51124 + return -EFAULT;
51125 +
51126 + if (copy_from_user(r_tmp, r_utmp2,
51127 + sizeof (struct acl_role_label)))
51128 + return -EFAULT;
51129 +
51130 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
51131 +
51132 + if (!len || len >= PATH_MAX)
51133 + return -EINVAL;
51134 +
51135 + if ((tmp = (char *) acl_alloc(len)) == NULL)
51136 + return -ENOMEM;
51137 +
51138 + if (copy_from_user(tmp, r_tmp->rolename, len))
51139 + return -EFAULT;
51140 +
51141 + tmp[len-1] = '\0';
51142 + r_tmp->rolename = tmp;
51143 +
51144 + if (!strcmp(r_tmp->rolename, "default")
51145 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
51146 + default_role = r_tmp;
51147 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
51148 + kernel_role = r_tmp;
51149 + }
51150 +
51151 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
51152 + return -ENOMEM;
51153 +
51154 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct)))
51155 + return -EFAULT;
51156 +
51157 + r_tmp->hash = ghash;
51158 +
51159 + num_subjs = count_user_subjs(r_tmp->hash->first);
51160 +
51161 + r_tmp->subj_hash_size = num_subjs;
51162 + r_tmp->subj_hash =
51163 + (struct acl_subject_label **)
51164 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
51165 +
51166 + if (!r_tmp->subj_hash)
51167 + return -ENOMEM;
51168 +
51169 + err = copy_user_allowedips(r_tmp);
51170 + if (err)
51171 + return err;
51172 +
51173 + /* copy domain info */
51174 + if (r_tmp->domain_children != NULL) {
51175 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
51176 + if (domainlist == NULL)
51177 + return -ENOMEM;
51178 +
51179 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
51180 + return -EFAULT;
51181 +
51182 + r_tmp->domain_children = domainlist;
51183 + }
51184 +
51185 + err = copy_user_transitions(r_tmp);
51186 + if (err)
51187 + return err;
51188 +
51189 + memset(r_tmp->subj_hash, 0,
51190 + r_tmp->subj_hash_size *
51191 + sizeof (struct acl_subject_label *));
51192 +
51193 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
51194 +
51195 + if (err)
51196 + return err;
51197 +
51198 + /* set nested subject list to null */
51199 + r_tmp->hash->first = NULL;
51200 +
51201 + insert_acl_role_label(r_tmp);
51202 + }
51203 +
51204 + if (default_role == NULL || kernel_role == NULL)
51205 + return -EINVAL;
51206 +
51207 + return err;
51208 +}
51209 +
51210 +static int
51211 +gracl_init(struct gr_arg *args)
51212 +{
51213 + int error = 0;
51214 +
51215 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
51216 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
51217 +
51218 + if (init_variables(args)) {
51219 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
51220 + error = -ENOMEM;
51221 + free_variables();
51222 + goto out;
51223 + }
51224 +
51225 + error = copy_user_acl(args);
51226 + free_init_variables();
51227 + if (error) {
51228 + free_variables();
51229 + goto out;
51230 + }
51231 +
51232 + if ((error = gr_set_acls(0))) {
51233 + free_variables();
51234 + goto out;
51235 + }
51236 +
51237 + pax_open_kernel();
51238 + gr_status |= GR_READY;
51239 + pax_close_kernel();
51240 +
51241 + out:
51242 + return error;
51243 +}
51244 +
51245 +/* derived from glibc fnmatch() 0: match, 1: no match*/
51246 +
51247 +static int
51248 +glob_match(const char *p, const char *n)
51249 +{
51250 + char c;
51251 +
51252 + while ((c = *p++) != '\0') {
51253 + switch (c) {
51254 + case '?':
51255 + if (*n == '\0')
51256 + return 1;
51257 + else if (*n == '/')
51258 + return 1;
51259 + break;
51260 + case '\\':
51261 + if (*n != c)
51262 + return 1;
51263 + break;
51264 + case '*':
51265 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
51266 + if (*n == '/')
51267 + return 1;
51268 + else if (c == '?') {
51269 + if (*n == '\0')
51270 + return 1;
51271 + else
51272 + ++n;
51273 + }
51274 + }
51275 + if (c == '\0') {
51276 + return 0;
51277 + } else {
51278 + const char *endp;
51279 +
51280 + if ((endp = strchr(n, '/')) == NULL)
51281 + endp = n + strlen(n);
51282 +
51283 + if (c == '[') {
51284 + for (--p; n < endp; ++n)
51285 + if (!glob_match(p, n))
51286 + return 0;
51287 + } else if (c == '/') {
51288 + while (*n != '\0' && *n != '/')
51289 + ++n;
51290 + if (*n == '/' && !glob_match(p, n + 1))
51291 + return 0;
51292 + } else {
51293 + for (--p; n < endp; ++n)
51294 + if (*n == c && !glob_match(p, n))
51295 + return 0;
51296 + }
51297 +
51298 + return 1;
51299 + }
51300 + case '[':
51301 + {
51302 + int not;
51303 + char cold;
51304 +
51305 + if (*n == '\0' || *n == '/')
51306 + return 1;
51307 +
51308 + not = (*p == '!' || *p == '^');
51309 + if (not)
51310 + ++p;
51311 +
51312 + c = *p++;
51313 + for (;;) {
51314 + unsigned char fn = (unsigned char)*n;
51315 +
51316 + if (c == '\0')
51317 + return 1;
51318 + else {
51319 + if (c == fn)
51320 + goto matched;
51321 + cold = c;
51322 + c = *p++;
51323 +
51324 + if (c == '-' && *p != ']') {
51325 + unsigned char cend = *p++;
51326 +
51327 + if (cend == '\0')
51328 + return 1;
51329 +
51330 + if (cold <= fn && fn <= cend)
51331 + goto matched;
51332 +
51333 + c = *p++;
51334 + }
51335 + }
51336 +
51337 + if (c == ']')
51338 + break;
51339 + }
51340 + if (!not)
51341 + return 1;
51342 + break;
51343 + matched:
51344 + while (c != ']') {
51345 + if (c == '\0')
51346 + return 1;
51347 +
51348 + c = *p++;
51349 + }
51350 + if (not)
51351 + return 1;
51352 + }
51353 + break;
51354 + default:
51355 + if (c != *n)
51356 + return 1;
51357 + }
51358 +
51359 + ++n;
51360 + }
51361 +
51362 + if (*n == '\0')
51363 + return 0;
51364 +
51365 + if (*n == '/')
51366 + return 0;
51367 +
51368 + return 1;
51369 +}
51370 +
51371 +static struct acl_object_label *
51372 +chk_glob_label(struct acl_object_label *globbed,
51373 + const struct dentry *dentry, const struct vfsmount *mnt, char **path)
51374 +{
51375 + struct acl_object_label *tmp;
51376 +
51377 + if (*path == NULL)
51378 + *path = gr_to_filename_nolock(dentry, mnt);
51379 +
51380 + tmp = globbed;
51381 +
51382 + while (tmp) {
51383 + if (!glob_match(tmp->filename, *path))
51384 + return tmp;
51385 + tmp = tmp->next;
51386 + }
51387 +
51388 + return NULL;
51389 +}
51390 +
51391 +static struct acl_object_label *
51392 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
51393 + const ino_t curr_ino, const dev_t curr_dev,
51394 + const struct acl_subject_label *subj, char **path, const int checkglob)
51395 +{
51396 + struct acl_subject_label *tmpsubj;
51397 + struct acl_object_label *retval;
51398 + struct acl_object_label *retval2;
51399 +
51400 + tmpsubj = (struct acl_subject_label *) subj;
51401 + read_lock(&gr_inode_lock);
51402 + do {
51403 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
51404 + if (retval) {
51405 + if (checkglob && retval->globbed) {
51406 + retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
51407 + if (retval2)
51408 + retval = retval2;
51409 + }
51410 + break;
51411 + }
51412 + } while ((tmpsubj = tmpsubj->parent_subject));
51413 + read_unlock(&gr_inode_lock);
51414 +
51415 + return retval;
51416 +}
51417 +
51418 +static __inline__ struct acl_object_label *
51419 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
51420 + struct dentry *curr_dentry,
51421 + const struct acl_subject_label *subj, char **path, const int checkglob)
51422 +{
51423 + int newglob = checkglob;
51424 + ino_t inode;
51425 + dev_t device;
51426 +
51427 + /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
51428 + as we don't want a / * rule to match instead of the / object
51429 + don't do this for create lookups that call this function though, since they're looking up
51430 + on the parent and thus need globbing checks on all paths
51431 + */
51432 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
51433 + newglob = GR_NO_GLOB;
51434 +
51435 + spin_lock(&curr_dentry->d_lock);
51436 + inode = curr_dentry->d_inode->i_ino;
51437 + device = __get_dev(curr_dentry);
51438 + spin_unlock(&curr_dentry->d_lock);
51439 +
51440 + return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
51441 +}
51442 +
51443 +static struct acl_object_label *
51444 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
51445 + const struct acl_subject_label *subj, char *path, const int checkglob)
51446 +{
51447 + struct dentry *dentry = (struct dentry *) l_dentry;
51448 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
51449 + struct mount *real_mnt = real_mount(mnt);
51450 + struct acl_object_label *retval;
51451 + struct dentry *parent;
51452 +
51453 + write_seqlock(&rename_lock);
51454 + br_read_lock(vfsmount_lock);
51455 +
51456 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
51457 +#ifdef CONFIG_NET
51458 + mnt == sock_mnt ||
51459 +#endif
51460 +#ifdef CONFIG_HUGETLBFS
51461 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
51462 +#endif
51463 + /* ignore Eric Biederman */
51464 + IS_PRIVATE(l_dentry->d_inode))) {
51465 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
51466 + goto out;
51467 + }
51468 +
51469 + for (;;) {
51470 + if (dentry == real_root.dentry && mnt == real_root.mnt)
51471 + break;
51472 +
51473 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
51474 + if (!mnt_has_parent(real_mnt))
51475 + break;
51476 +
51477 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
51478 + if (retval != NULL)
51479 + goto out;
51480 +
51481 + dentry = real_mnt->mnt_mountpoint;
51482 + real_mnt = real_mnt->mnt_parent;
51483 + mnt = &real_mnt->mnt;
51484 + continue;
51485 + }
51486 +
51487 + parent = dentry->d_parent;
51488 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
51489 + if (retval != NULL)
51490 + goto out;
51491 +
51492 + dentry = parent;
51493 + }
51494 +
51495 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
51496 +
51497 + /* real_root is pinned so we don't have to hold a reference */
51498 + if (retval == NULL)
51499 + retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
51500 +out:
51501 + br_read_unlock(vfsmount_lock);
51502 + write_sequnlock(&rename_lock);
51503 +
51504 + BUG_ON(retval == NULL);
51505 +
51506 + return retval;
51507 +}
51508 +
51509 +static __inline__ struct acl_object_label *
51510 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
51511 + const struct acl_subject_label *subj)
51512 +{
51513 + char *path = NULL;
51514 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
51515 +}
51516 +
51517 +static __inline__ struct acl_object_label *
51518 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
51519 + const struct acl_subject_label *subj)
51520 +{
51521 + char *path = NULL;
51522 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
51523 +}
51524 +
51525 +static __inline__ struct acl_object_label *
51526 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
51527 + const struct acl_subject_label *subj, char *path)
51528 +{
51529 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
51530 +}
51531 +
51532 +static struct acl_subject_label *
51533 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
51534 + const struct acl_role_label *role)
51535 +{
51536 + struct dentry *dentry = (struct dentry *) l_dentry;
51537 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
51538 + struct mount *real_mnt = real_mount(mnt);
51539 + struct acl_subject_label *retval;
51540 + struct dentry *parent;
51541 +
51542 + write_seqlock(&rename_lock);
51543 + br_read_lock(vfsmount_lock);
51544 +
51545 + for (;;) {
51546 + if (dentry == real_root.dentry && mnt == real_root.mnt)
51547 + break;
51548 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
51549 + if (!mnt_has_parent(real_mnt))
51550 + break;
51551 +
51552 + spin_lock(&dentry->d_lock);
51553 + read_lock(&gr_inode_lock);
51554 + retval =
51555 + lookup_acl_subj_label(dentry->d_inode->i_ino,
51556 + __get_dev(dentry), role);
51557 + read_unlock(&gr_inode_lock);
51558 + spin_unlock(&dentry->d_lock);
51559 + if (retval != NULL)
51560 + goto out;
51561 +
51562 + dentry = real_mnt->mnt_mountpoint;
51563 + real_mnt = real_mnt->mnt_parent;
51564 + mnt = &real_mnt->mnt;
51565 + continue;
51566 + }
51567 +
51568 + spin_lock(&dentry->d_lock);
51569 + read_lock(&gr_inode_lock);
51570 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
51571 + __get_dev(dentry), role);
51572 + read_unlock(&gr_inode_lock);
51573 + parent = dentry->d_parent;
51574 + spin_unlock(&dentry->d_lock);
51575 +
51576 + if (retval != NULL)
51577 + goto out;
51578 +
51579 + dentry = parent;
51580 + }
51581 +
51582 + spin_lock(&dentry->d_lock);
51583 + read_lock(&gr_inode_lock);
51584 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
51585 + __get_dev(dentry), role);
51586 + read_unlock(&gr_inode_lock);
51587 + spin_unlock(&dentry->d_lock);
51588 +
51589 + if (unlikely(retval == NULL)) {
51590 + /* real_root is pinned, we don't need to hold a reference */
51591 + read_lock(&gr_inode_lock);
51592 + retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
51593 + __get_dev(real_root.dentry), role);
51594 + read_unlock(&gr_inode_lock);
51595 + }
51596 +out:
51597 + br_read_unlock(vfsmount_lock);
51598 + write_sequnlock(&rename_lock);
51599 +
51600 + BUG_ON(retval == NULL);
51601 +
51602 + return retval;
51603 +}
51604 +
51605 +static void
51606 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
51607 +{
51608 + struct task_struct *task = current;
51609 + const struct cred *cred = current_cred();
51610 +
51611 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
51612 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
51613 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
51614 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
51615 +
51616 + return;
51617 +}
51618 +
51619 +static void
51620 +gr_log_learn_sysctl(const char *path, const __u32 mode)
51621 +{
51622 + struct task_struct *task = current;
51623 + const struct cred *cred = current_cred();
51624 +
51625 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
51626 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
51627 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
51628 + 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
51629 +
51630 + return;
51631 +}
51632 +
51633 +static void
51634 +gr_log_learn_id_change(const char type, const unsigned int real,
51635 + const unsigned int effective, const unsigned int fs)
51636 +{
51637 + struct task_struct *task = current;
51638 + const struct cred *cred = current_cred();
51639 +
51640 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
51641 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
51642 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
51643 + type, real, effective, fs, &task->signal->saved_ip);
51644 +
51645 + return;
51646 +}
51647 +
51648 +__u32
51649 +gr_search_file(const struct dentry * dentry, const __u32 mode,
51650 + const struct vfsmount * mnt)
51651 +{
51652 + __u32 retval = mode;
51653 + struct acl_subject_label *curracl;
51654 + struct acl_object_label *currobj;
51655 +
51656 + if (unlikely(!(gr_status & GR_READY)))
51657 + return (mode & ~GR_AUDITS);
51658 +
51659 + curracl = current->acl;
51660 +
51661 + currobj = chk_obj_label(dentry, mnt, curracl);
51662 + retval = currobj->mode & mode;
51663 +
51664 + /* if we're opening a specified transfer file for writing
51665 + (e.g. /dev/initctl), then transfer our role to init
51666 + */
51667 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
51668 + current->role->roletype & GR_ROLE_PERSIST)) {
51669 + struct task_struct *task = init_pid_ns.child_reaper;
51670 +
51671 + if (task->role != current->role) {
51672 + task->acl_sp_role = 0;
51673 + task->acl_role_id = current->acl_role_id;
51674 + task->role = current->role;
51675 + rcu_read_lock();
51676 + read_lock(&grsec_exec_file_lock);
51677 + gr_apply_subject_to_task(task);
51678 + read_unlock(&grsec_exec_file_lock);
51679 + rcu_read_unlock();
51680 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
51681 + }
51682 + }
51683 +
51684 + if (unlikely
51685 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
51686 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
51687 + __u32 new_mode = mode;
51688 +
51689 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
51690 +
51691 + retval = new_mode;
51692 +
51693 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
51694 + new_mode |= GR_INHERIT;
51695 +
51696 + if (!(mode & GR_NOLEARN))
51697 + gr_log_learn(dentry, mnt, new_mode);
51698 + }
51699 +
51700 + return retval;
51701 +}
51702 +
51703 +struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
51704 + const struct dentry *parent,
51705 + const struct vfsmount *mnt)
51706 +{
51707 + struct name_entry *match;
51708 + struct acl_object_label *matchpo;
51709 + struct acl_subject_label *curracl;
51710 + char *path;
51711 +
51712 + if (unlikely(!(gr_status & GR_READY)))
51713 + return NULL;
51714 +
51715 + preempt_disable();
51716 + path = gr_to_filename_rbac(new_dentry, mnt);
51717 + match = lookup_name_entry_create(path);
51718 +
51719 + curracl = current->acl;
51720 +
51721 + if (match) {
51722 + read_lock(&gr_inode_lock);
51723 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
51724 + read_unlock(&gr_inode_lock);
51725 +
51726 + if (matchpo) {
51727 + preempt_enable();
51728 + return matchpo;
51729 + }
51730 + }
51731 +
51732 + // lookup parent
51733 +
51734 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
51735 +
51736 + preempt_enable();
51737 + return matchpo;
51738 +}
51739 +
51740 +__u32
51741 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
51742 + const struct vfsmount * mnt, const __u32 mode)
51743 +{
51744 + struct acl_object_label *matchpo;
51745 + __u32 retval;
51746 +
51747 + if (unlikely(!(gr_status & GR_READY)))
51748 + return (mode & ~GR_AUDITS);
51749 +
51750 + matchpo = gr_get_create_object(new_dentry, parent, mnt);
51751 +
51752 + retval = matchpo->mode & mode;
51753 +
51754 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
51755 + && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
51756 + __u32 new_mode = mode;
51757 +
51758 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
51759 +
51760 + gr_log_learn(new_dentry, mnt, new_mode);
51761 + return new_mode;
51762 + }
51763 +
51764 + return retval;
51765 +}
51766 +
51767 +__u32
51768 +gr_check_link(const struct dentry * new_dentry,
51769 + const struct dentry * parent_dentry,
51770 + const struct vfsmount * parent_mnt,
51771 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
51772 +{
51773 + struct acl_object_label *obj;
51774 + __u32 oldmode, newmode;
51775 + __u32 needmode;
51776 + __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
51777 + GR_DELETE | GR_INHERIT;
51778 +
51779 + if (unlikely(!(gr_status & GR_READY)))
51780 + return (GR_CREATE | GR_LINK);
51781 +
51782 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
51783 + oldmode = obj->mode;
51784 +
51785 + obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
51786 + newmode = obj->mode;
51787 +
51788 + needmode = newmode & checkmodes;
51789 +
51790 + // old name for hardlink must have at least the permissions of the new name
51791 + if ((oldmode & needmode) != needmode)
51792 + goto bad;
51793 +
51794 + // if old name had restrictions/auditing, make sure the new name does as well
51795 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
51796 +
51797 + // don't allow hardlinking of suid/sgid files without permission
51798 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
51799 + needmode |= GR_SETID;
51800 +
51801 + if ((newmode & needmode) != needmode)
51802 + goto bad;
51803 +
51804 + // enforce minimum permissions
51805 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
51806 + return newmode;
51807 +bad:
51808 + needmode = oldmode;
51809 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
51810 + needmode |= GR_SETID;
51811 +
51812 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
51813 + gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
51814 + return (GR_CREATE | GR_LINK);
51815 + } else if (newmode & GR_SUPPRESS)
51816 + return GR_SUPPRESS;
51817 + else
51818 + return 0;
51819 +}
51820 +
51821 +int
51822 +gr_check_hidden_task(const struct task_struct *task)
51823 +{
51824 + if (unlikely(!(gr_status & GR_READY)))
51825 + return 0;
51826 +
51827 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
51828 + return 1;
51829 +
51830 + return 0;
51831 +}
51832 +
51833 +int
51834 +gr_check_protected_task(const struct task_struct *task)
51835 +{
51836 + if (unlikely(!(gr_status & GR_READY) || !task))
51837 + return 0;
51838 +
51839 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
51840 + task->acl != current->acl)
51841 + return 1;
51842 +
51843 + return 0;
51844 +}
51845 +
51846 +int
51847 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
51848 +{
51849 + struct task_struct *p;
51850 + int ret = 0;
51851 +
51852 + if (unlikely(!(gr_status & GR_READY) || !pid))
51853 + return ret;
51854 +
51855 + read_lock(&tasklist_lock);
51856 + do_each_pid_task(pid, type, p) {
51857 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
51858 + p->acl != current->acl) {
51859 + ret = 1;
51860 + goto out;
51861 + }
51862 + } while_each_pid_task(pid, type, p);
51863 +out:
51864 + read_unlock(&tasklist_lock);
51865 +
51866 + return ret;
51867 +}
51868 +
51869 +void
51870 +gr_copy_label(struct task_struct *tsk)
51871 +{
51872 + /* plain copying of fields is already done by dup_task_struct */
51873 + tsk->signal->used_accept = 0;
51874 + tsk->acl_sp_role = 0;
51875 + //tsk->acl_role_id = current->acl_role_id;
51876 + //tsk->acl = current->acl;
51877 + //tsk->role = current->role;
51878 + tsk->signal->curr_ip = current->signal->curr_ip;
51879 + tsk->signal->saved_ip = current->signal->saved_ip;
51880 + if (current->exec_file)
51881 + get_file(current->exec_file);
51882 + //tsk->exec_file = current->exec_file;
51883 + //tsk->is_writable = current->is_writable;
51884 + if (unlikely(current->signal->used_accept)) {
51885 + current->signal->curr_ip = 0;
51886 + current->signal->saved_ip = 0;
51887 + }
51888 +
51889 + return;
51890 +}
51891 +
51892 +static void
51893 +gr_set_proc_res(struct task_struct *task)
51894 +{
51895 + struct acl_subject_label *proc;
51896 + unsigned short i;
51897 +
51898 + proc = task->acl;
51899 +
51900 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
51901 + return;
51902 +
51903 + for (i = 0; i < RLIM_NLIMITS; i++) {
51904 + if (!(proc->resmask & (1 << i)))
51905 + continue;
51906 +
51907 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
51908 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
51909 + }
51910 +
51911 + return;
51912 +}
51913 +
51914 +extern int __gr_process_user_ban(struct user_struct *user);
51915 +
51916 +int
51917 +gr_check_user_change(int real, int effective, int fs)
51918 +{
51919 + unsigned int i;
51920 + __u16 num;
51921 + uid_t *uidlist;
51922 + int curuid;
51923 + int realok = 0;
51924 + int effectiveok = 0;
51925 + int fsok = 0;
51926 +
51927 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
51928 + struct user_struct *user;
51929 +
51930 + if (real == -1)
51931 + goto skipit;
51932 +
51933 + user = find_user(real);
51934 + if (user == NULL)
51935 + goto skipit;
51936 +
51937 + if (__gr_process_user_ban(user)) {
51938 + /* for find_user */
51939 + free_uid(user);
51940 + return 1;
51941 + }
51942 +
51943 + /* for find_user */
51944 + free_uid(user);
51945 +
51946 +skipit:
51947 +#endif
51948 +
51949 + if (unlikely(!(gr_status & GR_READY)))
51950 + return 0;
51951 +
51952 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
51953 + gr_log_learn_id_change('u', real, effective, fs);
51954 +
51955 + num = current->acl->user_trans_num;
51956 + uidlist = current->acl->user_transitions;
51957 +
51958 + if (uidlist == NULL)
51959 + return 0;
51960 +
51961 + if (real == -1)
51962 + realok = 1;
51963 + if (effective == -1)
51964 + effectiveok = 1;
51965 + if (fs == -1)
51966 + fsok = 1;
51967 +
51968 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
51969 + for (i = 0; i < num; i++) {
51970 + curuid = (int)uidlist[i];
51971 + if (real == curuid)
51972 + realok = 1;
51973 + if (effective == curuid)
51974 + effectiveok = 1;
51975 + if (fs == curuid)
51976 + fsok = 1;
51977 + }
51978 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
51979 + for (i = 0; i < num; i++) {
51980 + curuid = (int)uidlist[i];
51981 + if (real == curuid)
51982 + break;
51983 + if (effective == curuid)
51984 + break;
51985 + if (fs == curuid)
51986 + break;
51987 + }
51988 + /* not in deny list */
51989 + if (i == num) {
51990 + realok = 1;
51991 + effectiveok = 1;
51992 + fsok = 1;
51993 + }
51994 + }
51995 +
51996 + if (realok && effectiveok && fsok)
51997 + return 0;
51998 + else {
51999 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
52000 + return 1;
52001 + }
52002 +}
52003 +
52004 +int
52005 +gr_check_group_change(int real, int effective, int fs)
52006 +{
52007 + unsigned int i;
52008 + __u16 num;
52009 + gid_t *gidlist;
52010 + int curgid;
52011 + int realok = 0;
52012 + int effectiveok = 0;
52013 + int fsok = 0;
52014 +
52015 + if (unlikely(!(gr_status & GR_READY)))
52016 + return 0;
52017 +
52018 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
52019 + gr_log_learn_id_change('g', real, effective, fs);
52020 +
52021 + num = current->acl->group_trans_num;
52022 + gidlist = current->acl->group_transitions;
52023 +
52024 + if (gidlist == NULL)
52025 + return 0;
52026 +
52027 + if (real == -1)
52028 + realok = 1;
52029 + if (effective == -1)
52030 + effectiveok = 1;
52031 + if (fs == -1)
52032 + fsok = 1;
52033 +
52034 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
52035 + for (i = 0; i < num; i++) {
52036 + curgid = (int)gidlist[i];
52037 + if (real == curgid)
52038 + realok = 1;
52039 + if (effective == curgid)
52040 + effectiveok = 1;
52041 + if (fs == curgid)
52042 + fsok = 1;
52043 + }
52044 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
52045 + for (i = 0; i < num; i++) {
52046 + curgid = (int)gidlist[i];
52047 + if (real == curgid)
52048 + break;
52049 + if (effective == curgid)
52050 + break;
52051 + if (fs == curgid)
52052 + break;
52053 + }
52054 + /* not in deny list */
52055 + if (i == num) {
52056 + realok = 1;
52057 + effectiveok = 1;
52058 + fsok = 1;
52059 + }
52060 + }
52061 +
52062 + if (realok && effectiveok && fsok)
52063 + return 0;
52064 + else {
52065 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
52066 + return 1;
52067 + }
52068 +}
52069 +
52070 +extern int gr_acl_is_capable(const int cap);
52071 +
52072 +void
52073 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
52074 +{
52075 + struct acl_role_label *role = task->role;
52076 + struct acl_subject_label *subj = NULL;
52077 + struct acl_object_label *obj;
52078 + struct file *filp;
52079 +
52080 + if (unlikely(!(gr_status & GR_READY)))
52081 + return;
52082 +
52083 + filp = task->exec_file;
52084 +
52085 + /* kernel process, we'll give them the kernel role */
52086 + if (unlikely(!filp)) {
52087 + task->role = kernel_role;
52088 + task->acl = kernel_role->root_label;
52089 + return;
52090 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
52091 + role = lookup_acl_role_label(task, uid, gid);
52092 +
52093 + /* don't change the role if we're not a privileged process */
52094 + if (role && task->role != role &&
52095 + (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
52096 + ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
52097 + return;
52098 +
52099 + /* perform subject lookup in possibly new role
52100 + we can use this result below in the case where role == task->role
52101 + */
52102 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
52103 +
52104 + /* if we changed uid/gid, but result in the same role
52105 + and are using inheritance, don't lose the inherited subject
52106 + if current subject is other than what normal lookup
52107 + would result in, we arrived via inheritance, don't
52108 + lose subject
52109 + */
52110 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
52111 + (subj == task->acl)))
52112 + task->acl = subj;
52113 +
52114 + task->role = role;
52115 +
52116 + task->is_writable = 0;
52117 +
52118 + /* ignore additional mmap checks for processes that are writable
52119 + by the default ACL */
52120 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
52121 + if (unlikely(obj->mode & GR_WRITE))
52122 + task->is_writable = 1;
52123 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
52124 + if (unlikely(obj->mode & GR_WRITE))
52125 + task->is_writable = 1;
52126 +
52127 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
52128 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
52129 +#endif
52130 +
52131 + gr_set_proc_res(task);
52132 +
52133 + return;
52134 +}
52135 +
52136 +int
52137 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
52138 + const int unsafe_flags)
52139 +{
52140 + struct task_struct *task = current;
52141 + struct acl_subject_label *newacl;
52142 + struct acl_object_label *obj;
52143 + __u32 retmode;
52144 +
52145 + if (unlikely(!(gr_status & GR_READY)))
52146 + return 0;
52147 +
52148 + newacl = chk_subj_label(dentry, mnt, task->role);
52149 +
52150 + task_lock(task);
52151 + if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
52152 + !(task->role->roletype & GR_ROLE_GOD) &&
52153 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
52154 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
52155 + task_unlock(task);
52156 + if (unsafe_flags & LSM_UNSAFE_SHARE)
52157 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
52158 + else
52159 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
52160 + return -EACCES;
52161 + }
52162 + task_unlock(task);
52163 +
52164 + obj = chk_obj_label(dentry, mnt, task->acl);
52165 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
52166 +
52167 + if (!(task->acl->mode & GR_INHERITLEARN) &&
52168 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
52169 + if (obj->nested)
52170 + task->acl = obj->nested;
52171 + else
52172 + task->acl = newacl;
52173 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
52174 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
52175 +
52176 + task->is_writable = 0;
52177 +
52178 + /* ignore additional mmap checks for processes that are writable
52179 + by the default ACL */
52180 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
52181 + if (unlikely(obj->mode & GR_WRITE))
52182 + task->is_writable = 1;
52183 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
52184 + if (unlikely(obj->mode & GR_WRITE))
52185 + task->is_writable = 1;
52186 +
52187 + gr_set_proc_res(task);
52188 +
52189 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
52190 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
52191 +#endif
52192 + return 0;
52193 +}
52194 +
52195 +/* always called with valid inodev ptr */
52196 +static void
52197 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
52198 +{
52199 + struct acl_object_label *matchpo;
52200 + struct acl_subject_label *matchps;
52201 + struct acl_subject_label *subj;
52202 + struct acl_role_label *role;
52203 + unsigned int x;
52204 +
52205 + FOR_EACH_ROLE_START(role)
52206 + FOR_EACH_SUBJECT_START(role, subj, x)
52207 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
52208 + matchpo->mode |= GR_DELETED;
52209 + FOR_EACH_SUBJECT_END(subj,x)
52210 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
52211 + if (subj->inode == ino && subj->device == dev)
52212 + subj->mode |= GR_DELETED;
52213 + FOR_EACH_NESTED_SUBJECT_END(subj)
52214 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
52215 + matchps->mode |= GR_DELETED;
52216 + FOR_EACH_ROLE_END(role)
52217 +
52218 + inodev->nentry->deleted = 1;
52219 +
52220 + return;
52221 +}
52222 +
52223 +void
52224 +gr_handle_delete(const ino_t ino, const dev_t dev)
52225 +{
52226 + struct inodev_entry *inodev;
52227 +
52228 + if (unlikely(!(gr_status & GR_READY)))
52229 + return;
52230 +
52231 + write_lock(&gr_inode_lock);
52232 + inodev = lookup_inodev_entry(ino, dev);
52233 + if (inodev != NULL)
52234 + do_handle_delete(inodev, ino, dev);
52235 + write_unlock(&gr_inode_lock);
52236 +
52237 + return;
52238 +}
52239 +
52240 +static void
52241 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
52242 + const ino_t newinode, const dev_t newdevice,
52243 + struct acl_subject_label *subj)
52244 +{
52245 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
52246 + struct acl_object_label *match;
52247 +
52248 + match = subj->obj_hash[index];
52249 +
52250 + while (match && (match->inode != oldinode ||
52251 + match->device != olddevice ||
52252 + !(match->mode & GR_DELETED)))
52253 + match = match->next;
52254 +
52255 + if (match && (match->inode == oldinode)
52256 + && (match->device == olddevice)
52257 + && (match->mode & GR_DELETED)) {
52258 + if (match->prev == NULL) {
52259 + subj->obj_hash[index] = match->next;
52260 + if (match->next != NULL)
52261 + match->next->prev = NULL;
52262 + } else {
52263 + match->prev->next = match->next;
52264 + if (match->next != NULL)
52265 + match->next->prev = match->prev;
52266 + }
52267 + match->prev = NULL;
52268 + match->next = NULL;
52269 + match->inode = newinode;
52270 + match->device = newdevice;
52271 + match->mode &= ~GR_DELETED;
52272 +
52273 + insert_acl_obj_label(match, subj);
52274 + }
52275 +
52276 + return;
52277 +}
52278 +
52279 +static void
52280 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
52281 + const ino_t newinode, const dev_t newdevice,
52282 + struct acl_role_label *role)
52283 +{
52284 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
52285 + struct acl_subject_label *match;
52286 +
52287 + match = role->subj_hash[index];
52288 +
52289 + while (match && (match->inode != oldinode ||
52290 + match->device != olddevice ||
52291 + !(match->mode & GR_DELETED)))
52292 + match = match->next;
52293 +
52294 + if (match && (match->inode == oldinode)
52295 + && (match->device == olddevice)
52296 + && (match->mode & GR_DELETED)) {
52297 + if (match->prev == NULL) {
52298 + role->subj_hash[index] = match->next;
52299 + if (match->next != NULL)
52300 + match->next->prev = NULL;
52301 + } else {
52302 + match->prev->next = match->next;
52303 + if (match->next != NULL)
52304 + match->next->prev = match->prev;
52305 + }
52306 + match->prev = NULL;
52307 + match->next = NULL;
52308 + match->inode = newinode;
52309 + match->device = newdevice;
52310 + match->mode &= ~GR_DELETED;
52311 +
52312 + insert_acl_subj_label(match, role);
52313 + }
52314 +
52315 + return;
52316 +}
52317 +
52318 +static void
52319 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
52320 + const ino_t newinode, const dev_t newdevice)
52321 +{
52322 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
52323 + struct inodev_entry *match;
52324 +
52325 + match = inodev_set.i_hash[index];
52326 +
52327 + while (match && (match->nentry->inode != oldinode ||
52328 + match->nentry->device != olddevice || !match->nentry->deleted))
52329 + match = match->next;
52330 +
52331 + if (match && (match->nentry->inode == oldinode)
52332 + && (match->nentry->device == olddevice) &&
52333 + match->nentry->deleted) {
52334 + if (match->prev == NULL) {
52335 + inodev_set.i_hash[index] = match->next;
52336 + if (match->next != NULL)
52337 + match->next->prev = NULL;
52338 + } else {
52339 + match->prev->next = match->next;
52340 + if (match->next != NULL)
52341 + match->next->prev = match->prev;
52342 + }
52343 + match->prev = NULL;
52344 + match->next = NULL;
52345 + match->nentry->inode = newinode;
52346 + match->nentry->device = newdevice;
52347 + match->nentry->deleted = 0;
52348 +
52349 + insert_inodev_entry(match);
52350 + }
52351 +
52352 + return;
52353 +}
52354 +
52355 +static void
52356 +__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
52357 +{
52358 + struct acl_subject_label *subj;
52359 + struct acl_role_label *role;
52360 + unsigned int x;
52361 +
52362 + FOR_EACH_ROLE_START(role)
52363 + update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
52364 +
52365 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
52366 + if ((subj->inode == ino) && (subj->device == dev)) {
52367 + subj->inode = ino;
52368 + subj->device = dev;
52369 + }
52370 + FOR_EACH_NESTED_SUBJECT_END(subj)
52371 + FOR_EACH_SUBJECT_START(role, subj, x)
52372 + update_acl_obj_label(matchn->inode, matchn->device,
52373 + ino, dev, subj);
52374 + FOR_EACH_SUBJECT_END(subj,x)
52375 + FOR_EACH_ROLE_END(role)
52376 +
52377 + update_inodev_entry(matchn->inode, matchn->device, ino, dev);
52378 +
52379 + return;
52380 +}
52381 +
52382 +static void
52383 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
52384 + const struct vfsmount *mnt)
52385 +{
52386 + ino_t ino = dentry->d_inode->i_ino;
52387 + dev_t dev = __get_dev(dentry);
52388 +
52389 + __do_handle_create(matchn, ino, dev);
52390 +
52391 + return;
52392 +}
52393 +
52394 +void
52395 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
52396 +{
52397 + struct name_entry *matchn;
52398 +
52399 + if (unlikely(!(gr_status & GR_READY)))
52400 + return;
52401 +
52402 + preempt_disable();
52403 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
52404 +
52405 + if (unlikely((unsigned long)matchn)) {
52406 + write_lock(&gr_inode_lock);
52407 + do_handle_create(matchn, dentry, mnt);
52408 + write_unlock(&gr_inode_lock);
52409 + }
52410 + preempt_enable();
52411 +
52412 + return;
52413 +}
52414 +
52415 +void
52416 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
52417 +{
52418 + struct name_entry *matchn;
52419 +
52420 + if (unlikely(!(gr_status & GR_READY)))
52421 + return;
52422 +
52423 + preempt_disable();
52424 + matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
52425 +
52426 + if (unlikely((unsigned long)matchn)) {
52427 + write_lock(&gr_inode_lock);
52428 + __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
52429 + write_unlock(&gr_inode_lock);
52430 + }
52431 + preempt_enable();
52432 +
52433 + return;
52434 +}
52435 +
52436 +void
52437 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
52438 + struct dentry *old_dentry,
52439 + struct dentry *new_dentry,
52440 + struct vfsmount *mnt, const __u8 replace)
52441 +{
52442 + struct name_entry *matchn;
52443 + struct inodev_entry *inodev;
52444 + struct inode *inode = new_dentry->d_inode;
52445 + ino_t old_ino = old_dentry->d_inode->i_ino;
52446 + dev_t old_dev = __get_dev(old_dentry);
52447 +
52448 + /* vfs_rename swaps the name and parent link for old_dentry and
52449 + new_dentry
52450 + at this point, old_dentry has the new name, parent link, and inode
52451 + for the renamed file
52452 + if a file is being replaced by a rename, new_dentry has the inode
52453 + and name for the replaced file
52454 + */
52455 +
52456 + if (unlikely(!(gr_status & GR_READY)))
52457 + return;
52458 +
52459 + preempt_disable();
52460 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
52461 +
52462 + /* we wouldn't have to check d_inode if it weren't for
52463 + NFS silly-renaming
52464 + */
52465 +
52466 + write_lock(&gr_inode_lock);
52467 + if (unlikely(replace && inode)) {
52468 + ino_t new_ino = inode->i_ino;
52469 + dev_t new_dev = __get_dev(new_dentry);
52470 +
52471 + inodev = lookup_inodev_entry(new_ino, new_dev);
52472 + if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
52473 + do_handle_delete(inodev, new_ino, new_dev);
52474 + }
52475 +
52476 + inodev = lookup_inodev_entry(old_ino, old_dev);
52477 + if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
52478 + do_handle_delete(inodev, old_ino, old_dev);
52479 +
52480 + if (unlikely((unsigned long)matchn))
52481 + do_handle_create(matchn, old_dentry, mnt);
52482 +
52483 + write_unlock(&gr_inode_lock);
52484 + preempt_enable();
52485 +
52486 + return;
52487 +}
52488 +
52489 +static int
52490 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
52491 + unsigned char **sum)
52492 +{
52493 + struct acl_role_label *r;
52494 + struct role_allowed_ip *ipp;
52495 + struct role_transition *trans;
52496 + unsigned int i;
52497 + int found = 0;
52498 + u32 curr_ip = current->signal->curr_ip;
52499 +
52500 + current->signal->saved_ip = curr_ip;
52501 +
52502 + /* check transition table */
52503 +
52504 + for (trans = current->role->transitions; trans; trans = trans->next) {
52505 + if (!strcmp(rolename, trans->rolename)) {
52506 + found = 1;
52507 + break;
52508 + }
52509 + }
52510 +
52511 + if (!found)
52512 + return 0;
52513 +
52514 + /* handle special roles that do not require authentication
52515 + and check ip */
52516 +
52517 + FOR_EACH_ROLE_START(r)
52518 + if (!strcmp(rolename, r->rolename) &&
52519 + (r->roletype & GR_ROLE_SPECIAL)) {
52520 + found = 0;
52521 + if (r->allowed_ips != NULL) {
52522 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
52523 + if ((ntohl(curr_ip) & ipp->netmask) ==
52524 + (ntohl(ipp->addr) & ipp->netmask))
52525 + found = 1;
52526 + }
52527 + } else
52528 + found = 2;
52529 + if (!found)
52530 + return 0;
52531 +
52532 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
52533 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
52534 + *salt = NULL;
52535 + *sum = NULL;
52536 + return 1;
52537 + }
52538 + }
52539 + FOR_EACH_ROLE_END(r)
52540 +
52541 + for (i = 0; i < num_sprole_pws; i++) {
52542 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
52543 + *salt = acl_special_roles[i]->salt;
52544 + *sum = acl_special_roles[i]->sum;
52545 + return 1;
52546 + }
52547 + }
52548 +
52549 + return 0;
52550 +}
52551 +
52552 +static void
52553 +assign_special_role(char *rolename)
52554 +{
52555 + struct acl_object_label *obj;
52556 + struct acl_role_label *r;
52557 + struct acl_role_label *assigned = NULL;
52558 + struct task_struct *tsk;
52559 + struct file *filp;
52560 +
52561 + FOR_EACH_ROLE_START(r)
52562 + if (!strcmp(rolename, r->rolename) &&
52563 + (r->roletype & GR_ROLE_SPECIAL)) {
52564 + assigned = r;
52565 + break;
52566 + }
52567 + FOR_EACH_ROLE_END(r)
52568 +
52569 + if (!assigned)
52570 + return;
52571 +
52572 + read_lock(&tasklist_lock);
52573 + read_lock(&grsec_exec_file_lock);
52574 +
52575 + tsk = current->real_parent;
52576 + if (tsk == NULL)
52577 + goto out_unlock;
52578 +
52579 + filp = tsk->exec_file;
52580 + if (filp == NULL)
52581 + goto out_unlock;
52582 +
52583 + tsk->is_writable = 0;
52584 +
52585 + tsk->acl_sp_role = 1;
52586 + tsk->acl_role_id = ++acl_sp_role_value;
52587 + tsk->role = assigned;
52588 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
52589 +
52590 + /* ignore additional mmap checks for processes that are writable
52591 + by the default ACL */
52592 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
52593 + if (unlikely(obj->mode & GR_WRITE))
52594 + tsk->is_writable = 1;
52595 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
52596 + if (unlikely(obj->mode & GR_WRITE))
52597 + tsk->is_writable = 1;
52598 +
52599 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
52600 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
52601 +#endif
52602 +
52603 +out_unlock:
52604 + read_unlock(&grsec_exec_file_lock);
52605 + read_unlock(&tasklist_lock);
52606 + return;
52607 +}
52608 +
52609 +int gr_check_secure_terminal(struct task_struct *task)
52610 +{
52611 + struct task_struct *p, *p2, *p3;
52612 + struct files_struct *files;
52613 + struct fdtable *fdt;
52614 + struct file *our_file = NULL, *file;
52615 + int i;
52616 +
52617 + if (task->signal->tty == NULL)
52618 + return 1;
52619 +
52620 + files = get_files_struct(task);
52621 + if (files != NULL) {
52622 + rcu_read_lock();
52623 + fdt = files_fdtable(files);
52624 + for (i=0; i < fdt->max_fds; i++) {
52625 + file = fcheck_files(files, i);
52626 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
52627 + get_file(file);
52628 + our_file = file;
52629 + }
52630 + }
52631 + rcu_read_unlock();
52632 + put_files_struct(files);
52633 + }
52634 +
52635 + if (our_file == NULL)
52636 + return 1;
52637 +
52638 + read_lock(&tasklist_lock);
52639 + do_each_thread(p2, p) {
52640 + files = get_files_struct(p);
52641 + if (files == NULL ||
52642 + (p->signal && p->signal->tty == task->signal->tty)) {
52643 + if (files != NULL)
52644 + put_files_struct(files);
52645 + continue;
52646 + }
52647 + rcu_read_lock();
52648 + fdt = files_fdtable(files);
52649 + for (i=0; i < fdt->max_fds; i++) {
52650 + file = fcheck_files(files, i);
52651 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
52652 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
52653 + p3 = task;
52654 + while (p3->pid > 0) {
52655 + if (p3 == p)
52656 + break;
52657 + p3 = p3->real_parent;
52658 + }
52659 + if (p3 == p)
52660 + break;
52661 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
52662 + gr_handle_alertkill(p);
52663 + rcu_read_unlock();
52664 + put_files_struct(files);
52665 + read_unlock(&tasklist_lock);
52666 + fput(our_file);
52667 + return 0;
52668 + }
52669 + }
52670 + rcu_read_unlock();
52671 + put_files_struct(files);
52672 + } while_each_thread(p2, p);
52673 + read_unlock(&tasklist_lock);
52674 +
52675 + fput(our_file);
52676 + return 1;
52677 +}
52678 +
52679 +ssize_t
52680 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
52681 +{
52682 + struct gr_arg_wrapper uwrap;
52683 + unsigned char *sprole_salt = NULL;
52684 + unsigned char *sprole_sum = NULL;
52685 + int error = sizeof (struct gr_arg_wrapper);
52686 + int error2 = 0;
52687 +
52688 + mutex_lock(&gr_dev_mutex);
52689 +
52690 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
52691 + error = -EPERM;
52692 + goto out;
52693 + }
52694 +
52695 + if (count != sizeof (struct gr_arg_wrapper)) {
52696 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
52697 + error = -EINVAL;
52698 + goto out;
52699 + }
52700 +
52701 +
52702 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
52703 + gr_auth_expires = 0;
52704 + gr_auth_attempts = 0;
52705 + }
52706 +
52707 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
52708 + error = -EFAULT;
52709 + goto out;
52710 + }
52711 +
52712 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
52713 + error = -EINVAL;
52714 + goto out;
52715 + }
52716 +
52717 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
52718 + error = -EFAULT;
52719 + goto out;
52720 + }
52721 +
52722 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
52723 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
52724 + time_after(gr_auth_expires, get_seconds())) {
52725 + error = -EBUSY;
52726 + goto out;
52727 + }
52728 +
52729 + /* if non-root trying to do anything other than use a special role,
52730 + do not attempt authentication, do not count towards authentication
52731 + locking
52732 + */
52733 +
52734 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
52735 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
52736 + current_uid()) {
52737 + error = -EPERM;
52738 + goto out;
52739 + }
52740 +
52741 + /* ensure pw and special role name are null terminated */
52742 +
52743 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
52744 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
52745 +
52746 + /* Okay.
52747 + * We have our enough of the argument structure..(we have yet
52748 + * to copy_from_user the tables themselves) . Copy the tables
52749 + * only if we need them, i.e. for loading operations. */
52750 +
52751 + switch (gr_usermode->mode) {
52752 + case GR_STATUS:
52753 + if (gr_status & GR_READY) {
52754 + error = 1;
52755 + if (!gr_check_secure_terminal(current))
52756 + error = 3;
52757 + } else
52758 + error = 2;
52759 + goto out;
52760 + case GR_SHUTDOWN:
52761 + if ((gr_status & GR_READY)
52762 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
52763 + pax_open_kernel();
52764 + gr_status &= ~GR_READY;
52765 + pax_close_kernel();
52766 +
52767 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
52768 + free_variables();
52769 + memset(gr_usermode, 0, sizeof (struct gr_arg));
52770 + memset(gr_system_salt, 0, GR_SALT_LEN);
52771 + memset(gr_system_sum, 0, GR_SHA_LEN);
52772 + } else if (gr_status & GR_READY) {
52773 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
52774 + error = -EPERM;
52775 + } else {
52776 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
52777 + error = -EAGAIN;
52778 + }
52779 + break;
52780 + case GR_ENABLE:
52781 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
52782 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
52783 + else {
52784 + if (gr_status & GR_READY)
52785 + error = -EAGAIN;
52786 + else
52787 + error = error2;
52788 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
52789 + }
52790 + break;
52791 + case GR_RELOAD:
52792 + if (!(gr_status & GR_READY)) {
52793 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
52794 + error = -EAGAIN;
52795 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
52796 + preempt_disable();
52797 +
52798 + pax_open_kernel();
52799 + gr_status &= ~GR_READY;
52800 + pax_close_kernel();
52801 +
52802 + free_variables();
52803 + if (!(error2 = gracl_init(gr_usermode))) {
52804 + preempt_enable();
52805 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
52806 + } else {
52807 + preempt_enable();
52808 + error = error2;
52809 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
52810 + }
52811 + } else {
52812 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
52813 + error = -EPERM;
52814 + }
52815 + break;
52816 + case GR_SEGVMOD:
52817 + if (unlikely(!(gr_status & GR_READY))) {
52818 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
52819 + error = -EAGAIN;
52820 + break;
52821 + }
52822 +
52823 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
52824 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
52825 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
52826 + struct acl_subject_label *segvacl;
52827 + segvacl =
52828 + lookup_acl_subj_label(gr_usermode->segv_inode,
52829 + gr_usermode->segv_device,
52830 + current->role);
52831 + if (segvacl) {
52832 + segvacl->crashes = 0;
52833 + segvacl->expires = 0;
52834 + }
52835 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
52836 + gr_remove_uid(gr_usermode->segv_uid);
52837 + }
52838 + } else {
52839 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
52840 + error = -EPERM;
52841 + }
52842 + break;
52843 + case GR_SPROLE:
52844 + case GR_SPROLEPAM:
52845 + if (unlikely(!(gr_status & GR_READY))) {
52846 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
52847 + error = -EAGAIN;
52848 + break;
52849 + }
52850 +
52851 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
52852 + current->role->expires = 0;
52853 + current->role->auth_attempts = 0;
52854 + }
52855 +
52856 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
52857 + time_after(current->role->expires, get_seconds())) {
52858 + error = -EBUSY;
52859 + goto out;
52860 + }
52861 +
52862 + if (lookup_special_role_auth
52863 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
52864 + && ((!sprole_salt && !sprole_sum)
52865 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
52866 + char *p = "";
52867 + assign_special_role(gr_usermode->sp_role);
52868 + read_lock(&tasklist_lock);
52869 + if (current->real_parent)
52870 + p = current->real_parent->role->rolename;
52871 + read_unlock(&tasklist_lock);
52872 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
52873 + p, acl_sp_role_value);
52874 + } else {
52875 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
52876 + error = -EPERM;
52877 + if(!(current->role->auth_attempts++))
52878 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
52879 +
52880 + goto out;
52881 + }
52882 + break;
52883 + case GR_UNSPROLE:
52884 + if (unlikely(!(gr_status & GR_READY))) {
52885 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
52886 + error = -EAGAIN;
52887 + break;
52888 + }
52889 +
52890 + if (current->role->roletype & GR_ROLE_SPECIAL) {
52891 + char *p = "";
52892 + int i = 0;
52893 +
52894 + read_lock(&tasklist_lock);
52895 + if (current->real_parent) {
52896 + p = current->real_parent->role->rolename;
52897 + i = current->real_parent->acl_role_id;
52898 + }
52899 + read_unlock(&tasklist_lock);
52900 +
52901 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
52902 + gr_set_acls(1);
52903 + } else {
52904 + error = -EPERM;
52905 + goto out;
52906 + }
52907 + break;
52908 + default:
52909 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
52910 + error = -EINVAL;
52911 + break;
52912 + }
52913 +
52914 + if (error != -EPERM)
52915 + goto out;
52916 +
52917 + if(!(gr_auth_attempts++))
52918 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
52919 +
52920 + out:
52921 + mutex_unlock(&gr_dev_mutex);
52922 + return error;
52923 +}
52924 +
52925 +/* must be called with
52926 + rcu_read_lock();
52927 + read_lock(&tasklist_lock);
52928 + read_lock(&grsec_exec_file_lock);
52929 +*/
52930 +int gr_apply_subject_to_task(struct task_struct *task)
52931 +{
52932 + struct acl_object_label *obj;
52933 + char *tmpname;
52934 + struct acl_subject_label *tmpsubj;
52935 + struct file *filp;
52936 + struct name_entry *nmatch;
52937 +
52938 + filp = task->exec_file;
52939 + if (filp == NULL)
52940 + return 0;
52941 +
52942 + /* the following is to apply the correct subject
52943 + on binaries running when the RBAC system
52944 + is enabled, when the binaries have been
52945 + replaced or deleted since their execution
52946 + -----
52947 + when the RBAC system starts, the inode/dev
52948 + from exec_file will be one the RBAC system
52949 + is unaware of. It only knows the inode/dev
52950 + of the present file on disk, or the absence
52951 + of it.
52952 + */
52953 + preempt_disable();
52954 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
52955 +
52956 + nmatch = lookup_name_entry(tmpname);
52957 + preempt_enable();
52958 + tmpsubj = NULL;
52959 + if (nmatch) {
52960 + if (nmatch->deleted)
52961 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
52962 + else
52963 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
52964 + if (tmpsubj != NULL)
52965 + task->acl = tmpsubj;
52966 + }
52967 + if (tmpsubj == NULL)
52968 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
52969 + task->role);
52970 + if (task->acl) {
52971 + task->is_writable = 0;
52972 + /* ignore additional mmap checks for processes that are writable
52973 + by the default ACL */
52974 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
52975 + if (unlikely(obj->mode & GR_WRITE))
52976 + task->is_writable = 1;
52977 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
52978 + if (unlikely(obj->mode & GR_WRITE))
52979 + task->is_writable = 1;
52980 +
52981 + gr_set_proc_res(task);
52982 +
52983 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
52984 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
52985 +#endif
52986 + } else {
52987 + return 1;
52988 + }
52989 +
52990 + return 0;
52991 +}
52992 +
52993 +int
52994 +gr_set_acls(const int type)
52995 +{
52996 + struct task_struct *task, *task2;
52997 + struct acl_role_label *role = current->role;
52998 + __u16 acl_role_id = current->acl_role_id;
52999 + const struct cred *cred;
53000 + int ret;
53001 +
53002 + rcu_read_lock();
53003 + read_lock(&tasklist_lock);
53004 + read_lock(&grsec_exec_file_lock);
53005 + do_each_thread(task2, task) {
53006 + /* check to see if we're called from the exit handler,
53007 + if so, only replace ACLs that have inherited the admin
53008 + ACL */
53009 +
53010 + if (type && (task->role != role ||
53011 + task->acl_role_id != acl_role_id))
53012 + continue;
53013 +
53014 + task->acl_role_id = 0;
53015 + task->acl_sp_role = 0;
53016 +
53017 + if (task->exec_file) {
53018 + cred = __task_cred(task);
53019 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
53020 + ret = gr_apply_subject_to_task(task);
53021 + if (ret) {
53022 + read_unlock(&grsec_exec_file_lock);
53023 + read_unlock(&tasklist_lock);
53024 + rcu_read_unlock();
53025 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
53026 + return ret;
53027 + }
53028 + } else {
53029 + // it's a kernel process
53030 + task->role = kernel_role;
53031 + task->acl = kernel_role->root_label;
53032 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
53033 + task->acl->mode &= ~GR_PROCFIND;
53034 +#endif
53035 + }
53036 + } while_each_thread(task2, task);
53037 + read_unlock(&grsec_exec_file_lock);
53038 + read_unlock(&tasklist_lock);
53039 + rcu_read_unlock();
53040 +
53041 + return 0;
53042 +}
53043 +
53044 +void
53045 +gr_learn_resource(const struct task_struct *task,
53046 + const int res, const unsigned long wanted, const int gt)
53047 +{
53048 + struct acl_subject_label *acl;
53049 + const struct cred *cred;
53050 +
53051 + if (unlikely((gr_status & GR_READY) &&
53052 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
53053 + goto skip_reslog;
53054 +
53055 +#ifdef CONFIG_GRKERNSEC_RESLOG
53056 + gr_log_resource(task, res, wanted, gt);
53057 +#endif
53058 + skip_reslog:
53059 +
53060 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
53061 + return;
53062 +
53063 + acl = task->acl;
53064 +
53065 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
53066 + !(acl->resmask & (1 << (unsigned short) res))))
53067 + return;
53068 +
53069 + if (wanted >= acl->res[res].rlim_cur) {
53070 + unsigned long res_add;
53071 +
53072 + res_add = wanted;
53073 + switch (res) {
53074 + case RLIMIT_CPU:
53075 + res_add += GR_RLIM_CPU_BUMP;
53076 + break;
53077 + case RLIMIT_FSIZE:
53078 + res_add += GR_RLIM_FSIZE_BUMP;
53079 + break;
53080 + case RLIMIT_DATA:
53081 + res_add += GR_RLIM_DATA_BUMP;
53082 + break;
53083 + case RLIMIT_STACK:
53084 + res_add += GR_RLIM_STACK_BUMP;
53085 + break;
53086 + case RLIMIT_CORE:
53087 + res_add += GR_RLIM_CORE_BUMP;
53088 + break;
53089 + case RLIMIT_RSS:
53090 + res_add += GR_RLIM_RSS_BUMP;
53091 + break;
53092 + case RLIMIT_NPROC:
53093 + res_add += GR_RLIM_NPROC_BUMP;
53094 + break;
53095 + case RLIMIT_NOFILE:
53096 + res_add += GR_RLIM_NOFILE_BUMP;
53097 + break;
53098 + case RLIMIT_MEMLOCK:
53099 + res_add += GR_RLIM_MEMLOCK_BUMP;
53100 + break;
53101 + case RLIMIT_AS:
53102 + res_add += GR_RLIM_AS_BUMP;
53103 + break;
53104 + case RLIMIT_LOCKS:
53105 + res_add += GR_RLIM_LOCKS_BUMP;
53106 + break;
53107 + case RLIMIT_SIGPENDING:
53108 + res_add += GR_RLIM_SIGPENDING_BUMP;
53109 + break;
53110 + case RLIMIT_MSGQUEUE:
53111 + res_add += GR_RLIM_MSGQUEUE_BUMP;
53112 + break;
53113 + case RLIMIT_NICE:
53114 + res_add += GR_RLIM_NICE_BUMP;
53115 + break;
53116 + case RLIMIT_RTPRIO:
53117 + res_add += GR_RLIM_RTPRIO_BUMP;
53118 + break;
53119 + case RLIMIT_RTTIME:
53120 + res_add += GR_RLIM_RTTIME_BUMP;
53121 + break;
53122 + }
53123 +
53124 + acl->res[res].rlim_cur = res_add;
53125 +
53126 + if (wanted > acl->res[res].rlim_max)
53127 + acl->res[res].rlim_max = res_add;
53128 +
53129 + /* only log the subject filename, since resource logging is supported for
53130 + single-subject learning only */
53131 + rcu_read_lock();
53132 + cred = __task_cred(task);
53133 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
53134 + task->role->roletype, cred->uid, cred->gid, acl->filename,
53135 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
53136 + "", (unsigned long) res, &task->signal->saved_ip);
53137 + rcu_read_unlock();
53138 + }
53139 +
53140 + return;
53141 +}
53142 +
53143 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
53144 +void
53145 +pax_set_initial_flags(struct linux_binprm *bprm)
53146 +{
53147 + struct task_struct *task = current;
53148 + struct acl_subject_label *proc;
53149 + unsigned long flags;
53150 +
53151 + if (unlikely(!(gr_status & GR_READY)))
53152 + return;
53153 +
53154 + flags = pax_get_flags(task);
53155 +
53156 + proc = task->acl;
53157 +
53158 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
53159 + flags &= ~MF_PAX_PAGEEXEC;
53160 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
53161 + flags &= ~MF_PAX_SEGMEXEC;
53162 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
53163 + flags &= ~MF_PAX_RANDMMAP;
53164 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
53165 + flags &= ~MF_PAX_EMUTRAMP;
53166 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
53167 + flags &= ~MF_PAX_MPROTECT;
53168 +
53169 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
53170 + flags |= MF_PAX_PAGEEXEC;
53171 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
53172 + flags |= MF_PAX_SEGMEXEC;
53173 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
53174 + flags |= MF_PAX_RANDMMAP;
53175 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
53176 + flags |= MF_PAX_EMUTRAMP;
53177 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
53178 + flags |= MF_PAX_MPROTECT;
53179 +
53180 + pax_set_flags(task, flags);
53181 +
53182 + return;
53183 +}
53184 +#endif
53185 +
53186 +#ifdef CONFIG_SYSCTL
53187 +/* Eric Biederman likes breaking userland ABI and every inode-based security
53188 + system to save 35kb of memory */
53189 +
53190 +/* we modify the passed in filename, but adjust it back before returning */
53191 +static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
53192 +{
53193 + struct name_entry *nmatch;
53194 + char *p, *lastp = NULL;
53195 + struct acl_object_label *obj = NULL, *tmp;
53196 + struct acl_subject_label *tmpsubj;
53197 + char c = '\0';
53198 +
53199 + read_lock(&gr_inode_lock);
53200 +
53201 + p = name + len - 1;
53202 + do {
53203 + nmatch = lookup_name_entry(name);
53204 + if (lastp != NULL)
53205 + *lastp = c;
53206 +
53207 + if (nmatch == NULL)
53208 + goto next_component;
53209 + tmpsubj = current->acl;
53210 + do {
53211 + obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
53212 + if (obj != NULL) {
53213 + tmp = obj->globbed;
53214 + while (tmp) {
53215 + if (!glob_match(tmp->filename, name)) {
53216 + obj = tmp;
53217 + goto found_obj;
53218 + }
53219 + tmp = tmp->next;
53220 + }
53221 + goto found_obj;
53222 + }
53223 + } while ((tmpsubj = tmpsubj->parent_subject));
53224 +next_component:
53225 + /* end case */
53226 + if (p == name)
53227 + break;
53228 +
53229 + while (*p != '/')
53230 + p--;
53231 + if (p == name)
53232 + lastp = p + 1;
53233 + else {
53234 + lastp = p;
53235 + p--;
53236 + }
53237 + c = *lastp;
53238 + *lastp = '\0';
53239 + } while (1);
53240 +found_obj:
53241 + read_unlock(&gr_inode_lock);
53242 + /* obj returned will always be non-null */
53243 + return obj;
53244 +}
53245 +
53246 +/* returns 0 when allowing, non-zero on error
53247 + op of 0 is used for readdir, so we don't log the names of hidden files
53248 +*/
53249 +__u32
53250 +gr_handle_sysctl(const struct ctl_table *table, const int op)
53251 +{
53252 + struct ctl_table *tmp;
53253 + const char *proc_sys = "/proc/sys";
53254 + char *path;
53255 + struct acl_object_label *obj;
53256 + unsigned short len = 0, pos = 0, depth = 0, i;
53257 + __u32 err = 0;
53258 + __u32 mode = 0;
53259 +
53260 + if (unlikely(!(gr_status & GR_READY)))
53261 + return 0;
53262 +
53263 + /* for now, ignore operations on non-sysctl entries if it's not a
53264 + readdir*/
53265 + if (table->child != NULL && op != 0)
53266 + return 0;
53267 +
53268 + mode |= GR_FIND;
53269 + /* it's only a read if it's an entry, read on dirs is for readdir */
53270 + if (op & MAY_READ)
53271 + mode |= GR_READ;
53272 + if (op & MAY_WRITE)
53273 + mode |= GR_WRITE;
53274 +
53275 + preempt_disable();
53276 +
53277 + path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
53278 +
53279 + /* it's only a read/write if it's an actual entry, not a dir
53280 + (which are opened for readdir)
53281 + */
53282 +
53283 + /* convert the requested sysctl entry into a pathname */
53284 +
53285 + for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
53286 + len += strlen(tmp->procname);
53287 + len++;
53288 + depth++;
53289 + }
53290 +
53291 + if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
53292 + /* deny */
53293 + goto out;
53294 + }
53295 +
53296 + memset(path, 0, PAGE_SIZE);
53297 +
53298 + memcpy(path, proc_sys, strlen(proc_sys));
53299 +
53300 + pos += strlen(proc_sys);
53301 +
53302 + for (; depth > 0; depth--) {
53303 + path[pos] = '/';
53304 + pos++;
53305 + for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
53306 + if (depth == i) {
53307 + memcpy(path + pos, tmp->procname,
53308 + strlen(tmp->procname));
53309 + pos += strlen(tmp->procname);
53310 + }
53311 + i++;
53312 + }
53313 + }
53314 +
53315 + obj = gr_lookup_by_name(path, pos);
53316 + err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
53317 +
53318 + if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
53319 + ((err & mode) != mode))) {
53320 + __u32 new_mode = mode;
53321 +
53322 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
53323 +
53324 + err = 0;
53325 + gr_log_learn_sysctl(path, new_mode);
53326 + } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
53327 + gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
53328 + err = -ENOENT;
53329 + } else if (!(err & GR_FIND)) {
53330 + err = -ENOENT;
53331 + } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
53332 + gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
53333 + path, (mode & GR_READ) ? " reading" : "",
53334 + (mode & GR_WRITE) ? " writing" : "");
53335 + err = -EACCES;
53336 + } else if ((err & mode) != mode) {
53337 + err = -EACCES;
53338 + } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
53339 + gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
53340 + path, (mode & GR_READ) ? " reading" : "",
53341 + (mode & GR_WRITE) ? " writing" : "");
53342 + err = 0;
53343 + } else
53344 + err = 0;
53345 +
53346 + out:
53347 + preempt_enable();
53348 +
53349 + return err;
53350 +}
53351 +#endif
53352 +
53353 +int
53354 +gr_handle_proc_ptrace(struct task_struct *task)
53355 +{
53356 + struct file *filp;
53357 + struct task_struct *tmp = task;
53358 + struct task_struct *curtemp = current;
53359 + __u32 retmode;
53360 +
53361 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
53362 + if (unlikely(!(gr_status & GR_READY)))
53363 + return 0;
53364 +#endif
53365 +
53366 + read_lock(&tasklist_lock);
53367 + read_lock(&grsec_exec_file_lock);
53368 + filp = task->exec_file;
53369 +
53370 + while (tmp->pid > 0) {
53371 + if (tmp == curtemp)
53372 + break;
53373 + tmp = tmp->real_parent;
53374 + }
53375 +
53376 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
53377 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
53378 + read_unlock(&grsec_exec_file_lock);
53379 + read_unlock(&tasklist_lock);
53380 + return 1;
53381 + }
53382 +
53383 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
53384 + if (!(gr_status & GR_READY)) {
53385 + read_unlock(&grsec_exec_file_lock);
53386 + read_unlock(&tasklist_lock);
53387 + return 0;
53388 + }
53389 +#endif
53390 +
53391 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
53392 + read_unlock(&grsec_exec_file_lock);
53393 + read_unlock(&tasklist_lock);
53394 +
53395 + if (retmode & GR_NOPTRACE)
53396 + return 1;
53397 +
53398 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
53399 + && (current->acl != task->acl || (current->acl != current->role->root_label
53400 + && current->pid != task->pid)))
53401 + return 1;
53402 +
53403 + return 0;
53404 +}
53405 +
53406 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
53407 +{
53408 + if (unlikely(!(gr_status & GR_READY)))
53409 + return;
53410 +
53411 + if (!(current->role->roletype & GR_ROLE_GOD))
53412 + return;
53413 +
53414 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
53415 + p->role->rolename, gr_task_roletype_to_char(p),
53416 + p->acl->filename);
53417 +}
53418 +
53419 +int
53420 +gr_handle_ptrace(struct task_struct *task, const long request)
53421 +{
53422 + struct task_struct *tmp = task;
53423 + struct task_struct *curtemp = current;
53424 + __u32 retmode;
53425 +
53426 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
53427 + if (unlikely(!(gr_status & GR_READY)))
53428 + return 0;
53429 +#endif
53430 +
53431 + read_lock(&tasklist_lock);
53432 + while (tmp->pid > 0) {
53433 + if (tmp == curtemp)
53434 + break;
53435 + tmp = tmp->real_parent;
53436 + }
53437 +
53438 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
53439 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
53440 + read_unlock(&tasklist_lock);
53441 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
53442 + return 1;
53443 + }
53444 + read_unlock(&tasklist_lock);
53445 +
53446 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
53447 + if (!(gr_status & GR_READY))
53448 + return 0;
53449 +#endif
53450 +
53451 + read_lock(&grsec_exec_file_lock);
53452 + if (unlikely(!task->exec_file)) {
53453 + read_unlock(&grsec_exec_file_lock);
53454 + return 0;
53455 + }
53456 +
53457 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
53458 + read_unlock(&grsec_exec_file_lock);
53459 +
53460 + if (retmode & GR_NOPTRACE) {
53461 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
53462 + return 1;
53463 + }
53464 +
53465 + if (retmode & GR_PTRACERD) {
53466 + switch (request) {
53467 + case PTRACE_SEIZE:
53468 + case PTRACE_POKETEXT:
53469 + case PTRACE_POKEDATA:
53470 + case PTRACE_POKEUSR:
53471 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
53472 + case PTRACE_SETREGS:
53473 + case PTRACE_SETFPREGS:
53474 +#endif
53475 +#ifdef CONFIG_X86
53476 + case PTRACE_SETFPXREGS:
53477 +#endif
53478 +#ifdef CONFIG_ALTIVEC
53479 + case PTRACE_SETVRREGS:
53480 +#endif
53481 + return 1;
53482 + default:
53483 + return 0;
53484 + }
53485 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
53486 + !(current->role->roletype & GR_ROLE_GOD) &&
53487 + (current->acl != task->acl)) {
53488 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
53489 + return 1;
53490 + }
53491 +
53492 + return 0;
53493 +}
53494 +
53495 +static int is_writable_mmap(const struct file *filp)
53496 +{
53497 + struct task_struct *task = current;
53498 + struct acl_object_label *obj, *obj2;
53499 +
53500 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
53501 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
53502 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
53503 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
53504 + task->role->root_label);
53505 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
53506 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
53507 + return 1;
53508 + }
53509 + }
53510 + return 0;
53511 +}
53512 +
53513 +int
53514 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
53515 +{
53516 + __u32 mode;
53517 +
53518 + if (unlikely(!file || !(prot & PROT_EXEC)))
53519 + return 1;
53520 +
53521 + if (is_writable_mmap(file))
53522 + return 0;
53523 +
53524 + mode =
53525 + gr_search_file(file->f_path.dentry,
53526 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
53527 + file->f_path.mnt);
53528 +
53529 + if (!gr_tpe_allow(file))
53530 + return 0;
53531 +
53532 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
53533 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
53534 + return 0;
53535 + } else if (unlikely(!(mode & GR_EXEC))) {
53536 + return 0;
53537 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
53538 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
53539 + return 1;
53540 + }
53541 +
53542 + return 1;
53543 +}
53544 +
53545 +int
53546 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
53547 +{
53548 + __u32 mode;
53549 +
53550 + if (unlikely(!file || !(prot & PROT_EXEC)))
53551 + return 1;
53552 +
53553 + if (is_writable_mmap(file))
53554 + return 0;
53555 +
53556 + mode =
53557 + gr_search_file(file->f_path.dentry,
53558 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
53559 + file->f_path.mnt);
53560 +
53561 + if (!gr_tpe_allow(file))
53562 + return 0;
53563 +
53564 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
53565 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
53566 + return 0;
53567 + } else if (unlikely(!(mode & GR_EXEC))) {
53568 + return 0;
53569 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
53570 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
53571 + return 1;
53572 + }
53573 +
53574 + return 1;
53575 +}
53576 +
53577 +void
53578 +gr_acl_handle_psacct(struct task_struct *task, const long code)
53579 +{
53580 + unsigned long runtime;
53581 + unsigned long cputime;
53582 + unsigned int wday, cday;
53583 + __u8 whr, chr;
53584 + __u8 wmin, cmin;
53585 + __u8 wsec, csec;
53586 + struct timespec timeval;
53587 +
53588 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
53589 + !(task->acl->mode & GR_PROCACCT)))
53590 + return;
53591 +
53592 + do_posix_clock_monotonic_gettime(&timeval);
53593 + runtime = timeval.tv_sec - task->start_time.tv_sec;
53594 + wday = runtime / (3600 * 24);
53595 + runtime -= wday * (3600 * 24);
53596 + whr = runtime / 3600;
53597 + runtime -= whr * 3600;
53598 + wmin = runtime / 60;
53599 + runtime -= wmin * 60;
53600 + wsec = runtime;
53601 +
53602 + cputime = (task->utime + task->stime) / HZ;
53603 + cday = cputime / (3600 * 24);
53604 + cputime -= cday * (3600 * 24);
53605 + chr = cputime / 3600;
53606 + cputime -= chr * 3600;
53607 + cmin = cputime / 60;
53608 + cputime -= cmin * 60;
53609 + csec = cputime;
53610 +
53611 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
53612 +
53613 + return;
53614 +}
53615 +
53616 +void gr_set_kernel_label(struct task_struct *task)
53617 +{
53618 + if (gr_status & GR_READY) {
53619 + task->role = kernel_role;
53620 + task->acl = kernel_role->root_label;
53621 + }
53622 + return;
53623 +}
53624 +
53625 +#ifdef CONFIG_TASKSTATS
53626 +int gr_is_taskstats_denied(int pid)
53627 +{
53628 + struct task_struct *task;
53629 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53630 + const struct cred *cred;
53631 +#endif
53632 + int ret = 0;
53633 +
53634 + /* restrict taskstats viewing to un-chrooted root users
53635 + who have the 'view' subject flag if the RBAC system is enabled
53636 + */
53637 +
53638 + rcu_read_lock();
53639 + read_lock(&tasklist_lock);
53640 + task = find_task_by_vpid(pid);
53641 + if (task) {
53642 +#ifdef CONFIG_GRKERNSEC_CHROOT
53643 + if (proc_is_chrooted(task))
53644 + ret = -EACCES;
53645 +#endif
53646 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53647 + cred = __task_cred(task);
53648 +#ifdef CONFIG_GRKERNSEC_PROC_USER
53649 + if (cred->uid != 0)
53650 + ret = -EACCES;
53651 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53652 + if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
53653 + ret = -EACCES;
53654 +#endif
53655 +#endif
53656 + if (gr_status & GR_READY) {
53657 + if (!(task->acl->mode & GR_VIEW))
53658 + ret = -EACCES;
53659 + }
53660 + } else
53661 + ret = -ENOENT;
53662 +
53663 + read_unlock(&tasklist_lock);
53664 + rcu_read_unlock();
53665 +
53666 + return ret;
53667 +}
53668 +#endif
53669 +
53670 +/* AUXV entries are filled via a descendant of search_binary_handler
53671 + after we've already applied the subject for the target
53672 +*/
53673 +int gr_acl_enable_at_secure(void)
53674 +{
53675 + if (unlikely(!(gr_status & GR_READY)))
53676 + return 0;
53677 +
53678 + if (current->acl->mode & GR_ATSECURE)
53679 + return 1;
53680 +
53681 + return 0;
53682 +}
53683 +
53684 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
53685 +{
53686 + struct task_struct *task = current;
53687 + struct dentry *dentry = file->f_path.dentry;
53688 + struct vfsmount *mnt = file->f_path.mnt;
53689 + struct acl_object_label *obj, *tmp;
53690 + struct acl_subject_label *subj;
53691 + unsigned int bufsize;
53692 + int is_not_root;
53693 + char *path;
53694 + dev_t dev = __get_dev(dentry);
53695 +
53696 + if (unlikely(!(gr_status & GR_READY)))
53697 + return 1;
53698 +
53699 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
53700 + return 1;
53701 +
53702 + /* ignore Eric Biederman */
53703 + if (IS_PRIVATE(dentry->d_inode))
53704 + return 1;
53705 +
53706 + subj = task->acl;
53707 + do {
53708 + obj = lookup_acl_obj_label(ino, dev, subj);
53709 + if (obj != NULL)
53710 + return (obj->mode & GR_FIND) ? 1 : 0;
53711 + } while ((subj = subj->parent_subject));
53712 +
53713 + /* this is purely an optimization since we're looking for an object
53714 + for the directory we're doing a readdir on
53715 + if it's possible for any globbed object to match the entry we're
53716 + filling into the directory, then the object we find here will be
53717 + an anchor point with attached globbed objects
53718 + */
53719 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
53720 + if (obj->globbed == NULL)
53721 + return (obj->mode & GR_FIND) ? 1 : 0;
53722 +
53723 + is_not_root = ((obj->filename[0] == '/') &&
53724 + (obj->filename[1] == '\0')) ? 0 : 1;
53725 + bufsize = PAGE_SIZE - namelen - is_not_root;
53726 +
53727 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
53728 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
53729 + return 1;
53730 +
53731 + preempt_disable();
53732 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
53733 + bufsize);
53734 +
53735 + bufsize = strlen(path);
53736 +
53737 + /* if base is "/", don't append an additional slash */
53738 + if (is_not_root)
53739 + *(path + bufsize) = '/';
53740 + memcpy(path + bufsize + is_not_root, name, namelen);
53741 + *(path + bufsize + namelen + is_not_root) = '\0';
53742 +
53743 + tmp = obj->globbed;
53744 + while (tmp) {
53745 + if (!glob_match(tmp->filename, path)) {
53746 + preempt_enable();
53747 + return (tmp->mode & GR_FIND) ? 1 : 0;
53748 + }
53749 + tmp = tmp->next;
53750 + }
53751 + preempt_enable();
53752 + return (obj->mode & GR_FIND) ? 1 : 0;
53753 +}
53754 +
53755 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
53756 +EXPORT_SYMBOL(gr_acl_is_enabled);
53757 +#endif
53758 +EXPORT_SYMBOL(gr_learn_resource);
53759 +EXPORT_SYMBOL(gr_set_kernel_label);
53760 +#ifdef CONFIG_SECURITY
53761 +EXPORT_SYMBOL(gr_check_user_change);
53762 +EXPORT_SYMBOL(gr_check_group_change);
53763 +#endif
53764 +
53765 diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
53766 new file mode 100644
53767 index 0000000..34fefda
53768 --- /dev/null
53769 +++ b/grsecurity/gracl_alloc.c
53770 @@ -0,0 +1,105 @@
53771 +#include <linux/kernel.h>
53772 +#include <linux/mm.h>
53773 +#include <linux/slab.h>
53774 +#include <linux/vmalloc.h>
53775 +#include <linux/gracl.h>
53776 +#include <linux/grsecurity.h>
53777 +
53778 +static unsigned long alloc_stack_next = 1;
53779 +static unsigned long alloc_stack_size = 1;
53780 +static void **alloc_stack;
53781 +
53782 +static __inline__ int
53783 +alloc_pop(void)
53784 +{
53785 + if (alloc_stack_next == 1)
53786 + return 0;
53787 +
53788 + kfree(alloc_stack[alloc_stack_next - 2]);
53789 +
53790 + alloc_stack_next--;
53791 +
53792 + return 1;
53793 +}
53794 +
53795 +static __inline__ int
53796 +alloc_push(void *buf)
53797 +{
53798 + if (alloc_stack_next >= alloc_stack_size)
53799 + return 1;
53800 +
53801 + alloc_stack[alloc_stack_next - 1] = buf;
53802 +
53803 + alloc_stack_next++;
53804 +
53805 + return 0;
53806 +}
53807 +
53808 +void *
53809 +acl_alloc(unsigned long len)
53810 +{
53811 + void *ret = NULL;
53812 +
53813 + if (!len || len > PAGE_SIZE)
53814 + goto out;
53815 +
53816 + ret = kmalloc(len, GFP_KERNEL);
53817 +
53818 + if (ret) {
53819 + if (alloc_push(ret)) {
53820 + kfree(ret);
53821 + ret = NULL;
53822 + }
53823 + }
53824 +
53825 +out:
53826 + return ret;
53827 +}
53828 +
53829 +void *
53830 +acl_alloc_num(unsigned long num, unsigned long len)
53831 +{
53832 + if (!len || (num > (PAGE_SIZE / len)))
53833 + return NULL;
53834 +
53835 + return acl_alloc(num * len);
53836 +}
53837 +
53838 +void
53839 +acl_free_all(void)
53840 +{
53841 + if (gr_acl_is_enabled() || !alloc_stack)
53842 + return;
53843 +
53844 + while (alloc_pop()) ;
53845 +
53846 + if (alloc_stack) {
53847 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
53848 + kfree(alloc_stack);
53849 + else
53850 + vfree(alloc_stack);
53851 + }
53852 +
53853 + alloc_stack = NULL;
53854 + alloc_stack_size = 1;
53855 + alloc_stack_next = 1;
53856 +
53857 + return;
53858 +}
53859 +
53860 +int
53861 +acl_alloc_stack_init(unsigned long size)
53862 +{
53863 + if ((size * sizeof (void *)) <= PAGE_SIZE)
53864 + alloc_stack =
53865 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
53866 + else
53867 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
53868 +
53869 + alloc_stack_size = size;
53870 +
53871 + if (!alloc_stack)
53872 + return 0;
53873 + else
53874 + return 1;
53875 +}
53876 diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
53877 new file mode 100644
53878 index 0000000..6d21049
53879 --- /dev/null
53880 +++ b/grsecurity/gracl_cap.c
53881 @@ -0,0 +1,110 @@
53882 +#include <linux/kernel.h>
53883 +#include <linux/module.h>
53884 +#include <linux/sched.h>
53885 +#include <linux/gracl.h>
53886 +#include <linux/grsecurity.h>
53887 +#include <linux/grinternal.h>
53888 +
53889 +extern const char *captab_log[];
53890 +extern int captab_log_entries;
53891 +
53892 +int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
53893 +{
53894 + struct acl_subject_label *curracl;
53895 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
53896 + kernel_cap_t cap_audit = __cap_empty_set;
53897 +
53898 + if (!gr_acl_is_enabled())
53899 + return 1;
53900 +
53901 + curracl = task->acl;
53902 +
53903 + cap_drop = curracl->cap_lower;
53904 + cap_mask = curracl->cap_mask;
53905 + cap_audit = curracl->cap_invert_audit;
53906 +
53907 + while ((curracl = curracl->parent_subject)) {
53908 + /* if the cap isn't specified in the current computed mask but is specified in the
53909 + current level subject, and is lowered in the current level subject, then add
53910 + it to the set of dropped capabilities
53911 + otherwise, add the current level subject's mask to the current computed mask
53912 + */
53913 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
53914 + cap_raise(cap_mask, cap);
53915 + if (cap_raised(curracl->cap_lower, cap))
53916 + cap_raise(cap_drop, cap);
53917 + if (cap_raised(curracl->cap_invert_audit, cap))
53918 + cap_raise(cap_audit, cap);
53919 + }
53920 + }
53921 +
53922 + if (!cap_raised(cap_drop, cap)) {
53923 + if (cap_raised(cap_audit, cap))
53924 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
53925 + return 1;
53926 + }
53927 +
53928 + curracl = task->acl;
53929 +
53930 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
53931 + && cap_raised(cred->cap_effective, cap)) {
53932 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
53933 + task->role->roletype, cred->uid,
53934 + cred->gid, task->exec_file ?
53935 + gr_to_filename(task->exec_file->f_path.dentry,
53936 + task->exec_file->f_path.mnt) : curracl->filename,
53937 + curracl->filename, 0UL,
53938 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
53939 + return 1;
53940 + }
53941 +
53942 + if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
53943 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
53944 +
53945 + return 0;
53946 +}
53947 +
53948 +int
53949 +gr_acl_is_capable(const int cap)
53950 +{
53951 + return gr_task_acl_is_capable(current, current_cred(), cap);
53952 +}
53953 +
53954 +int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
53955 +{
53956 + struct acl_subject_label *curracl;
53957 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
53958 +
53959 + if (!gr_acl_is_enabled())
53960 + return 1;
53961 +
53962 + curracl = task->acl;
53963 +
53964 + cap_drop = curracl->cap_lower;
53965 + cap_mask = curracl->cap_mask;
53966 +
53967 + while ((curracl = curracl->parent_subject)) {
53968 + /* if the cap isn't specified in the current computed mask but is specified in the
53969 + current level subject, and is lowered in the current level subject, then add
53970 + it to the set of dropped capabilities
53971 + otherwise, add the current level subject's mask to the current computed mask
53972 + */
53973 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
53974 + cap_raise(cap_mask, cap);
53975 + if (cap_raised(curracl->cap_lower, cap))
53976 + cap_raise(cap_drop, cap);
53977 + }
53978 + }
53979 +
53980 + if (!cap_raised(cap_drop, cap))
53981 + return 1;
53982 +
53983 + return 0;
53984 +}
53985 +
53986 +int
53987 +gr_acl_is_capable_nolog(const int cap)
53988 +{
53989 + return gr_task_acl_is_capable_nolog(current, cap);
53990 +}
53991 +
53992 diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
53993 new file mode 100644
53994 index 0000000..88d0e87
53995 --- /dev/null
53996 +++ b/grsecurity/gracl_fs.c
53997 @@ -0,0 +1,435 @@
53998 +#include <linux/kernel.h>
53999 +#include <linux/sched.h>
54000 +#include <linux/types.h>
54001 +#include <linux/fs.h>
54002 +#include <linux/file.h>
54003 +#include <linux/stat.h>
54004 +#include <linux/grsecurity.h>
54005 +#include <linux/grinternal.h>
54006 +#include <linux/gracl.h>
54007 +
54008 +umode_t
54009 +gr_acl_umask(void)
54010 +{
54011 + if (unlikely(!gr_acl_is_enabled()))
54012 + return 0;
54013 +
54014 + return current->role->umask;
54015 +}
54016 +
54017 +__u32
54018 +gr_acl_handle_hidden_file(const struct dentry * dentry,
54019 + const struct vfsmount * mnt)
54020 +{
54021 + __u32 mode;
54022 +
54023 + if (unlikely(!dentry->d_inode))
54024 + return GR_FIND;
54025 +
54026 + mode =
54027 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
54028 +
54029 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
54030 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
54031 + return mode;
54032 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
54033 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
54034 + return 0;
54035 + } else if (unlikely(!(mode & GR_FIND)))
54036 + return 0;
54037 +
54038 + return GR_FIND;
54039 +}
54040 +
54041 +__u32
54042 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
54043 + int acc_mode)
54044 +{
54045 + __u32 reqmode = GR_FIND;
54046 + __u32 mode;
54047 +
54048 + if (unlikely(!dentry->d_inode))
54049 + return reqmode;
54050 +
54051 + if (acc_mode & MAY_APPEND)
54052 + reqmode |= GR_APPEND;
54053 + else if (acc_mode & MAY_WRITE)
54054 + reqmode |= GR_WRITE;
54055 + if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
54056 + reqmode |= GR_READ;
54057 +
54058 + mode =
54059 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
54060 + mnt);
54061 +
54062 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
54063 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
54064 + reqmode & GR_READ ? " reading" : "",
54065 + reqmode & GR_WRITE ? " writing" : reqmode &
54066 + GR_APPEND ? " appending" : "");
54067 + return reqmode;
54068 + } else
54069 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
54070 + {
54071 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
54072 + reqmode & GR_READ ? " reading" : "",
54073 + reqmode & GR_WRITE ? " writing" : reqmode &
54074 + GR_APPEND ? " appending" : "");
54075 + return 0;
54076 + } else if (unlikely((mode & reqmode) != reqmode))
54077 + return 0;
54078 +
54079 + return reqmode;
54080 +}
54081 +
54082 +__u32
54083 +gr_acl_handle_creat(const struct dentry * dentry,
54084 + const struct dentry * p_dentry,
54085 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
54086 + const int imode)
54087 +{
54088 + __u32 reqmode = GR_WRITE | GR_CREATE;
54089 + __u32 mode;
54090 +
54091 + if (acc_mode & MAY_APPEND)
54092 + reqmode |= GR_APPEND;
54093 + // if a directory was required or the directory already exists, then
54094 + // don't count this open as a read
54095 + if ((acc_mode & MAY_READ) &&
54096 + !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
54097 + reqmode |= GR_READ;
54098 + if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
54099 + reqmode |= GR_SETID;
54100 +
54101 + mode =
54102 + gr_check_create(dentry, p_dentry, p_mnt,
54103 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
54104 +
54105 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
54106 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
54107 + reqmode & GR_READ ? " reading" : "",
54108 + reqmode & GR_WRITE ? " writing" : reqmode &
54109 + GR_APPEND ? " appending" : "");
54110 + return reqmode;
54111 + } else
54112 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
54113 + {
54114 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
54115 + reqmode & GR_READ ? " reading" : "",
54116 + reqmode & GR_WRITE ? " writing" : reqmode &
54117 + GR_APPEND ? " appending" : "");
54118 + return 0;
54119 + } else if (unlikely((mode & reqmode) != reqmode))
54120 + return 0;
54121 +
54122 + return reqmode;
54123 +}
54124 +
54125 +__u32
54126 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
54127 + const int fmode)
54128 +{
54129 + __u32 mode, reqmode = GR_FIND;
54130 +
54131 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
54132 + reqmode |= GR_EXEC;
54133 + if (fmode & S_IWOTH)
54134 + reqmode |= GR_WRITE;
54135 + if (fmode & S_IROTH)
54136 + reqmode |= GR_READ;
54137 +
54138 + mode =
54139 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
54140 + mnt);
54141 +
54142 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
54143 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
54144 + reqmode & GR_READ ? " reading" : "",
54145 + reqmode & GR_WRITE ? " writing" : "",
54146 + reqmode & GR_EXEC ? " executing" : "");
54147 + return reqmode;
54148 + } else
54149 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
54150 + {
54151 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
54152 + reqmode & GR_READ ? " reading" : "",
54153 + reqmode & GR_WRITE ? " writing" : "",
54154 + reqmode & GR_EXEC ? " executing" : "");
54155 + return 0;
54156 + } else if (unlikely((mode & reqmode) != reqmode))
54157 + return 0;
54158 +
54159 + return reqmode;
54160 +}
54161 +
54162 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
54163 +{
54164 + __u32 mode;
54165 +
54166 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
54167 +
54168 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
54169 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
54170 + return mode;
54171 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
54172 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
54173 + return 0;
54174 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
54175 + return 0;
54176 +
54177 + return (reqmode);
54178 +}
54179 +
54180 +__u32
54181 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
54182 +{
54183 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
54184 +}
54185 +
54186 +__u32
54187 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
54188 +{
54189 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
54190 +}
54191 +
54192 +__u32
54193 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
54194 +{
54195 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
54196 +}
54197 +
54198 +__u32
54199 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
54200 +{
54201 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
54202 +}
54203 +
54204 +__u32
54205 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
54206 + umode_t *modeptr)
54207 +{
54208 + umode_t mode;
54209 +
54210 + *modeptr &= ~gr_acl_umask();
54211 + mode = *modeptr;
54212 +
54213 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
54214 + return 1;
54215 +
54216 + if (unlikely(mode & (S_ISUID | S_ISGID))) {
54217 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
54218 + GR_CHMOD_ACL_MSG);
54219 + } else {
54220 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
54221 + }
54222 +}
54223 +
54224 +__u32
54225 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
54226 +{
54227 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
54228 +}
54229 +
54230 +__u32
54231 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
54232 +{
54233 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
54234 +}
54235 +
54236 +__u32
54237 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
54238 +{
54239 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
54240 +}
54241 +
54242 +__u32
54243 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
54244 +{
54245 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
54246 + GR_UNIXCONNECT_ACL_MSG);
54247 +}
54248 +
54249 +/* hardlinks require at minimum create and link permission,
54250 + any additional privilege required is based on the
54251 + privilege of the file being linked to
54252 +*/
54253 +__u32
54254 +gr_acl_handle_link(const struct dentry * new_dentry,
54255 + const struct dentry * parent_dentry,
54256 + const struct vfsmount * parent_mnt,
54257 + const struct dentry * old_dentry,
54258 + const struct vfsmount * old_mnt, const char *to)
54259 +{
54260 + __u32 mode;
54261 + __u32 needmode = GR_CREATE | GR_LINK;
54262 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
54263 +
54264 + mode =
54265 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
54266 + old_mnt);
54267 +
54268 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
54269 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
54270 + return mode;
54271 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
54272 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
54273 + return 0;
54274 + } else if (unlikely((mode & needmode) != needmode))
54275 + return 0;
54276 +
54277 + return 1;
54278 +}
54279 +
54280 +__u32
54281 +gr_acl_handle_symlink(const struct dentry * new_dentry,
54282 + const struct dentry * parent_dentry,
54283 + const struct vfsmount * parent_mnt, const char *from)
54284 +{
54285 + __u32 needmode = GR_WRITE | GR_CREATE;
54286 + __u32 mode;
54287 +
54288 + mode =
54289 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
54290 + GR_CREATE | GR_AUDIT_CREATE |
54291 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
54292 +
54293 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
54294 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
54295 + return mode;
54296 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
54297 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
54298 + return 0;
54299 + } else if (unlikely((mode & needmode) != needmode))
54300 + return 0;
54301 +
54302 + return (GR_WRITE | GR_CREATE);
54303 +}
54304 +
54305 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
54306 +{
54307 + __u32 mode;
54308 +
54309 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
54310 +
54311 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
54312 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
54313 + return mode;
54314 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
54315 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
54316 + return 0;
54317 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
54318 + return 0;
54319 +
54320 + return (reqmode);
54321 +}
54322 +
54323 +__u32
54324 +gr_acl_handle_mknod(const struct dentry * new_dentry,
54325 + const struct dentry * parent_dentry,
54326 + const struct vfsmount * parent_mnt,
54327 + const int mode)
54328 +{
54329 + __u32 reqmode = GR_WRITE | GR_CREATE;
54330 + if (unlikely(mode & (S_ISUID | S_ISGID)))
54331 + reqmode |= GR_SETID;
54332 +
54333 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
54334 + reqmode, GR_MKNOD_ACL_MSG);
54335 +}
54336 +
54337 +__u32
54338 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
54339 + const struct dentry *parent_dentry,
54340 + const struct vfsmount *parent_mnt)
54341 +{
54342 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
54343 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
54344 +}
54345 +
54346 +#define RENAME_CHECK_SUCCESS(old, new) \
54347 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
54348 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
54349 +
54350 +int
54351 +gr_acl_handle_rename(struct dentry *new_dentry,
54352 + struct dentry *parent_dentry,
54353 + const struct vfsmount *parent_mnt,
54354 + struct dentry *old_dentry,
54355 + struct inode *old_parent_inode,
54356 + struct vfsmount *old_mnt, const char *newname)
54357 +{
54358 + __u32 comp1, comp2;
54359 + int error = 0;
54360 +
54361 + if (unlikely(!gr_acl_is_enabled()))
54362 + return 0;
54363 +
54364 + if (!new_dentry->d_inode) {
54365 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
54366 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
54367 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
54368 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
54369 + GR_DELETE | GR_AUDIT_DELETE |
54370 + GR_AUDIT_READ | GR_AUDIT_WRITE |
54371 + GR_SUPPRESS, old_mnt);
54372 + } else {
54373 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
54374 + GR_CREATE | GR_DELETE |
54375 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
54376 + GR_AUDIT_READ | GR_AUDIT_WRITE |
54377 + GR_SUPPRESS, parent_mnt);
54378 + comp2 =
54379 + gr_search_file(old_dentry,
54380 + GR_READ | GR_WRITE | GR_AUDIT_READ |
54381 + GR_DELETE | GR_AUDIT_DELETE |
54382 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
54383 + }
54384 +
54385 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
54386 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
54387 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
54388 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
54389 + && !(comp2 & GR_SUPPRESS)) {
54390 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
54391 + error = -EACCES;
54392 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
54393 + error = -EACCES;
54394 +
54395 + return error;
54396 +}
54397 +
54398 +void
54399 +gr_acl_handle_exit(void)
54400 +{
54401 + u16 id;
54402 + char *rolename;
54403 + struct file *exec_file;
54404 +
54405 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
54406 + !(current->role->roletype & GR_ROLE_PERSIST))) {
54407 + id = current->acl_role_id;
54408 + rolename = current->role->rolename;
54409 + gr_set_acls(1);
54410 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
54411 + }
54412 +
54413 + write_lock(&grsec_exec_file_lock);
54414 + exec_file = current->exec_file;
54415 + current->exec_file = NULL;
54416 + write_unlock(&grsec_exec_file_lock);
54417 +
54418 + if (exec_file)
54419 + fput(exec_file);
54420 +}
54421 +
54422 +int
54423 +gr_acl_handle_procpidmem(const struct task_struct *task)
54424 +{
54425 + if (unlikely(!gr_acl_is_enabled()))
54426 + return 0;
54427 +
54428 + if (task != current && task->acl->mode & GR_PROTPROCFD)
54429 + return -EACCES;
54430 +
54431 + return 0;
54432 +}
54433 diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
54434 new file mode 100644
54435 index 0000000..17050ca
54436 --- /dev/null
54437 +++ b/grsecurity/gracl_ip.c
54438 @@ -0,0 +1,381 @@
54439 +#include <linux/kernel.h>
54440 +#include <asm/uaccess.h>
54441 +#include <asm/errno.h>
54442 +#include <net/sock.h>
54443 +#include <linux/file.h>
54444 +#include <linux/fs.h>
54445 +#include <linux/net.h>
54446 +#include <linux/in.h>
54447 +#include <linux/skbuff.h>
54448 +#include <linux/ip.h>
54449 +#include <linux/udp.h>
54450 +#include <linux/types.h>
54451 +#include <linux/sched.h>
54452 +#include <linux/netdevice.h>
54453 +#include <linux/inetdevice.h>
54454 +#include <linux/gracl.h>
54455 +#include <linux/grsecurity.h>
54456 +#include <linux/grinternal.h>
54457 +
54458 +#define GR_BIND 0x01
54459 +#define GR_CONNECT 0x02
54460 +#define GR_INVERT 0x04
54461 +#define GR_BINDOVERRIDE 0x08
54462 +#define GR_CONNECTOVERRIDE 0x10
54463 +#define GR_SOCK_FAMILY 0x20
54464 +
54465 +static const char * gr_protocols[IPPROTO_MAX] = {
54466 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
54467 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
54468 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
54469 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
54470 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
54471 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
54472 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
54473 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
54474 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
54475 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
54476 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
54477 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
54478 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
54479 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
54480 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
54481 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
54482 + "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
54483 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
54484 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
54485 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
54486 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
54487 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
54488 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
54489 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
54490 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
54491 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
54492 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
54493 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
54494 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
54495 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
54496 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
54497 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
54498 + };
54499 +
54500 +static const char * gr_socktypes[SOCK_MAX] = {
54501 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
54502 + "unknown:7", "unknown:8", "unknown:9", "packet"
54503 + };
54504 +
54505 +static const char * gr_sockfamilies[AF_MAX+1] = {
54506 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
54507 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
54508 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
54509 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
54510 + };
54511 +
54512 +const char *
54513 +gr_proto_to_name(unsigned char proto)
54514 +{
54515 + return gr_protocols[proto];
54516 +}
54517 +
54518 +const char *
54519 +gr_socktype_to_name(unsigned char type)
54520 +{
54521 + return gr_socktypes[type];
54522 +}
54523 +
54524 +const char *
54525 +gr_sockfamily_to_name(unsigned char family)
54526 +{
54527 + return gr_sockfamilies[family];
54528 +}
54529 +
54530 +int
54531 +gr_search_socket(const int domain, const int type, const int protocol)
54532 +{
54533 + struct acl_subject_label *curr;
54534 + const struct cred *cred = current_cred();
54535 +
54536 + if (unlikely(!gr_acl_is_enabled()))
54537 + goto exit;
54538 +
54539 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
54540 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
54541 + goto exit; // let the kernel handle it
54542 +
54543 + curr = current->acl;
54544 +
54545 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
54546 + /* the family is allowed, if this is PF_INET allow it only if
54547 + the extra sock type/protocol checks pass */
54548 + if (domain == PF_INET)
54549 + goto inet_check;
54550 + goto exit;
54551 + } else {
54552 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
54553 + __u32 fakeip = 0;
54554 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
54555 + current->role->roletype, cred->uid,
54556 + cred->gid, current->exec_file ?
54557 + gr_to_filename(current->exec_file->f_path.dentry,
54558 + current->exec_file->f_path.mnt) :
54559 + curr->filename, curr->filename,
54560 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
54561 + &current->signal->saved_ip);
54562 + goto exit;
54563 + }
54564 + goto exit_fail;
54565 + }
54566 +
54567 +inet_check:
54568 + /* the rest of this checking is for IPv4 only */
54569 + if (!curr->ips)
54570 + goto exit;
54571 +
54572 + if ((curr->ip_type & (1 << type)) &&
54573 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
54574 + goto exit;
54575 +
54576 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
54577 + /* we don't place acls on raw sockets , and sometimes
54578 + dgram/ip sockets are opened for ioctl and not
54579 + bind/connect, so we'll fake a bind learn log */
54580 + if (type == SOCK_RAW || type == SOCK_PACKET) {
54581 + __u32 fakeip = 0;
54582 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
54583 + current->role->roletype, cred->uid,
54584 + cred->gid, current->exec_file ?
54585 + gr_to_filename(current->exec_file->f_path.dentry,
54586 + current->exec_file->f_path.mnt) :
54587 + curr->filename, curr->filename,
54588 + &fakeip, 0, type,
54589 + protocol, GR_CONNECT, &current->signal->saved_ip);
54590 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
54591 + __u32 fakeip = 0;
54592 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
54593 + current->role->roletype, cred->uid,
54594 + cred->gid, current->exec_file ?
54595 + gr_to_filename(current->exec_file->f_path.dentry,
54596 + current->exec_file->f_path.mnt) :
54597 + curr->filename, curr->filename,
54598 + &fakeip, 0, type,
54599 + protocol, GR_BIND, &current->signal->saved_ip);
54600 + }
54601 + /* we'll log when they use connect or bind */
54602 + goto exit;
54603 + }
54604 +
54605 +exit_fail:
54606 + if (domain == PF_INET)
54607 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
54608 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
54609 + else
54610 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
54611 + gr_socktype_to_name(type), protocol);
54612 +
54613 + return 0;
54614 +exit:
54615 + return 1;
54616 +}
54617 +
54618 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
54619 +{
54620 + if ((ip->mode & mode) &&
54621 + (ip_port >= ip->low) &&
54622 + (ip_port <= ip->high) &&
54623 + ((ntohl(ip_addr) & our_netmask) ==
54624 + (ntohl(our_addr) & our_netmask))
54625 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
54626 + && (ip->type & (1 << type))) {
54627 + if (ip->mode & GR_INVERT)
54628 + return 2; // specifically denied
54629 + else
54630 + return 1; // allowed
54631 + }
54632 +
54633 + return 0; // not specifically allowed, may continue parsing
54634 +}
54635 +
54636 +static int
54637 +gr_search_connectbind(const int full_mode, struct sock *sk,
54638 + struct sockaddr_in *addr, const int type)
54639 +{
54640 + char iface[IFNAMSIZ] = {0};
54641 + struct acl_subject_label *curr;
54642 + struct acl_ip_label *ip;
54643 + struct inet_sock *isk;
54644 + struct net_device *dev;
54645 + struct in_device *idev;
54646 + unsigned long i;
54647 + int ret;
54648 + int mode = full_mode & (GR_BIND | GR_CONNECT);
54649 + __u32 ip_addr = 0;
54650 + __u32 our_addr;
54651 + __u32 our_netmask;
54652 + char *p;
54653 + __u16 ip_port = 0;
54654 + const struct cred *cred = current_cred();
54655 +
54656 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
54657 + return 0;
54658 +
54659 + curr = current->acl;
54660 + isk = inet_sk(sk);
54661 +
54662 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
54663 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
54664 + addr->sin_addr.s_addr = curr->inaddr_any_override;
54665 + if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
54666 + struct sockaddr_in saddr;
54667 + int err;
54668 +
54669 + saddr.sin_family = AF_INET;
54670 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
54671 + saddr.sin_port = isk->inet_sport;
54672 +
54673 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
54674 + if (err)
54675 + return err;
54676 +
54677 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
54678 + if (err)
54679 + return err;
54680 + }
54681 +
54682 + if (!curr->ips)
54683 + return 0;
54684 +
54685 + ip_addr = addr->sin_addr.s_addr;
54686 + ip_port = ntohs(addr->sin_port);
54687 +
54688 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
54689 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
54690 + current->role->roletype, cred->uid,
54691 + cred->gid, current->exec_file ?
54692 + gr_to_filename(current->exec_file->f_path.dentry,
54693 + current->exec_file->f_path.mnt) :
54694 + curr->filename, curr->filename,
54695 + &ip_addr, ip_port, type,
54696 + sk->sk_protocol, mode, &current->signal->saved_ip);
54697 + return 0;
54698 + }
54699 +
54700 + for (i = 0; i < curr->ip_num; i++) {
54701 + ip = *(curr->ips + i);
54702 + if (ip->iface != NULL) {
54703 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
54704 + p = strchr(iface, ':');
54705 + if (p != NULL)
54706 + *p = '\0';
54707 + dev = dev_get_by_name(sock_net(sk), iface);
54708 + if (dev == NULL)
54709 + continue;
54710 + idev = in_dev_get(dev);
54711 + if (idev == NULL) {
54712 + dev_put(dev);
54713 + continue;
54714 + }
54715 + rcu_read_lock();
54716 + for_ifa(idev) {
54717 + if (!strcmp(ip->iface, ifa->ifa_label)) {
54718 + our_addr = ifa->ifa_address;
54719 + our_netmask = 0xffffffff;
54720 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
54721 + if (ret == 1) {
54722 + rcu_read_unlock();
54723 + in_dev_put(idev);
54724 + dev_put(dev);
54725 + return 0;
54726 + } else if (ret == 2) {
54727 + rcu_read_unlock();
54728 + in_dev_put(idev);
54729 + dev_put(dev);
54730 + goto denied;
54731 + }
54732 + }
54733 + } endfor_ifa(idev);
54734 + rcu_read_unlock();
54735 + in_dev_put(idev);
54736 + dev_put(dev);
54737 + } else {
54738 + our_addr = ip->addr;
54739 + our_netmask = ip->netmask;
54740 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
54741 + if (ret == 1)
54742 + return 0;
54743 + else if (ret == 2)
54744 + goto denied;
54745 + }
54746 + }
54747 +
54748 +denied:
54749 + if (mode == GR_BIND)
54750 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
54751 + else if (mode == GR_CONNECT)
54752 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
54753 +
54754 + return -EACCES;
54755 +}
54756 +
54757 +int
54758 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
54759 +{
54760 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
54761 +}
54762 +
54763 +int
54764 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
54765 +{
54766 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
54767 +}
54768 +
54769 +int gr_search_listen(struct socket *sock)
54770 +{
54771 + struct sock *sk = sock->sk;
54772 + struct sockaddr_in addr;
54773 +
54774 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
54775 + addr.sin_port = inet_sk(sk)->inet_sport;
54776 +
54777 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
54778 +}
54779 +
54780 +int gr_search_accept(struct socket *sock)
54781 +{
54782 + struct sock *sk = sock->sk;
54783 + struct sockaddr_in addr;
54784 +
54785 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
54786 + addr.sin_port = inet_sk(sk)->inet_sport;
54787 +
54788 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
54789 +}
54790 +
54791 +int
54792 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
54793 +{
54794 + if (addr)
54795 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
54796 + else {
54797 + struct sockaddr_in sin;
54798 + const struct inet_sock *inet = inet_sk(sk);
54799 +
54800 + sin.sin_addr.s_addr = inet->inet_daddr;
54801 + sin.sin_port = inet->inet_dport;
54802 +
54803 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
54804 + }
54805 +}
54806 +
54807 +int
54808 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
54809 +{
54810 + struct sockaddr_in sin;
54811 +
54812 + if (unlikely(skb->len < sizeof (struct udphdr)))
54813 + return 0; // skip this packet
54814 +
54815 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
54816 + sin.sin_port = udp_hdr(skb)->source;
54817 +
54818 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
54819 +}
54820 diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
54821 new file mode 100644
54822 index 0000000..25f54ef
54823 --- /dev/null
54824 +++ b/grsecurity/gracl_learn.c
54825 @@ -0,0 +1,207 @@
54826 +#include <linux/kernel.h>
54827 +#include <linux/mm.h>
54828 +#include <linux/sched.h>
54829 +#include <linux/poll.h>
54830 +#include <linux/string.h>
54831 +#include <linux/file.h>
54832 +#include <linux/types.h>
54833 +#include <linux/vmalloc.h>
54834 +#include <linux/grinternal.h>
54835 +
54836 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
54837 + size_t count, loff_t *ppos);
54838 +extern int gr_acl_is_enabled(void);
54839 +
54840 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
54841 +static int gr_learn_attached;
54842 +
54843 +/* use a 512k buffer */
54844 +#define LEARN_BUFFER_SIZE (512 * 1024)
54845 +
54846 +static DEFINE_SPINLOCK(gr_learn_lock);
54847 +static DEFINE_MUTEX(gr_learn_user_mutex);
54848 +
54849 +/* we need to maintain two buffers, so that the kernel context of grlearn
54850 + uses a semaphore around the userspace copying, and the other kernel contexts
54851 + use a spinlock when copying into the buffer, since they cannot sleep
54852 +*/
54853 +static char *learn_buffer;
54854 +static char *learn_buffer_user;
54855 +static int learn_buffer_len;
54856 +static int learn_buffer_user_len;
54857 +
54858 +static ssize_t
54859 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
54860 +{
54861 + DECLARE_WAITQUEUE(wait, current);
54862 + ssize_t retval = 0;
54863 +
54864 + add_wait_queue(&learn_wait, &wait);
54865 + set_current_state(TASK_INTERRUPTIBLE);
54866 + do {
54867 + mutex_lock(&gr_learn_user_mutex);
54868 + spin_lock(&gr_learn_lock);
54869 + if (learn_buffer_len)
54870 + break;
54871 + spin_unlock(&gr_learn_lock);
54872 + mutex_unlock(&gr_learn_user_mutex);
54873 + if (file->f_flags & O_NONBLOCK) {
54874 + retval = -EAGAIN;
54875 + goto out;
54876 + }
54877 + if (signal_pending(current)) {
54878 + retval = -ERESTARTSYS;
54879 + goto out;
54880 + }
54881 +
54882 + schedule();
54883 + } while (1);
54884 +
54885 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
54886 + learn_buffer_user_len = learn_buffer_len;
54887 + retval = learn_buffer_len;
54888 + learn_buffer_len = 0;
54889 +
54890 + spin_unlock(&gr_learn_lock);
54891 +
54892 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
54893 + retval = -EFAULT;
54894 +
54895 + mutex_unlock(&gr_learn_user_mutex);
54896 +out:
54897 + set_current_state(TASK_RUNNING);
54898 + remove_wait_queue(&learn_wait, &wait);
54899 + return retval;
54900 +}
54901 +
54902 +static unsigned int
54903 +poll_learn(struct file * file, poll_table * wait)
54904 +{
54905 + poll_wait(file, &learn_wait, wait);
54906 +
54907 + if (learn_buffer_len)
54908 + return (POLLIN | POLLRDNORM);
54909 +
54910 + return 0;
54911 +}
54912 +
54913 +void
54914 +gr_clear_learn_entries(void)
54915 +{
54916 + char *tmp;
54917 +
54918 + mutex_lock(&gr_learn_user_mutex);
54919 + spin_lock(&gr_learn_lock);
54920 + tmp = learn_buffer;
54921 + learn_buffer = NULL;
54922 + spin_unlock(&gr_learn_lock);
54923 + if (tmp)
54924 + vfree(tmp);
54925 + if (learn_buffer_user != NULL) {
54926 + vfree(learn_buffer_user);
54927 + learn_buffer_user = NULL;
54928 + }
54929 + learn_buffer_len = 0;
54930 + mutex_unlock(&gr_learn_user_mutex);
54931 +
54932 + return;
54933 +}
54934 +
54935 +void
54936 +gr_add_learn_entry(const char *fmt, ...)
54937 +{
54938 + va_list args;
54939 + unsigned int len;
54940 +
54941 + if (!gr_learn_attached)
54942 + return;
54943 +
54944 + spin_lock(&gr_learn_lock);
54945 +
54946 + /* leave a gap at the end so we know when it's "full" but don't have to
54947 + compute the exact length of the string we're trying to append
54948 + */
54949 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
54950 + spin_unlock(&gr_learn_lock);
54951 + wake_up_interruptible(&learn_wait);
54952 + return;
54953 + }
54954 + if (learn_buffer == NULL) {
54955 + spin_unlock(&gr_learn_lock);
54956 + return;
54957 + }
54958 +
54959 + va_start(args, fmt);
54960 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
54961 + va_end(args);
54962 +
54963 + learn_buffer_len += len + 1;
54964 +
54965 + spin_unlock(&gr_learn_lock);
54966 + wake_up_interruptible(&learn_wait);
54967 +
54968 + return;
54969 +}
54970 +
54971 +static int
54972 +open_learn(struct inode *inode, struct file *file)
54973 +{
54974 + if (file->f_mode & FMODE_READ && gr_learn_attached)
54975 + return -EBUSY;
54976 + if (file->f_mode & FMODE_READ) {
54977 + int retval = 0;
54978 + mutex_lock(&gr_learn_user_mutex);
54979 + if (learn_buffer == NULL)
54980 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
54981 + if (learn_buffer_user == NULL)
54982 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
54983 + if (learn_buffer == NULL) {
54984 + retval = -ENOMEM;
54985 + goto out_error;
54986 + }
54987 + if (learn_buffer_user == NULL) {
54988 + retval = -ENOMEM;
54989 + goto out_error;
54990 + }
54991 + learn_buffer_len = 0;
54992 + learn_buffer_user_len = 0;
54993 + gr_learn_attached = 1;
54994 +out_error:
54995 + mutex_unlock(&gr_learn_user_mutex);
54996 + return retval;
54997 + }
54998 + return 0;
54999 +}
55000 +
55001 +static int
55002 +close_learn(struct inode *inode, struct file *file)
55003 +{
55004 + if (file->f_mode & FMODE_READ) {
55005 + char *tmp = NULL;
55006 + mutex_lock(&gr_learn_user_mutex);
55007 + spin_lock(&gr_learn_lock);
55008 + tmp = learn_buffer;
55009 + learn_buffer = NULL;
55010 + spin_unlock(&gr_learn_lock);
55011 + if (tmp)
55012 + vfree(tmp);
55013 + if (learn_buffer_user != NULL) {
55014 + vfree(learn_buffer_user);
55015 + learn_buffer_user = NULL;
55016 + }
55017 + learn_buffer_len = 0;
55018 + learn_buffer_user_len = 0;
55019 + gr_learn_attached = 0;
55020 + mutex_unlock(&gr_learn_user_mutex);
55021 + }
55022 +
55023 + return 0;
55024 +}
55025 +
55026 +const struct file_operations grsec_fops = {
55027 + .read = read_learn,
55028 + .write = write_grsec_handler,
55029 + .open = open_learn,
55030 + .release = close_learn,
55031 + .poll = poll_learn,
55032 +};
55033 diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
55034 new file mode 100644
55035 index 0000000..39645c9
55036 --- /dev/null
55037 +++ b/grsecurity/gracl_res.c
55038 @@ -0,0 +1,68 @@
55039 +#include <linux/kernel.h>
55040 +#include <linux/sched.h>
55041 +#include <linux/gracl.h>
55042 +#include <linux/grinternal.h>
55043 +
55044 +static const char *restab_log[] = {
55045 + [RLIMIT_CPU] = "RLIMIT_CPU",
55046 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
55047 + [RLIMIT_DATA] = "RLIMIT_DATA",
55048 + [RLIMIT_STACK] = "RLIMIT_STACK",
55049 + [RLIMIT_CORE] = "RLIMIT_CORE",
55050 + [RLIMIT_RSS] = "RLIMIT_RSS",
55051 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
55052 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
55053 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
55054 + [RLIMIT_AS] = "RLIMIT_AS",
55055 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
55056 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
55057 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
55058 + [RLIMIT_NICE] = "RLIMIT_NICE",
55059 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
55060 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
55061 + [GR_CRASH_RES] = "RLIMIT_CRASH"
55062 +};
55063 +
55064 +void
55065 +gr_log_resource(const struct task_struct *task,
55066 + const int res, const unsigned long wanted, const int gt)
55067 +{
55068 + const struct cred *cred;
55069 + unsigned long rlim;
55070 +
55071 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
55072 + return;
55073 +
55074 + // not yet supported resource
55075 + if (unlikely(!restab_log[res]))
55076 + return;
55077 +
55078 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
55079 + rlim = task_rlimit_max(task, res);
55080 + else
55081 + rlim = task_rlimit(task, res);
55082 +
55083 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
55084 + return;
55085 +
55086 + rcu_read_lock();
55087 + cred = __task_cred(task);
55088 +
55089 + if (res == RLIMIT_NPROC &&
55090 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
55091 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
55092 + goto out_rcu_unlock;
55093 + else if (res == RLIMIT_MEMLOCK &&
55094 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
55095 + goto out_rcu_unlock;
55096 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
55097 + goto out_rcu_unlock;
55098 + rcu_read_unlock();
55099 +
55100 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
55101 +
55102 + return;
55103 +out_rcu_unlock:
55104 + rcu_read_unlock();
55105 + return;
55106 +}
55107 diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
55108 new file mode 100644
55109 index 0000000..5556be3
55110 --- /dev/null
55111 +++ b/grsecurity/gracl_segv.c
55112 @@ -0,0 +1,299 @@
55113 +#include <linux/kernel.h>
55114 +#include <linux/mm.h>
55115 +#include <asm/uaccess.h>
55116 +#include <asm/errno.h>
55117 +#include <asm/mman.h>
55118 +#include <net/sock.h>
55119 +#include <linux/file.h>
55120 +#include <linux/fs.h>
55121 +#include <linux/net.h>
55122 +#include <linux/in.h>
55123 +#include <linux/slab.h>
55124 +#include <linux/types.h>
55125 +#include <linux/sched.h>
55126 +#include <linux/timer.h>
55127 +#include <linux/gracl.h>
55128 +#include <linux/grsecurity.h>
55129 +#include <linux/grinternal.h>
55130 +
55131 +static struct crash_uid *uid_set;
55132 +static unsigned short uid_used;
55133 +static DEFINE_SPINLOCK(gr_uid_lock);
55134 +extern rwlock_t gr_inode_lock;
55135 +extern struct acl_subject_label *
55136 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
55137 + struct acl_role_label *role);
55138 +
55139 +#ifdef CONFIG_BTRFS_FS
55140 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
55141 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
55142 +#endif
55143 +
55144 +static inline dev_t __get_dev(const struct dentry *dentry)
55145 +{
55146 +#ifdef CONFIG_BTRFS_FS
55147 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
55148 + return get_btrfs_dev_from_inode(dentry->d_inode);
55149 + else
55150 +#endif
55151 + return dentry->d_inode->i_sb->s_dev;
55152 +}
55153 +
55154 +int
55155 +gr_init_uidset(void)
55156 +{
55157 + uid_set =
55158 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
55159 + uid_used = 0;
55160 +
55161 + return uid_set ? 1 : 0;
55162 +}
55163 +
55164 +void
55165 +gr_free_uidset(void)
55166 +{
55167 + if (uid_set)
55168 + kfree(uid_set);
55169 +
55170 + return;
55171 +}
55172 +
55173 +int
55174 +gr_find_uid(const uid_t uid)
55175 +{
55176 + struct crash_uid *tmp = uid_set;
55177 + uid_t buid;
55178 + int low = 0, high = uid_used - 1, mid;
55179 +
55180 + while (high >= low) {
55181 + mid = (low + high) >> 1;
55182 + buid = tmp[mid].uid;
55183 + if (buid == uid)
55184 + return mid;
55185 + if (buid > uid)
55186 + high = mid - 1;
55187 + if (buid < uid)
55188 + low = mid + 1;
55189 + }
55190 +
55191 + return -1;
55192 +}
55193 +
55194 +static __inline__ void
55195 +gr_insertsort(void)
55196 +{
55197 + unsigned short i, j;
55198 + struct crash_uid index;
55199 +
55200 + for (i = 1; i < uid_used; i++) {
55201 + index = uid_set[i];
55202 + j = i;
55203 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
55204 + uid_set[j] = uid_set[j - 1];
55205 + j--;
55206 + }
55207 + uid_set[j] = index;
55208 + }
55209 +
55210 + return;
55211 +}
55212 +
55213 +static __inline__ void
55214 +gr_insert_uid(const uid_t uid, const unsigned long expires)
55215 +{
55216 + int loc;
55217 +
55218 + if (uid_used == GR_UIDTABLE_MAX)
55219 + return;
55220 +
55221 + loc = gr_find_uid(uid);
55222 +
55223 + if (loc >= 0) {
55224 + uid_set[loc].expires = expires;
55225 + return;
55226 + }
55227 +
55228 + uid_set[uid_used].uid = uid;
55229 + uid_set[uid_used].expires = expires;
55230 + uid_used++;
55231 +
55232 + gr_insertsort();
55233 +
55234 + return;
55235 +}
55236 +
55237 +void
55238 +gr_remove_uid(const unsigned short loc)
55239 +{
55240 + unsigned short i;
55241 +
55242 + for (i = loc + 1; i < uid_used; i++)
55243 + uid_set[i - 1] = uid_set[i];
55244 +
55245 + uid_used--;
55246 +
55247 + return;
55248 +}
55249 +
55250 +int
55251 +gr_check_crash_uid(const uid_t uid)
55252 +{
55253 + int loc;
55254 + int ret = 0;
55255 +
55256 + if (unlikely(!gr_acl_is_enabled()))
55257 + return 0;
55258 +
55259 + spin_lock(&gr_uid_lock);
55260 + loc = gr_find_uid(uid);
55261 +
55262 + if (loc < 0)
55263 + goto out_unlock;
55264 +
55265 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
55266 + gr_remove_uid(loc);
55267 + else
55268 + ret = 1;
55269 +
55270 +out_unlock:
55271 + spin_unlock(&gr_uid_lock);
55272 + return ret;
55273 +}
55274 +
55275 +static __inline__ int
55276 +proc_is_setxid(const struct cred *cred)
55277 +{
55278 + if (cred->uid != cred->euid || cred->uid != cred->suid ||
55279 + cred->uid != cred->fsuid)
55280 + return 1;
55281 + if (cred->gid != cred->egid || cred->gid != cred->sgid ||
55282 + cred->gid != cred->fsgid)
55283 + return 1;
55284 +
55285 + return 0;
55286 +}
55287 +
55288 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
55289 +
55290 +void
55291 +gr_handle_crash(struct task_struct *task, const int sig)
55292 +{
55293 + struct acl_subject_label *curr;
55294 + struct task_struct *tsk, *tsk2;
55295 + const struct cred *cred;
55296 + const struct cred *cred2;
55297 +
55298 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
55299 + return;
55300 +
55301 + if (unlikely(!gr_acl_is_enabled()))
55302 + return;
55303 +
55304 + curr = task->acl;
55305 +
55306 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
55307 + return;
55308 +
55309 + if (time_before_eq(curr->expires, get_seconds())) {
55310 + curr->expires = 0;
55311 + curr->crashes = 0;
55312 + }
55313 +
55314 + curr->crashes++;
55315 +
55316 + if (!curr->expires)
55317 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
55318 +
55319 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
55320 + time_after(curr->expires, get_seconds())) {
55321 + rcu_read_lock();
55322 + cred = __task_cred(task);
55323 + if (cred->uid && proc_is_setxid(cred)) {
55324 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
55325 + spin_lock(&gr_uid_lock);
55326 + gr_insert_uid(cred->uid, curr->expires);
55327 + spin_unlock(&gr_uid_lock);
55328 + curr->expires = 0;
55329 + curr->crashes = 0;
55330 + read_lock(&tasklist_lock);
55331 + do_each_thread(tsk2, tsk) {
55332 + cred2 = __task_cred(tsk);
55333 + if (tsk != task && cred2->uid == cred->uid)
55334 + gr_fake_force_sig(SIGKILL, tsk);
55335 + } while_each_thread(tsk2, tsk);
55336 + read_unlock(&tasklist_lock);
55337 + } else {
55338 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
55339 + read_lock(&tasklist_lock);
55340 + read_lock(&grsec_exec_file_lock);
55341 + do_each_thread(tsk2, tsk) {
55342 + if (likely(tsk != task)) {
55343 + // if this thread has the same subject as the one that triggered
55344 + // RES_CRASH and it's the same binary, kill it
55345 + if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
55346 + gr_fake_force_sig(SIGKILL, tsk);
55347 + }
55348 + } while_each_thread(tsk2, tsk);
55349 + read_unlock(&grsec_exec_file_lock);
55350 + read_unlock(&tasklist_lock);
55351 + }
55352 + rcu_read_unlock();
55353 + }
55354 +
55355 + return;
55356 +}
55357 +
55358 +int
55359 +gr_check_crash_exec(const struct file *filp)
55360 +{
55361 + struct acl_subject_label *curr;
55362 +
55363 + if (unlikely(!gr_acl_is_enabled()))
55364 + return 0;
55365 +
55366 + read_lock(&gr_inode_lock);
55367 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
55368 + __get_dev(filp->f_path.dentry),
55369 + current->role);
55370 + read_unlock(&gr_inode_lock);
55371 +
55372 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
55373 + (!curr->crashes && !curr->expires))
55374 + return 0;
55375 +
55376 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
55377 + time_after(curr->expires, get_seconds()))
55378 + return 1;
55379 + else if (time_before_eq(curr->expires, get_seconds())) {
55380 + curr->crashes = 0;
55381 + curr->expires = 0;
55382 + }
55383 +
55384 + return 0;
55385 +}
55386 +
55387 +void
55388 +gr_handle_alertkill(struct task_struct *task)
55389 +{
55390 + struct acl_subject_label *curracl;
55391 + __u32 curr_ip;
55392 + struct task_struct *p, *p2;
55393 +
55394 + if (unlikely(!gr_acl_is_enabled()))
55395 + return;
55396 +
55397 + curracl = task->acl;
55398 + curr_ip = task->signal->curr_ip;
55399 +
55400 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
55401 + read_lock(&tasklist_lock);
55402 + do_each_thread(p2, p) {
55403 + if (p->signal->curr_ip == curr_ip)
55404 + gr_fake_force_sig(SIGKILL, p);
55405 + } while_each_thread(p2, p);
55406 + read_unlock(&tasklist_lock);
55407 + } else if (curracl->mode & GR_KILLPROC)
55408 + gr_fake_force_sig(SIGKILL, task);
55409 +
55410 + return;
55411 +}
55412 diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
55413 new file mode 100644
55414 index 0000000..9d83a69
55415 --- /dev/null
55416 +++ b/grsecurity/gracl_shm.c
55417 @@ -0,0 +1,40 @@
55418 +#include <linux/kernel.h>
55419 +#include <linux/mm.h>
55420 +#include <linux/sched.h>
55421 +#include <linux/file.h>
55422 +#include <linux/ipc.h>
55423 +#include <linux/gracl.h>
55424 +#include <linux/grsecurity.h>
55425 +#include <linux/grinternal.h>
55426 +
55427 +int
55428 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
55429 + const time_t shm_createtime, const uid_t cuid, const int shmid)
55430 +{
55431 + struct task_struct *task;
55432 +
55433 + if (!gr_acl_is_enabled())
55434 + return 1;
55435 +
55436 + rcu_read_lock();
55437 + read_lock(&tasklist_lock);
55438 +
55439 + task = find_task_by_vpid(shm_cprid);
55440 +
55441 + if (unlikely(!task))
55442 + task = find_task_by_vpid(shm_lapid);
55443 +
55444 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
55445 + (task->pid == shm_lapid)) &&
55446 + (task->acl->mode & GR_PROTSHM) &&
55447 + (task->acl != current->acl))) {
55448 + read_unlock(&tasklist_lock);
55449 + rcu_read_unlock();
55450 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
55451 + return 0;
55452 + }
55453 + read_unlock(&tasklist_lock);
55454 + rcu_read_unlock();
55455 +
55456 + return 1;
55457 +}
55458 diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
55459 new file mode 100644
55460 index 0000000..bc0be01
55461 --- /dev/null
55462 +++ b/grsecurity/grsec_chdir.c
55463 @@ -0,0 +1,19 @@
55464 +#include <linux/kernel.h>
55465 +#include <linux/sched.h>
55466 +#include <linux/fs.h>
55467 +#include <linux/file.h>
55468 +#include <linux/grsecurity.h>
55469 +#include <linux/grinternal.h>
55470 +
55471 +void
55472 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
55473 +{
55474 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
55475 + if ((grsec_enable_chdir && grsec_enable_group &&
55476 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
55477 + !grsec_enable_group)) {
55478 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
55479 + }
55480 +#endif
55481 + return;
55482 +}
55483 diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
55484 new file mode 100644
55485 index 0000000..9807ee2
55486 --- /dev/null
55487 +++ b/grsecurity/grsec_chroot.c
55488 @@ -0,0 +1,368 @@
55489 +#include <linux/kernel.h>
55490 +#include <linux/module.h>
55491 +#include <linux/sched.h>
55492 +#include <linux/file.h>
55493 +#include <linux/fs.h>
55494 +#include <linux/mount.h>
55495 +#include <linux/types.h>
55496 +#include "../fs/mount.h"
55497 +#include <linux/grsecurity.h>
55498 +#include <linux/grinternal.h>
55499 +
55500 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
55501 +{
55502 +#ifdef CONFIG_GRKERNSEC
55503 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
55504 + path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root)
55505 + task->gr_is_chrooted = 1;
55506 + else
55507 + task->gr_is_chrooted = 0;
55508 +
55509 + task->gr_chroot_dentry = path->dentry;
55510 +#endif
55511 + return;
55512 +}
55513 +
55514 +void gr_clear_chroot_entries(struct task_struct *task)
55515 +{
55516 +#ifdef CONFIG_GRKERNSEC
55517 + task->gr_is_chrooted = 0;
55518 + task->gr_chroot_dentry = NULL;
55519 +#endif
55520 + return;
55521 +}
55522 +
55523 +int
55524 +gr_handle_chroot_unix(const pid_t pid)
55525 +{
55526 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
55527 + struct task_struct *p;
55528 +
55529 + if (unlikely(!grsec_enable_chroot_unix))
55530 + return 1;
55531 +
55532 + if (likely(!proc_is_chrooted(current)))
55533 + return 1;
55534 +
55535 + rcu_read_lock();
55536 + read_lock(&tasklist_lock);
55537 + p = find_task_by_vpid_unrestricted(pid);
55538 + if (unlikely(p && !have_same_root(current, p))) {
55539 + read_unlock(&tasklist_lock);
55540 + rcu_read_unlock();
55541 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
55542 + return 0;
55543 + }
55544 + read_unlock(&tasklist_lock);
55545 + rcu_read_unlock();
55546 +#endif
55547 + return 1;
55548 +}
55549 +
55550 +int
55551 +gr_handle_chroot_nice(void)
55552 +{
55553 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
55554 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
55555 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
55556 + return -EPERM;
55557 + }
55558 +#endif
55559 + return 0;
55560 +}
55561 +
55562 +int
55563 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
55564 +{
55565 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
55566 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
55567 + && proc_is_chrooted(current)) {
55568 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
55569 + return -EACCES;
55570 + }
55571 +#endif
55572 + return 0;
55573 +}
55574 +
55575 +int
55576 +gr_handle_chroot_rawio(const struct inode *inode)
55577 +{
55578 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
55579 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
55580 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
55581 + return 1;
55582 +#endif
55583 + return 0;
55584 +}
55585 +
55586 +int
55587 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
55588 +{
55589 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
55590 + struct task_struct *p;
55591 + int ret = 0;
55592 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
55593 + return ret;
55594 +
55595 + read_lock(&tasklist_lock);
55596 + do_each_pid_task(pid, type, p) {
55597 + if (!have_same_root(current, p)) {
55598 + ret = 1;
55599 + goto out;
55600 + }
55601 + } while_each_pid_task(pid, type, p);
55602 +out:
55603 + read_unlock(&tasklist_lock);
55604 + return ret;
55605 +#endif
55606 + return 0;
55607 +}
55608 +
55609 +int
55610 +gr_pid_is_chrooted(struct task_struct *p)
55611 +{
55612 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
55613 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
55614 + return 0;
55615 +
55616 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
55617 + !have_same_root(current, p)) {
55618 + return 1;
55619 + }
55620 +#endif
55621 + return 0;
55622 +}
55623 +
55624 +EXPORT_SYMBOL(gr_pid_is_chrooted);
55625 +
55626 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
55627 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
55628 +{
55629 + struct path path, currentroot;
55630 + int ret = 0;
55631 +
55632 + path.dentry = (struct dentry *)u_dentry;
55633 + path.mnt = (struct vfsmount *)u_mnt;
55634 + get_fs_root(current->fs, &currentroot);
55635 + if (path_is_under(&path, &currentroot))
55636 + ret = 1;
55637 + path_put(&currentroot);
55638 +
55639 + return ret;
55640 +}
55641 +#endif
55642 +
55643 +int
55644 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
55645 +{
55646 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
55647 + if (!grsec_enable_chroot_fchdir)
55648 + return 1;
55649 +
55650 + if (!proc_is_chrooted(current))
55651 + return 1;
55652 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
55653 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
55654 + return 0;
55655 + }
55656 +#endif
55657 + return 1;
55658 +}
55659 +
55660 +int
55661 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
55662 + const time_t shm_createtime)
55663 +{
55664 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
55665 + struct task_struct *p;
55666 + time_t starttime;
55667 +
55668 + if (unlikely(!grsec_enable_chroot_shmat))
55669 + return 1;
55670 +
55671 + if (likely(!proc_is_chrooted(current)))
55672 + return 1;
55673 +
55674 + rcu_read_lock();
55675 + read_lock(&tasklist_lock);
55676 +
55677 + if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
55678 + starttime = p->start_time.tv_sec;
55679 + if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
55680 + if (have_same_root(current, p)) {
55681 + goto allow;
55682 + } else {
55683 + read_unlock(&tasklist_lock);
55684 + rcu_read_unlock();
55685 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
55686 + return 0;
55687 + }
55688 + }
55689 + /* creator exited, pid reuse, fall through to next check */
55690 + }
55691 + if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
55692 + if (unlikely(!have_same_root(current, p))) {
55693 + read_unlock(&tasklist_lock);
55694 + rcu_read_unlock();
55695 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
55696 + return 0;
55697 + }
55698 + }
55699 +
55700 +allow:
55701 + read_unlock(&tasklist_lock);
55702 + rcu_read_unlock();
55703 +#endif
55704 + return 1;
55705 +}
55706 +
55707 +void
55708 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
55709 +{
55710 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
55711 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
55712 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
55713 +#endif
55714 + return;
55715 +}
55716 +
55717 +int
55718 +gr_handle_chroot_mknod(const struct dentry *dentry,
55719 + const struct vfsmount *mnt, const int mode)
55720 +{
55721 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
55722 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
55723 + proc_is_chrooted(current)) {
55724 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
55725 + return -EPERM;
55726 + }
55727 +#endif
55728 + return 0;
55729 +}
55730 +
55731 +int
55732 +gr_handle_chroot_mount(const struct dentry *dentry,
55733 + const struct vfsmount *mnt, const char *dev_name)
55734 +{
55735 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
55736 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
55737 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
55738 + return -EPERM;
55739 + }
55740 +#endif
55741 + return 0;
55742 +}
55743 +
55744 +int
55745 +gr_handle_chroot_pivot(void)
55746 +{
55747 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
55748 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
55749 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
55750 + return -EPERM;
55751 + }
55752 +#endif
55753 + return 0;
55754 +}
55755 +
55756 +int
55757 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
55758 +{
55759 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
55760 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
55761 + !gr_is_outside_chroot(dentry, mnt)) {
55762 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
55763 + return -EPERM;
55764 + }
55765 +#endif
55766 + return 0;
55767 +}
55768 +
55769 +extern const char *captab_log[];
55770 +extern int captab_log_entries;
55771 +
55772 +int
55773 +gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
55774 +{
55775 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
55776 + if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
55777 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
55778 + if (cap_raised(chroot_caps, cap)) {
55779 + if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
55780 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
55781 + }
55782 + return 0;
55783 + }
55784 + }
55785 +#endif
55786 + return 1;
55787 +}
55788 +
55789 +int
55790 +gr_chroot_is_capable(const int cap)
55791 +{
55792 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
55793 + return gr_task_chroot_is_capable(current, current_cred(), cap);
55794 +#endif
55795 + return 1;
55796 +}
55797 +
55798 +int
55799 +gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
55800 +{
55801 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
55802 + if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
55803 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
55804 + if (cap_raised(chroot_caps, cap)) {
55805 + return 0;
55806 + }
55807 + }
55808 +#endif
55809 + return 1;
55810 +}
55811 +
55812 +int
55813 +gr_chroot_is_capable_nolog(const int cap)
55814 +{
55815 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
55816 + return gr_task_chroot_is_capable_nolog(current, cap);
55817 +#endif
55818 + return 1;
55819 +}
55820 +
55821 +int
55822 +gr_handle_chroot_sysctl(const int op)
55823 +{
55824 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
55825 + if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
55826 + proc_is_chrooted(current))
55827 + return -EACCES;
55828 +#endif
55829 + return 0;
55830 +}
55831 +
55832 +void
55833 +gr_handle_chroot_chdir(struct path *path)
55834 +{
55835 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
55836 + if (grsec_enable_chroot_chdir)
55837 + set_fs_pwd(current->fs, path);
55838 +#endif
55839 + return;
55840 +}
55841 +
55842 +int
55843 +gr_handle_chroot_chmod(const struct dentry *dentry,
55844 + const struct vfsmount *mnt, const int mode)
55845 +{
55846 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
55847 + /* allow chmod +s on directories, but not files */
55848 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
55849 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
55850 + proc_is_chrooted(current)) {
55851 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
55852 + return -EPERM;
55853 + }
55854 +#endif
55855 + return 0;
55856 +}
55857 diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
55858 new file mode 100644
55859 index 0000000..213ad8b
55860 --- /dev/null
55861 +++ b/grsecurity/grsec_disabled.c
55862 @@ -0,0 +1,437 @@
55863 +#include <linux/kernel.h>
55864 +#include <linux/module.h>
55865 +#include <linux/sched.h>
55866 +#include <linux/file.h>
55867 +#include <linux/fs.h>
55868 +#include <linux/kdev_t.h>
55869 +#include <linux/net.h>
55870 +#include <linux/in.h>
55871 +#include <linux/ip.h>
55872 +#include <linux/skbuff.h>
55873 +#include <linux/sysctl.h>
55874 +
55875 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
55876 +void
55877 +pax_set_initial_flags(struct linux_binprm *bprm)
55878 +{
55879 + return;
55880 +}
55881 +#endif
55882 +
55883 +#ifdef CONFIG_SYSCTL
55884 +__u32
55885 +gr_handle_sysctl(const struct ctl_table * table, const int op)
55886 +{
55887 + return 0;
55888 +}
55889 +#endif
55890 +
55891 +#ifdef CONFIG_TASKSTATS
55892 +int gr_is_taskstats_denied(int pid)
55893 +{
55894 + return 0;
55895 +}
55896 +#endif
55897 +
55898 +int
55899 +gr_acl_is_enabled(void)
55900 +{
55901 + return 0;
55902 +}
55903 +
55904 +void
55905 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
55906 +{
55907 + return;
55908 +}
55909 +
55910 +int
55911 +gr_handle_rawio(const struct inode *inode)
55912 +{
55913 + return 0;
55914 +}
55915 +
55916 +void
55917 +gr_acl_handle_psacct(struct task_struct *task, const long code)
55918 +{
55919 + return;
55920 +}
55921 +
55922 +int
55923 +gr_handle_ptrace(struct task_struct *task, const long request)
55924 +{
55925 + return 0;
55926 +}
55927 +
55928 +int
55929 +gr_handle_proc_ptrace(struct task_struct *task)
55930 +{
55931 + return 0;
55932 +}
55933 +
55934 +void
55935 +gr_learn_resource(const struct task_struct *task,
55936 + const int res, const unsigned long wanted, const int gt)
55937 +{
55938 + return;
55939 +}
55940 +
55941 +int
55942 +gr_set_acls(const int type)
55943 +{
55944 + return 0;
55945 +}
55946 +
55947 +int
55948 +gr_check_hidden_task(const struct task_struct *tsk)
55949 +{
55950 + return 0;
55951 +}
55952 +
55953 +int
55954 +gr_check_protected_task(const struct task_struct *task)
55955 +{
55956 + return 0;
55957 +}
55958 +
55959 +int
55960 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
55961 +{
55962 + return 0;
55963 +}
55964 +
55965 +void
55966 +gr_copy_label(struct task_struct *tsk)
55967 +{
55968 + return;
55969 +}
55970 +
55971 +void
55972 +gr_set_pax_flags(struct task_struct *task)
55973 +{
55974 + return;
55975 +}
55976 +
55977 +int
55978 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
55979 + const int unsafe_share)
55980 +{
55981 + return 0;
55982 +}
55983 +
55984 +void
55985 +gr_handle_delete(const ino_t ino, const dev_t dev)
55986 +{
55987 + return;
55988 +}
55989 +
55990 +void
55991 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
55992 +{
55993 + return;
55994 +}
55995 +
55996 +void
55997 +gr_handle_crash(struct task_struct *task, const int sig)
55998 +{
55999 + return;
56000 +}
56001 +
56002 +int
56003 +gr_check_crash_exec(const struct file *filp)
56004 +{
56005 + return 0;
56006 +}
56007 +
56008 +int
56009 +gr_check_crash_uid(const uid_t uid)
56010 +{
56011 + return 0;
56012 +}
56013 +
56014 +void
56015 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
56016 + struct dentry *old_dentry,
56017 + struct dentry *new_dentry,
56018 + struct vfsmount *mnt, const __u8 replace)
56019 +{
56020 + return;
56021 +}
56022 +
56023 +int
56024 +gr_search_socket(const int family, const int type, const int protocol)
56025 +{
56026 + return 1;
56027 +}
56028 +
56029 +int
56030 +gr_search_connectbind(const int mode, const struct socket *sock,
56031 + const struct sockaddr_in *addr)
56032 +{
56033 + return 0;
56034 +}
56035 +
56036 +void
56037 +gr_handle_alertkill(struct task_struct *task)
56038 +{
56039 + return;
56040 +}
56041 +
56042 +__u32
56043 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
56044 +{
56045 + return 1;
56046 +}
56047 +
56048 +__u32
56049 +gr_acl_handle_hidden_file(const struct dentry * dentry,
56050 + const struct vfsmount * mnt)
56051 +{
56052 + return 1;
56053 +}
56054 +
56055 +__u32
56056 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
56057 + int acc_mode)
56058 +{
56059 + return 1;
56060 +}
56061 +
56062 +__u32
56063 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
56064 +{
56065 + return 1;
56066 +}
56067 +
56068 +__u32
56069 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
56070 +{
56071 + return 1;
56072 +}
56073 +
56074 +int
56075 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
56076 + unsigned int *vm_flags)
56077 +{
56078 + return 1;
56079 +}
56080 +
56081 +__u32
56082 +gr_acl_handle_truncate(const struct dentry * dentry,
56083 + const struct vfsmount * mnt)
56084 +{
56085 + return 1;
56086 +}
56087 +
56088 +__u32
56089 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
56090 +{
56091 + return 1;
56092 +}
56093 +
56094 +__u32
56095 +gr_acl_handle_access(const struct dentry * dentry,
56096 + const struct vfsmount * mnt, const int fmode)
56097 +{
56098 + return 1;
56099 +}
56100 +
56101 +__u32
56102 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
56103 + umode_t *mode)
56104 +{
56105 + return 1;
56106 +}
56107 +
56108 +__u32
56109 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
56110 +{
56111 + return 1;
56112 +}
56113 +
56114 +__u32
56115 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
56116 +{
56117 + return 1;
56118 +}
56119 +
56120 +void
56121 +grsecurity_init(void)
56122 +{
56123 + return;
56124 +}
56125 +
56126 +umode_t gr_acl_umask(void)
56127 +{
56128 + return 0;
56129 +}
56130 +
56131 +__u32
56132 +gr_acl_handle_mknod(const struct dentry * new_dentry,
56133 + const struct dentry * parent_dentry,
56134 + const struct vfsmount * parent_mnt,
56135 + const int mode)
56136 +{
56137 + return 1;
56138 +}
56139 +
56140 +__u32
56141 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
56142 + const struct dentry * parent_dentry,
56143 + const struct vfsmount * parent_mnt)
56144 +{
56145 + return 1;
56146 +}
56147 +
56148 +__u32
56149 +gr_acl_handle_symlink(const struct dentry * new_dentry,
56150 + const struct dentry * parent_dentry,
56151 + const struct vfsmount * parent_mnt, const char *from)
56152 +{
56153 + return 1;
56154 +}
56155 +
56156 +__u32
56157 +gr_acl_handle_link(const struct dentry * new_dentry,
56158 + const struct dentry * parent_dentry,
56159 + const struct vfsmount * parent_mnt,
56160 + const struct dentry * old_dentry,
56161 + const struct vfsmount * old_mnt, const char *to)
56162 +{
56163 + return 1;
56164 +}
56165 +
56166 +int
56167 +gr_acl_handle_rename(const struct dentry *new_dentry,
56168 + const struct dentry *parent_dentry,
56169 + const struct vfsmount *parent_mnt,
56170 + const struct dentry *old_dentry,
56171 + const struct inode *old_parent_inode,
56172 + const struct vfsmount *old_mnt, const char *newname)
56173 +{
56174 + return 0;
56175 +}
56176 +
56177 +int
56178 +gr_acl_handle_filldir(const struct file *file, const char *name,
56179 + const int namelen, const ino_t ino)
56180 +{
56181 + return 1;
56182 +}
56183 +
56184 +int
56185 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
56186 + const time_t shm_createtime, const uid_t cuid, const int shmid)
56187 +{
56188 + return 1;
56189 +}
56190 +
56191 +int
56192 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
56193 +{
56194 + return 0;
56195 +}
56196 +
56197 +int
56198 +gr_search_accept(const struct socket *sock)
56199 +{
56200 + return 0;
56201 +}
56202 +
56203 +int
56204 +gr_search_listen(const struct socket *sock)
56205 +{
56206 + return 0;
56207 +}
56208 +
56209 +int
56210 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
56211 +{
56212 + return 0;
56213 +}
56214 +
56215 +__u32
56216 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
56217 +{
56218 + return 1;
56219 +}
56220 +
56221 +__u32
56222 +gr_acl_handle_creat(const struct dentry * dentry,
56223 + const struct dentry * p_dentry,
56224 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
56225 + const int imode)
56226 +{
56227 + return 1;
56228 +}
56229 +
56230 +void
56231 +gr_acl_handle_exit(void)
56232 +{
56233 + return;
56234 +}
56235 +
56236 +int
56237 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
56238 +{
56239 + return 1;
56240 +}
56241 +
56242 +void
56243 +gr_set_role_label(const uid_t uid, const gid_t gid)
56244 +{
56245 + return;
56246 +}
56247 +
56248 +int
56249 +gr_acl_handle_procpidmem(const struct task_struct *task)
56250 +{
56251 + return 0;
56252 +}
56253 +
56254 +int
56255 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
56256 +{
56257 + return 0;
56258 +}
56259 +
56260 +int
56261 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
56262 +{
56263 + return 0;
56264 +}
56265 +
56266 +void
56267 +gr_set_kernel_label(struct task_struct *task)
56268 +{
56269 + return;
56270 +}
56271 +
56272 +int
56273 +gr_check_user_change(int real, int effective, int fs)
56274 +{
56275 + return 0;
56276 +}
56277 +
56278 +int
56279 +gr_check_group_change(int real, int effective, int fs)
56280 +{
56281 + return 0;
56282 +}
56283 +
56284 +int gr_acl_enable_at_secure(void)
56285 +{
56286 + return 0;
56287 +}
56288 +
56289 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
56290 +{
56291 + return dentry->d_inode->i_sb->s_dev;
56292 +}
56293 +
56294 +EXPORT_SYMBOL(gr_learn_resource);
56295 +EXPORT_SYMBOL(gr_set_kernel_label);
56296 +#ifdef CONFIG_SECURITY
56297 +EXPORT_SYMBOL(gr_check_user_change);
56298 +EXPORT_SYMBOL(gr_check_group_change);
56299 +#endif
56300 diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
56301 new file mode 100644
56302 index 0000000..abfa971
56303 --- /dev/null
56304 +++ b/grsecurity/grsec_exec.c
56305 @@ -0,0 +1,174 @@
56306 +#include <linux/kernel.h>
56307 +#include <linux/sched.h>
56308 +#include <linux/file.h>
56309 +#include <linux/binfmts.h>
56310 +#include <linux/fs.h>
56311 +#include <linux/types.h>
56312 +#include <linux/grdefs.h>
56313 +#include <linux/grsecurity.h>
56314 +#include <linux/grinternal.h>
56315 +#include <linux/capability.h>
56316 +#include <linux/module.h>
56317 +
56318 +#include <asm/uaccess.h>
56319 +
56320 +#ifdef CONFIG_GRKERNSEC_EXECLOG
56321 +static char gr_exec_arg_buf[132];
56322 +static DEFINE_MUTEX(gr_exec_arg_mutex);
56323 +#endif
56324 +
56325 +extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
56326 +
56327 +void
56328 +gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
56329 +{
56330 +#ifdef CONFIG_GRKERNSEC_EXECLOG
56331 + char *grarg = gr_exec_arg_buf;
56332 + unsigned int i, x, execlen = 0;
56333 + char c;
56334 +
56335 + if (!((grsec_enable_execlog && grsec_enable_group &&
56336 + in_group_p(grsec_audit_gid))
56337 + || (grsec_enable_execlog && !grsec_enable_group)))
56338 + return;
56339 +
56340 + mutex_lock(&gr_exec_arg_mutex);
56341 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
56342 +
56343 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
56344 + const char __user *p;
56345 + unsigned int len;
56346 +
56347 + p = get_user_arg_ptr(argv, i);
56348 + if (IS_ERR(p))
56349 + goto log;
56350 +
56351 + len = strnlen_user(p, 128 - execlen);
56352 + if (len > 128 - execlen)
56353 + len = 128 - execlen;
56354 + else if (len > 0)
56355 + len--;
56356 + if (copy_from_user(grarg + execlen, p, len))
56357 + goto log;
56358 +
56359 + /* rewrite unprintable characters */
56360 + for (x = 0; x < len; x++) {
56361 + c = *(grarg + execlen + x);
56362 + if (c < 32 || c > 126)
56363 + *(grarg + execlen + x) = ' ';
56364 + }
56365 +
56366 + execlen += len;
56367 + *(grarg + execlen) = ' ';
56368 + *(grarg + execlen + 1) = '\0';
56369 + execlen++;
56370 + }
56371 +
56372 + log:
56373 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
56374 + bprm->file->f_path.mnt, grarg);
56375 + mutex_unlock(&gr_exec_arg_mutex);
56376 +#endif
56377 + return;
56378 +}
56379 +
56380 +#ifdef CONFIG_GRKERNSEC
56381 +extern int gr_acl_is_capable(const int cap);
56382 +extern int gr_acl_is_capable_nolog(const int cap);
56383 +extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
56384 +extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
56385 +extern int gr_chroot_is_capable(const int cap);
56386 +extern int gr_chroot_is_capable_nolog(const int cap);
56387 +extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
56388 +extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
56389 +#endif
56390 +
56391 +const char *captab_log[] = {
56392 + "CAP_CHOWN",
56393 + "CAP_DAC_OVERRIDE",
56394 + "CAP_DAC_READ_SEARCH",
56395 + "CAP_FOWNER",
56396 + "CAP_FSETID",
56397 + "CAP_KILL",
56398 + "CAP_SETGID",
56399 + "CAP_SETUID",
56400 + "CAP_SETPCAP",
56401 + "CAP_LINUX_IMMUTABLE",
56402 + "CAP_NET_BIND_SERVICE",
56403 + "CAP_NET_BROADCAST",
56404 + "CAP_NET_ADMIN",
56405 + "CAP_NET_RAW",
56406 + "CAP_IPC_LOCK",
56407 + "CAP_IPC_OWNER",
56408 + "CAP_SYS_MODULE",
56409 + "CAP_SYS_RAWIO",
56410 + "CAP_SYS_CHROOT",
56411 + "CAP_SYS_PTRACE",
56412 + "CAP_SYS_PACCT",
56413 + "CAP_SYS_ADMIN",
56414 + "CAP_SYS_BOOT",
56415 + "CAP_SYS_NICE",
56416 + "CAP_SYS_RESOURCE",
56417 + "CAP_SYS_TIME",
56418 + "CAP_SYS_TTY_CONFIG",
56419 + "CAP_MKNOD",
56420 + "CAP_LEASE",
56421 + "CAP_AUDIT_WRITE",
56422 + "CAP_AUDIT_CONTROL",
56423 + "CAP_SETFCAP",
56424 + "CAP_MAC_OVERRIDE",
56425 + "CAP_MAC_ADMIN",
56426 + "CAP_SYSLOG",
56427 + "CAP_WAKE_ALARM"
56428 +};
56429 +
56430 +int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
56431 +
56432 +int gr_is_capable(const int cap)
56433 +{
56434 +#ifdef CONFIG_GRKERNSEC
56435 + if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
56436 + return 1;
56437 + return 0;
56438 +#else
56439 + return 1;
56440 +#endif
56441 +}
56442 +
56443 +int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
56444 +{
56445 +#ifdef CONFIG_GRKERNSEC
56446 + if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
56447 + return 1;
56448 + return 0;
56449 +#else
56450 + return 1;
56451 +#endif
56452 +}
56453 +
56454 +int gr_is_capable_nolog(const int cap)
56455 +{
56456 +#ifdef CONFIG_GRKERNSEC
56457 + if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
56458 + return 1;
56459 + return 0;
56460 +#else
56461 + return 1;
56462 +#endif
56463 +}
56464 +
56465 +int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
56466 +{
56467 +#ifdef CONFIG_GRKERNSEC
56468 + if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
56469 + return 1;
56470 + return 0;
56471 +#else
56472 + return 1;
56473 +#endif
56474 +}
56475 +
56476 +EXPORT_SYMBOL(gr_is_capable);
56477 +EXPORT_SYMBOL(gr_is_capable_nolog);
56478 +EXPORT_SYMBOL(gr_task_is_capable);
56479 +EXPORT_SYMBOL(gr_task_is_capable_nolog);
56480 diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
56481 new file mode 100644
56482 index 0000000..d3ee748
56483 --- /dev/null
56484 +++ b/grsecurity/grsec_fifo.c
56485 @@ -0,0 +1,24 @@
56486 +#include <linux/kernel.h>
56487 +#include <linux/sched.h>
56488 +#include <linux/fs.h>
56489 +#include <linux/file.h>
56490 +#include <linux/grinternal.h>
56491 +
56492 +int
56493 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
56494 + const struct dentry *dir, const int flag, const int acc_mode)
56495 +{
56496 +#ifdef CONFIG_GRKERNSEC_FIFO
56497 + const struct cred *cred = current_cred();
56498 +
56499 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
56500 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
56501 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
56502 + (cred->fsuid != dentry->d_inode->i_uid)) {
56503 + if (!inode_permission(dentry->d_inode, acc_mode))
56504 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
56505 + return -EACCES;
56506 + }
56507 +#endif
56508 + return 0;
56509 +}
56510 diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
56511 new file mode 100644
56512 index 0000000..8ca18bf
56513 --- /dev/null
56514 +++ b/grsecurity/grsec_fork.c
56515 @@ -0,0 +1,23 @@
56516 +#include <linux/kernel.h>
56517 +#include <linux/sched.h>
56518 +#include <linux/grsecurity.h>
56519 +#include <linux/grinternal.h>
56520 +#include <linux/errno.h>
56521 +
56522 +void
56523 +gr_log_forkfail(const int retval)
56524 +{
56525 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
56526 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
56527 + switch (retval) {
56528 + case -EAGAIN:
56529 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
56530 + break;
56531 + case -ENOMEM:
56532 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
56533 + break;
56534 + }
56535 + }
56536 +#endif
56537 + return;
56538 +}
56539 diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
56540 new file mode 100644
56541 index 0000000..01ddde4
56542 --- /dev/null
56543 +++ b/grsecurity/grsec_init.c
56544 @@ -0,0 +1,277 @@
56545 +#include <linux/kernel.h>
56546 +#include <linux/sched.h>
56547 +#include <linux/mm.h>
56548 +#include <linux/gracl.h>
56549 +#include <linux/slab.h>
56550 +#include <linux/vmalloc.h>
56551 +#include <linux/percpu.h>
56552 +#include <linux/module.h>
56553 +
56554 +int grsec_enable_ptrace_readexec;
56555 +int grsec_enable_setxid;
56556 +int grsec_enable_brute;
56557 +int grsec_enable_link;
56558 +int grsec_enable_dmesg;
56559 +int grsec_enable_harden_ptrace;
56560 +int grsec_enable_fifo;
56561 +int grsec_enable_execlog;
56562 +int grsec_enable_signal;
56563 +int grsec_enable_forkfail;
56564 +int grsec_enable_audit_ptrace;
56565 +int grsec_enable_time;
56566 +int grsec_enable_audit_textrel;
56567 +int grsec_enable_group;
56568 +int grsec_audit_gid;
56569 +int grsec_enable_chdir;
56570 +int grsec_enable_mount;
56571 +int grsec_enable_rofs;
56572 +int grsec_enable_chroot_findtask;
56573 +int grsec_enable_chroot_mount;
56574 +int grsec_enable_chroot_shmat;
56575 +int grsec_enable_chroot_fchdir;
56576 +int grsec_enable_chroot_double;
56577 +int grsec_enable_chroot_pivot;
56578 +int grsec_enable_chroot_chdir;
56579 +int grsec_enable_chroot_chmod;
56580 +int grsec_enable_chroot_mknod;
56581 +int grsec_enable_chroot_nice;
56582 +int grsec_enable_chroot_execlog;
56583 +int grsec_enable_chroot_caps;
56584 +int grsec_enable_chroot_sysctl;
56585 +int grsec_enable_chroot_unix;
56586 +int grsec_enable_tpe;
56587 +int grsec_tpe_gid;
56588 +int grsec_enable_blackhole;
56589 +#ifdef CONFIG_IPV6_MODULE
56590 +EXPORT_SYMBOL(grsec_enable_blackhole);
56591 +#endif
56592 +int grsec_lastack_retries;
56593 +int grsec_enable_tpe_all;
56594 +int grsec_enable_tpe_invert;
56595 +int grsec_enable_socket_all;
56596 +int grsec_socket_all_gid;
56597 +int grsec_enable_socket_client;
56598 +int grsec_socket_client_gid;
56599 +int grsec_enable_socket_server;
56600 +int grsec_socket_server_gid;
56601 +int grsec_resource_logging;
56602 +int grsec_disable_privio;
56603 +int grsec_enable_log_rwxmaps;
56604 +int grsec_lock;
56605 +
56606 +DEFINE_SPINLOCK(grsec_alert_lock);
56607 +unsigned long grsec_alert_wtime = 0;
56608 +unsigned long grsec_alert_fyet = 0;
56609 +
56610 +DEFINE_SPINLOCK(grsec_audit_lock);
56611 +
56612 +DEFINE_RWLOCK(grsec_exec_file_lock);
56613 +
56614 +char *gr_shared_page[4];
56615 +
56616 +char *gr_alert_log_fmt;
56617 +char *gr_audit_log_fmt;
56618 +char *gr_alert_log_buf;
56619 +char *gr_audit_log_buf;
56620 +
56621 +extern struct gr_arg *gr_usermode;
56622 +extern unsigned char *gr_system_salt;
56623 +extern unsigned char *gr_system_sum;
56624 +
56625 +void __init
56626 +grsecurity_init(void)
56627 +{
56628 + int j;
56629 + /* create the per-cpu shared pages */
56630 +
56631 +#ifdef CONFIG_X86
56632 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
56633 +#endif
56634 +
56635 + for (j = 0; j < 4; j++) {
56636 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
56637 + if (gr_shared_page[j] == NULL) {
56638 + panic("Unable to allocate grsecurity shared page");
56639 + return;
56640 + }
56641 + }
56642 +
56643 + /* allocate log buffers */
56644 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
56645 + if (!gr_alert_log_fmt) {
56646 + panic("Unable to allocate grsecurity alert log format buffer");
56647 + return;
56648 + }
56649 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
56650 + if (!gr_audit_log_fmt) {
56651 + panic("Unable to allocate grsecurity audit log format buffer");
56652 + return;
56653 + }
56654 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
56655 + if (!gr_alert_log_buf) {
56656 + panic("Unable to allocate grsecurity alert log buffer");
56657 + return;
56658 + }
56659 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
56660 + if (!gr_audit_log_buf) {
56661 + panic("Unable to allocate grsecurity audit log buffer");
56662 + return;
56663 + }
56664 +
56665 + /* allocate memory for authentication structure */
56666 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
56667 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
56668 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
56669 +
56670 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
56671 + panic("Unable to allocate grsecurity authentication structure");
56672 + return;
56673 + }
56674 +
56675 +
56676 +#ifdef CONFIG_GRKERNSEC_IO
56677 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
56678 + grsec_disable_privio = 1;
56679 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
56680 + grsec_disable_privio = 1;
56681 +#else
56682 + grsec_disable_privio = 0;
56683 +#endif
56684 +#endif
56685 +
56686 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
56687 + /* for backward compatibility, tpe_invert always defaults to on if
56688 + enabled in the kernel
56689 + */
56690 + grsec_enable_tpe_invert = 1;
56691 +#endif
56692 +
56693 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
56694 +#ifndef CONFIG_GRKERNSEC_SYSCTL
56695 + grsec_lock = 1;
56696 +#endif
56697 +
56698 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
56699 + grsec_enable_audit_textrel = 1;
56700 +#endif
56701 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
56702 + grsec_enable_log_rwxmaps = 1;
56703 +#endif
56704 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
56705 + grsec_enable_group = 1;
56706 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
56707 +#endif
56708 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
56709 + grsec_enable_ptrace_readexec = 1;
56710 +#endif
56711 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
56712 + grsec_enable_chdir = 1;
56713 +#endif
56714 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
56715 + grsec_enable_harden_ptrace = 1;
56716 +#endif
56717 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
56718 + grsec_enable_mount = 1;
56719 +#endif
56720 +#ifdef CONFIG_GRKERNSEC_LINK
56721 + grsec_enable_link = 1;
56722 +#endif
56723 +#ifdef CONFIG_GRKERNSEC_BRUTE
56724 + grsec_enable_brute = 1;
56725 +#endif
56726 +#ifdef CONFIG_GRKERNSEC_DMESG
56727 + grsec_enable_dmesg = 1;
56728 +#endif
56729 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
56730 + grsec_enable_blackhole = 1;
56731 + grsec_lastack_retries = 4;
56732 +#endif
56733 +#ifdef CONFIG_GRKERNSEC_FIFO
56734 + grsec_enable_fifo = 1;
56735 +#endif
56736 +#ifdef CONFIG_GRKERNSEC_EXECLOG
56737 + grsec_enable_execlog = 1;
56738 +#endif
56739 +#ifdef CONFIG_GRKERNSEC_SETXID
56740 + grsec_enable_setxid = 1;
56741 +#endif
56742 +#ifdef CONFIG_GRKERNSEC_SIGNAL
56743 + grsec_enable_signal = 1;
56744 +#endif
56745 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
56746 + grsec_enable_forkfail = 1;
56747 +#endif
56748 +#ifdef CONFIG_GRKERNSEC_TIME
56749 + grsec_enable_time = 1;
56750 +#endif
56751 +#ifdef CONFIG_GRKERNSEC_RESLOG
56752 + grsec_resource_logging = 1;
56753 +#endif
56754 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
56755 + grsec_enable_chroot_findtask = 1;
56756 +#endif
56757 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
56758 + grsec_enable_chroot_unix = 1;
56759 +#endif
56760 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
56761 + grsec_enable_chroot_mount = 1;
56762 +#endif
56763 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
56764 + grsec_enable_chroot_fchdir = 1;
56765 +#endif
56766 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
56767 + grsec_enable_chroot_shmat = 1;
56768 +#endif
56769 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
56770 + grsec_enable_audit_ptrace = 1;
56771 +#endif
56772 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
56773 + grsec_enable_chroot_double = 1;
56774 +#endif
56775 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
56776 + grsec_enable_chroot_pivot = 1;
56777 +#endif
56778 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
56779 + grsec_enable_chroot_chdir = 1;
56780 +#endif
56781 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
56782 + grsec_enable_chroot_chmod = 1;
56783 +#endif
56784 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
56785 + grsec_enable_chroot_mknod = 1;
56786 +#endif
56787 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
56788 + grsec_enable_chroot_nice = 1;
56789 +#endif
56790 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
56791 + grsec_enable_chroot_execlog = 1;
56792 +#endif
56793 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56794 + grsec_enable_chroot_caps = 1;
56795 +#endif
56796 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
56797 + grsec_enable_chroot_sysctl = 1;
56798 +#endif
56799 +#ifdef CONFIG_GRKERNSEC_TPE
56800 + grsec_enable_tpe = 1;
56801 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
56802 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
56803 + grsec_enable_tpe_all = 1;
56804 +#endif
56805 +#endif
56806 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
56807 + grsec_enable_socket_all = 1;
56808 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
56809 +#endif
56810 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
56811 + grsec_enable_socket_client = 1;
56812 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
56813 +#endif
56814 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
56815 + grsec_enable_socket_server = 1;
56816 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
56817 +#endif
56818 +#endif
56819 +
56820 + return;
56821 +}
56822 diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
56823 new file mode 100644
56824 index 0000000..3efe141
56825 --- /dev/null
56826 +++ b/grsecurity/grsec_link.c
56827 @@ -0,0 +1,43 @@
56828 +#include <linux/kernel.h>
56829 +#include <linux/sched.h>
56830 +#include <linux/fs.h>
56831 +#include <linux/file.h>
56832 +#include <linux/grinternal.h>
56833 +
56834 +int
56835 +gr_handle_follow_link(const struct inode *parent,
56836 + const struct inode *inode,
56837 + const struct dentry *dentry, const struct vfsmount *mnt)
56838 +{
56839 +#ifdef CONFIG_GRKERNSEC_LINK
56840 + const struct cred *cred = current_cred();
56841 +
56842 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
56843 + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
56844 + (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
56845 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
56846 + return -EACCES;
56847 + }
56848 +#endif
56849 + return 0;
56850 +}
56851 +
56852 +int
56853 +gr_handle_hardlink(const struct dentry *dentry,
56854 + const struct vfsmount *mnt,
56855 + struct inode *inode, const int mode, const char *to)
56856 +{
56857 +#ifdef CONFIG_GRKERNSEC_LINK
56858 + const struct cred *cred = current_cred();
56859 +
56860 + if (grsec_enable_link && cred->fsuid != inode->i_uid &&
56861 + (!S_ISREG(mode) || (mode & S_ISUID) ||
56862 + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
56863 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
56864 + !capable(CAP_FOWNER) && cred->uid) {
56865 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
56866 + return -EPERM;
56867 + }
56868 +#endif
56869 + return 0;
56870 +}
56871 diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
56872 new file mode 100644
56873 index 0000000..a45d2e9
56874 --- /dev/null
56875 +++ b/grsecurity/grsec_log.c
56876 @@ -0,0 +1,322 @@
56877 +#include <linux/kernel.h>
56878 +#include <linux/sched.h>
56879 +#include <linux/file.h>
56880 +#include <linux/tty.h>
56881 +#include <linux/fs.h>
56882 +#include <linux/grinternal.h>
56883 +
56884 +#ifdef CONFIG_TREE_PREEMPT_RCU
56885 +#define DISABLE_PREEMPT() preempt_disable()
56886 +#define ENABLE_PREEMPT() preempt_enable()
56887 +#else
56888 +#define DISABLE_PREEMPT()
56889 +#define ENABLE_PREEMPT()
56890 +#endif
56891 +
56892 +#define BEGIN_LOCKS(x) \
56893 + DISABLE_PREEMPT(); \
56894 + rcu_read_lock(); \
56895 + read_lock(&tasklist_lock); \
56896 + read_lock(&grsec_exec_file_lock); \
56897 + if (x != GR_DO_AUDIT) \
56898 + spin_lock(&grsec_alert_lock); \
56899 + else \
56900 + spin_lock(&grsec_audit_lock)
56901 +
56902 +#define END_LOCKS(x) \
56903 + if (x != GR_DO_AUDIT) \
56904 + spin_unlock(&grsec_alert_lock); \
56905 + else \
56906 + spin_unlock(&grsec_audit_lock); \
56907 + read_unlock(&grsec_exec_file_lock); \
56908 + read_unlock(&tasklist_lock); \
56909 + rcu_read_unlock(); \
56910 + ENABLE_PREEMPT(); \
56911 + if (x == GR_DONT_AUDIT) \
56912 + gr_handle_alertkill(current)
56913 +
56914 +enum {
56915 + FLOODING,
56916 + NO_FLOODING
56917 +};
56918 +
56919 +extern char *gr_alert_log_fmt;
56920 +extern char *gr_audit_log_fmt;
56921 +extern char *gr_alert_log_buf;
56922 +extern char *gr_audit_log_buf;
56923 +
56924 +static int gr_log_start(int audit)
56925 +{
56926 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
56927 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
56928 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
56929 +#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
56930 + unsigned long curr_secs = get_seconds();
56931 +
56932 + if (audit == GR_DO_AUDIT)
56933 + goto set_fmt;
56934 +
56935 + if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
56936 + grsec_alert_wtime = curr_secs;
56937 + grsec_alert_fyet = 0;
56938 + } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
56939 + && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
56940 + grsec_alert_fyet++;
56941 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
56942 + grsec_alert_wtime = curr_secs;
56943 + grsec_alert_fyet++;
56944 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
56945 + return FLOODING;
56946 + }
56947 + else return FLOODING;
56948 +
56949 +set_fmt:
56950 +#endif
56951 + memset(buf, 0, PAGE_SIZE);
56952 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
56953 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
56954 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
56955 + } else if (current->signal->curr_ip) {
56956 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
56957 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
56958 + } else if (gr_acl_is_enabled()) {
56959 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
56960 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
56961 + } else {
56962 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
56963 + strcpy(buf, fmt);
56964 + }
56965 +
56966 + return NO_FLOODING;
56967 +}
56968 +
56969 +static void gr_log_middle(int audit, const char *msg, va_list ap)
56970 + __attribute__ ((format (printf, 2, 0)));
56971 +
56972 +static void gr_log_middle(int audit, const char *msg, va_list ap)
56973 +{
56974 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
56975 + unsigned int len = strlen(buf);
56976 +
56977 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
56978 +
56979 + return;
56980 +}
56981 +
56982 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
56983 + __attribute__ ((format (printf, 2, 3)));
56984 +
56985 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
56986 +{
56987 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
56988 + unsigned int len = strlen(buf);
56989 + va_list ap;
56990 +
56991 + va_start(ap, msg);
56992 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
56993 + va_end(ap);
56994 +
56995 + return;
56996 +}
56997 +
56998 +static void gr_log_end(int audit, int append_default)
56999 +{
57000 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57001 +
57002 + if (append_default) {
57003 + unsigned int len = strlen(buf);
57004 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
57005 + }
57006 +
57007 + printk("%s\n", buf);
57008 +
57009 + return;
57010 +}
57011 +
57012 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
57013 +{
57014 + int logtype;
57015 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
57016 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
57017 + void *voidptr = NULL;
57018 + int num1 = 0, num2 = 0;
57019 + unsigned long ulong1 = 0, ulong2 = 0;
57020 + struct dentry *dentry = NULL;
57021 + struct vfsmount *mnt = NULL;
57022 + struct file *file = NULL;
57023 + struct task_struct *task = NULL;
57024 + const struct cred *cred, *pcred;
57025 + va_list ap;
57026 +
57027 + BEGIN_LOCKS(audit);
57028 + logtype = gr_log_start(audit);
57029 + if (logtype == FLOODING) {
57030 + END_LOCKS(audit);
57031 + return;
57032 + }
57033 + va_start(ap, argtypes);
57034 + switch (argtypes) {
57035 + case GR_TTYSNIFF:
57036 + task = va_arg(ap, struct task_struct *);
57037 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
57038 + break;
57039 + case GR_SYSCTL_HIDDEN:
57040 + str1 = va_arg(ap, char *);
57041 + gr_log_middle_varargs(audit, msg, result, str1);
57042 + break;
57043 + case GR_RBAC:
57044 + dentry = va_arg(ap, struct dentry *);
57045 + mnt = va_arg(ap, struct vfsmount *);
57046 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
57047 + break;
57048 + case GR_RBAC_STR:
57049 + dentry = va_arg(ap, struct dentry *);
57050 + mnt = va_arg(ap, struct vfsmount *);
57051 + str1 = va_arg(ap, char *);
57052 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
57053 + break;
57054 + case GR_STR_RBAC:
57055 + str1 = va_arg(ap, char *);
57056 + dentry = va_arg(ap, struct dentry *);
57057 + mnt = va_arg(ap, struct vfsmount *);
57058 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
57059 + break;
57060 + case GR_RBAC_MODE2:
57061 + dentry = va_arg(ap, struct dentry *);
57062 + mnt = va_arg(ap, struct vfsmount *);
57063 + str1 = va_arg(ap, char *);
57064 + str2 = va_arg(ap, char *);
57065 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
57066 + break;
57067 + case GR_RBAC_MODE3:
57068 + dentry = va_arg(ap, struct dentry *);
57069 + mnt = va_arg(ap, struct vfsmount *);
57070 + str1 = va_arg(ap, char *);
57071 + str2 = va_arg(ap, char *);
57072 + str3 = va_arg(ap, char *);
57073 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
57074 + break;
57075 + case GR_FILENAME:
57076 + dentry = va_arg(ap, struct dentry *);
57077 + mnt = va_arg(ap, struct vfsmount *);
57078 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
57079 + break;
57080 + case GR_STR_FILENAME:
57081 + str1 = va_arg(ap, char *);
57082 + dentry = va_arg(ap, struct dentry *);
57083 + mnt = va_arg(ap, struct vfsmount *);
57084 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
57085 + break;
57086 + case GR_FILENAME_STR:
57087 + dentry = va_arg(ap, struct dentry *);
57088 + mnt = va_arg(ap, struct vfsmount *);
57089 + str1 = va_arg(ap, char *);
57090 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
57091 + break;
57092 + case GR_FILENAME_TWO_INT:
57093 + dentry = va_arg(ap, struct dentry *);
57094 + mnt = va_arg(ap, struct vfsmount *);
57095 + num1 = va_arg(ap, int);
57096 + num2 = va_arg(ap, int);
57097 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
57098 + break;
57099 + case GR_FILENAME_TWO_INT_STR:
57100 + dentry = va_arg(ap, struct dentry *);
57101 + mnt = va_arg(ap, struct vfsmount *);
57102 + num1 = va_arg(ap, int);
57103 + num2 = va_arg(ap, int);
57104 + str1 = va_arg(ap, char *);
57105 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
57106 + break;
57107 + case GR_TEXTREL:
57108 + file = va_arg(ap, struct file *);
57109 + ulong1 = va_arg(ap, unsigned long);
57110 + ulong2 = va_arg(ap, unsigned long);
57111 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
57112 + break;
57113 + case GR_PTRACE:
57114 + task = va_arg(ap, struct task_struct *);
57115 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
57116 + break;
57117 + case GR_RESOURCE:
57118 + task = va_arg(ap, struct task_struct *);
57119 + cred = __task_cred(task);
57120 + pcred = __task_cred(task->real_parent);
57121 + ulong1 = va_arg(ap, unsigned long);
57122 + str1 = va_arg(ap, char *);
57123 + ulong2 = va_arg(ap, unsigned long);
57124 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
57125 + break;
57126 + case GR_CAP:
57127 + task = va_arg(ap, struct task_struct *);
57128 + cred = __task_cred(task);
57129 + pcred = __task_cred(task->real_parent);
57130 + str1 = va_arg(ap, char *);
57131 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
57132 + break;
57133 + case GR_SIG:
57134 + str1 = va_arg(ap, char *);
57135 + voidptr = va_arg(ap, void *);
57136 + gr_log_middle_varargs(audit, msg, str1, voidptr);
57137 + break;
57138 + case GR_SIG2:
57139 + task = va_arg(ap, struct task_struct *);
57140 + cred = __task_cred(task);
57141 + pcred = __task_cred(task->real_parent);
57142 + num1 = va_arg(ap, int);
57143 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
57144 + break;
57145 + case GR_CRASH1:
57146 + task = va_arg(ap, struct task_struct *);
57147 + cred = __task_cred(task);
57148 + pcred = __task_cred(task->real_parent);
57149 + ulong1 = va_arg(ap, unsigned long);
57150 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
57151 + break;
57152 + case GR_CRASH2:
57153 + task = va_arg(ap, struct task_struct *);
57154 + cred = __task_cred(task);
57155 + pcred = __task_cred(task->real_parent);
57156 + ulong1 = va_arg(ap, unsigned long);
57157 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
57158 + break;
57159 + case GR_RWXMAP:
57160 + file = va_arg(ap, struct file *);
57161 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
57162 + break;
57163 + case GR_PSACCT:
57164 + {
57165 + unsigned int wday, cday;
57166 + __u8 whr, chr;
57167 + __u8 wmin, cmin;
57168 + __u8 wsec, csec;
57169 + char cur_tty[64] = { 0 };
57170 + char parent_tty[64] = { 0 };
57171 +
57172 + task = va_arg(ap, struct task_struct *);
57173 + wday = va_arg(ap, unsigned int);
57174 + cday = va_arg(ap, unsigned int);
57175 + whr = va_arg(ap, int);
57176 + chr = va_arg(ap, int);
57177 + wmin = va_arg(ap, int);
57178 + cmin = va_arg(ap, int);
57179 + wsec = va_arg(ap, int);
57180 + csec = va_arg(ap, int);
57181 + ulong1 = va_arg(ap, unsigned long);
57182 + cred = __task_cred(task);
57183 + pcred = __task_cred(task->real_parent);
57184 +
57185 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
57186 + }
57187 + break;
57188 + default:
57189 + gr_log_middle(audit, msg, ap);
57190 + }
57191 + va_end(ap);
57192 + // these don't need DEFAULTSECARGS printed on the end
57193 + if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
57194 + gr_log_end(audit, 0);
57195 + else
57196 + gr_log_end(audit, 1);
57197 + END_LOCKS(audit);
57198 +}
57199 diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
57200 new file mode 100644
57201 index 0000000..f536303
57202 --- /dev/null
57203 +++ b/grsecurity/grsec_mem.c
57204 @@ -0,0 +1,40 @@
57205 +#include <linux/kernel.h>
57206 +#include <linux/sched.h>
57207 +#include <linux/mm.h>
57208 +#include <linux/mman.h>
57209 +#include <linux/grinternal.h>
57210 +
57211 +void
57212 +gr_handle_ioperm(void)
57213 +{
57214 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
57215 + return;
57216 +}
57217 +
57218 +void
57219 +gr_handle_iopl(void)
57220 +{
57221 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
57222 + return;
57223 +}
57224 +
57225 +void
57226 +gr_handle_mem_readwrite(u64 from, u64 to)
57227 +{
57228 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
57229 + return;
57230 +}
57231 +
57232 +void
57233 +gr_handle_vm86(void)
57234 +{
57235 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
57236 + return;
57237 +}
57238 +
57239 +void
57240 +gr_log_badprocpid(const char *entry)
57241 +{
57242 + gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
57243 + return;
57244 +}
57245 diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
57246 new file mode 100644
57247 index 0000000..2131422
57248 --- /dev/null
57249 +++ b/grsecurity/grsec_mount.c
57250 @@ -0,0 +1,62 @@
57251 +#include <linux/kernel.h>
57252 +#include <linux/sched.h>
57253 +#include <linux/mount.h>
57254 +#include <linux/grsecurity.h>
57255 +#include <linux/grinternal.h>
57256 +
57257 +void
57258 +gr_log_remount(const char *devname, const int retval)
57259 +{
57260 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
57261 + if (grsec_enable_mount && (retval >= 0))
57262 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
57263 +#endif
57264 + return;
57265 +}
57266 +
57267 +void
57268 +gr_log_unmount(const char *devname, const int retval)
57269 +{
57270 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
57271 + if (grsec_enable_mount && (retval >= 0))
57272 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
57273 +#endif
57274 + return;
57275 +}
57276 +
57277 +void
57278 +gr_log_mount(const char *from, const char *to, const int retval)
57279 +{
57280 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
57281 + if (grsec_enable_mount && (retval >= 0))
57282 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
57283 +#endif
57284 + return;
57285 +}
57286 +
57287 +int
57288 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
57289 +{
57290 +#ifdef CONFIG_GRKERNSEC_ROFS
57291 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
57292 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
57293 + return -EPERM;
57294 + } else
57295 + return 0;
57296 +#endif
57297 + return 0;
57298 +}
57299 +
57300 +int
57301 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
57302 +{
57303 +#ifdef CONFIG_GRKERNSEC_ROFS
57304 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
57305 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
57306 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
57307 + return -EPERM;
57308 + } else
57309 + return 0;
57310 +#endif
57311 + return 0;
57312 +}
57313 diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
57314 new file mode 100644
57315 index 0000000..a3b12a0
57316 --- /dev/null
57317 +++ b/grsecurity/grsec_pax.c
57318 @@ -0,0 +1,36 @@
57319 +#include <linux/kernel.h>
57320 +#include <linux/sched.h>
57321 +#include <linux/mm.h>
57322 +#include <linux/file.h>
57323 +#include <linux/grinternal.h>
57324 +#include <linux/grsecurity.h>
57325 +
57326 +void
57327 +gr_log_textrel(struct vm_area_struct * vma)
57328 +{
57329 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
57330 + if (grsec_enable_audit_textrel)
57331 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
57332 +#endif
57333 + return;
57334 +}
57335 +
57336 +void
57337 +gr_log_rwxmmap(struct file *file)
57338 +{
57339 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
57340 + if (grsec_enable_log_rwxmaps)
57341 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
57342 +#endif
57343 + return;
57344 +}
57345 +
57346 +void
57347 +gr_log_rwxmprotect(struct file *file)
57348 +{
57349 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
57350 + if (grsec_enable_log_rwxmaps)
57351 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
57352 +#endif
57353 + return;
57354 +}
57355 diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
57356 new file mode 100644
57357 index 0000000..f7f29aa
57358 --- /dev/null
57359 +++ b/grsecurity/grsec_ptrace.c
57360 @@ -0,0 +1,30 @@
57361 +#include <linux/kernel.h>
57362 +#include <linux/sched.h>
57363 +#include <linux/grinternal.h>
57364 +#include <linux/security.h>
57365 +
57366 +void
57367 +gr_audit_ptrace(struct task_struct *task)
57368 +{
57369 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
57370 + if (grsec_enable_audit_ptrace)
57371 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
57372 +#endif
57373 + return;
57374 +}
57375 +
57376 +int
57377 +gr_ptrace_readexec(struct file *file, int unsafe_flags)
57378 +{
57379 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
57380 + const struct dentry *dentry = file->f_path.dentry;
57381 + const struct vfsmount *mnt = file->f_path.mnt;
57382 +
57383 + if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
57384 + (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
57385 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
57386 + return -EACCES;
57387 + }
57388 +#endif
57389 + return 0;
57390 +}
57391 diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
57392 new file mode 100644
57393 index 0000000..7a5b2de
57394 --- /dev/null
57395 +++ b/grsecurity/grsec_sig.c
57396 @@ -0,0 +1,207 @@
57397 +#include <linux/kernel.h>
57398 +#include <linux/sched.h>
57399 +#include <linux/delay.h>
57400 +#include <linux/grsecurity.h>
57401 +#include <linux/grinternal.h>
57402 +#include <linux/hardirq.h>
57403 +
57404 +char *signames[] = {
57405 + [SIGSEGV] = "Segmentation fault",
57406 + [SIGILL] = "Illegal instruction",
57407 + [SIGABRT] = "Abort",
57408 + [SIGBUS] = "Invalid alignment/Bus error"
57409 +};
57410 +
57411 +void
57412 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
57413 +{
57414 +#ifdef CONFIG_GRKERNSEC_SIGNAL
57415 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
57416 + (sig == SIGABRT) || (sig == SIGBUS))) {
57417 + if (t->pid == current->pid) {
57418 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
57419 + } else {
57420 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
57421 + }
57422 + }
57423 +#endif
57424 + return;
57425 +}
57426 +
57427 +int
57428 +gr_handle_signal(const struct task_struct *p, const int sig)
57429 +{
57430 +#ifdef CONFIG_GRKERNSEC
57431 + /* ignore the 0 signal for protected task checks */
57432 + if (current->pid > 1 && sig && gr_check_protected_task(p)) {
57433 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
57434 + return -EPERM;
57435 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
57436 + return -EPERM;
57437 + }
57438 +#endif
57439 + return 0;
57440 +}
57441 +
57442 +#ifdef CONFIG_GRKERNSEC
57443 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
57444 +
57445 +int gr_fake_force_sig(int sig, struct task_struct *t)
57446 +{
57447 + unsigned long int flags;
57448 + int ret, blocked, ignored;
57449 + struct k_sigaction *action;
57450 +
57451 + spin_lock_irqsave(&t->sighand->siglock, flags);
57452 + action = &t->sighand->action[sig-1];
57453 + ignored = action->sa.sa_handler == SIG_IGN;
57454 + blocked = sigismember(&t->blocked, sig);
57455 + if (blocked || ignored) {
57456 + action->sa.sa_handler = SIG_DFL;
57457 + if (blocked) {
57458 + sigdelset(&t->blocked, sig);
57459 + recalc_sigpending_and_wake(t);
57460 + }
57461 + }
57462 + if (action->sa.sa_handler == SIG_DFL)
57463 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
57464 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
57465 +
57466 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
57467 +
57468 + return ret;
57469 +}
57470 +#endif
57471 +
57472 +#ifdef CONFIG_GRKERNSEC_BRUTE
57473 +#define GR_USER_BAN_TIME (15 * 60)
57474 +
57475 +static int __get_dumpable(unsigned long mm_flags)
57476 +{
57477 + int ret;
57478 +
57479 + ret = mm_flags & MMF_DUMPABLE_MASK;
57480 + return (ret >= 2) ? 2 : ret;
57481 +}
57482 +#endif
57483 +
57484 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
57485 +{
57486 +#ifdef CONFIG_GRKERNSEC_BRUTE
57487 + uid_t uid = 0;
57488 +
57489 + if (!grsec_enable_brute)
57490 + return;
57491 +
57492 + rcu_read_lock();
57493 + read_lock(&tasklist_lock);
57494 + read_lock(&grsec_exec_file_lock);
57495 + if (p->real_parent && p->real_parent->exec_file == p->exec_file)
57496 + p->real_parent->brute = 1;
57497 + else {
57498 + const struct cred *cred = __task_cred(p), *cred2;
57499 + struct task_struct *tsk, *tsk2;
57500 +
57501 + if (!__get_dumpable(mm_flags) && cred->uid) {
57502 + struct user_struct *user;
57503 +
57504 + uid = cred->uid;
57505 +
57506 + /* this is put upon execution past expiration */
57507 + user = find_user(uid);
57508 + if (user == NULL)
57509 + goto unlock;
57510 + user->banned = 1;
57511 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
57512 + if (user->ban_expires == ~0UL)
57513 + user->ban_expires--;
57514 +
57515 + do_each_thread(tsk2, tsk) {
57516 + cred2 = __task_cred(tsk);
57517 + if (tsk != p && cred2->uid == uid)
57518 + gr_fake_force_sig(SIGKILL, tsk);
57519 + } while_each_thread(tsk2, tsk);
57520 + }
57521 + }
57522 +unlock:
57523 + read_unlock(&grsec_exec_file_lock);
57524 + read_unlock(&tasklist_lock);
57525 + rcu_read_unlock();
57526 +
57527 + if (uid)
57528 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
57529 +
57530 +#endif
57531 + return;
57532 +}
57533 +
57534 +void gr_handle_brute_check(void)
57535 +{
57536 +#ifdef CONFIG_GRKERNSEC_BRUTE
57537 + if (current->brute)
57538 + msleep(30 * 1000);
57539 +#endif
57540 + return;
57541 +}
57542 +
57543 +void gr_handle_kernel_exploit(void)
57544 +{
57545 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
57546 + const struct cred *cred;
57547 + struct task_struct *tsk, *tsk2;
57548 + struct user_struct *user;
57549 + uid_t uid;
57550 +
57551 + if (in_irq() || in_serving_softirq() || in_nmi())
57552 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
57553 +
57554 + uid = current_uid();
57555 +
57556 + if (uid == 0)
57557 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
57558 + else {
57559 + /* kill all the processes of this user, hold a reference
57560 + to their creds struct, and prevent them from creating
57561 + another process until system reset
57562 + */
57563 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
57564 + /* we intentionally leak this ref */
57565 + user = get_uid(current->cred->user);
57566 + if (user) {
57567 + user->banned = 1;
57568 + user->ban_expires = ~0UL;
57569 + }
57570 +
57571 + read_lock(&tasklist_lock);
57572 + do_each_thread(tsk2, tsk) {
57573 + cred = __task_cred(tsk);
57574 + if (cred->uid == uid)
57575 + gr_fake_force_sig(SIGKILL, tsk);
57576 + } while_each_thread(tsk2, tsk);
57577 + read_unlock(&tasklist_lock);
57578 + }
57579 +#endif
57580 +}
57581 +
57582 +int __gr_process_user_ban(struct user_struct *user)
57583 +{
57584 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
57585 + if (unlikely(user->banned)) {
57586 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
57587 + user->banned = 0;
57588 + user->ban_expires = 0;
57589 + free_uid(user);
57590 + } else
57591 + return -EPERM;
57592 + }
57593 +#endif
57594 + return 0;
57595 +}
57596 +
57597 +int gr_process_user_ban(void)
57598 +{
57599 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
57600 + return __gr_process_user_ban(current->cred->user);
57601 +#endif
57602 + return 0;
57603 +}
57604 diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
57605 new file mode 100644
57606 index 0000000..4030d57
57607 --- /dev/null
57608 +++ b/grsecurity/grsec_sock.c
57609 @@ -0,0 +1,244 @@
57610 +#include <linux/kernel.h>
57611 +#include <linux/module.h>
57612 +#include <linux/sched.h>
57613 +#include <linux/file.h>
57614 +#include <linux/net.h>
57615 +#include <linux/in.h>
57616 +#include <linux/ip.h>
57617 +#include <net/sock.h>
57618 +#include <net/inet_sock.h>
57619 +#include <linux/grsecurity.h>
57620 +#include <linux/grinternal.h>
57621 +#include <linux/gracl.h>
57622 +
57623 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
57624 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
57625 +
57626 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
57627 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
57628 +
57629 +#ifdef CONFIG_UNIX_MODULE
57630 +EXPORT_SYMBOL(gr_acl_handle_unix);
57631 +EXPORT_SYMBOL(gr_acl_handle_mknod);
57632 +EXPORT_SYMBOL(gr_handle_chroot_unix);
57633 +EXPORT_SYMBOL(gr_handle_create);
57634 +#endif
57635 +
57636 +#ifdef CONFIG_GRKERNSEC
57637 +#define gr_conn_table_size 32749
57638 +struct conn_table_entry {
57639 + struct conn_table_entry *next;
57640 + struct signal_struct *sig;
57641 +};
57642 +
57643 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
57644 +DEFINE_SPINLOCK(gr_conn_table_lock);
57645 +
57646 +extern const char * gr_socktype_to_name(unsigned char type);
57647 +extern const char * gr_proto_to_name(unsigned char proto);
57648 +extern const char * gr_sockfamily_to_name(unsigned char family);
57649 +
57650 +static __inline__ int
57651 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
57652 +{
57653 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
57654 +}
57655 +
57656 +static __inline__ int
57657 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
57658 + __u16 sport, __u16 dport)
57659 +{
57660 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
57661 + sig->gr_sport == sport && sig->gr_dport == dport))
57662 + return 1;
57663 + else
57664 + return 0;
57665 +}
57666 +
57667 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
57668 +{
57669 + struct conn_table_entry **match;
57670 + unsigned int index;
57671 +
57672 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
57673 + sig->gr_sport, sig->gr_dport,
57674 + gr_conn_table_size);
57675 +
57676 + newent->sig = sig;
57677 +
57678 + match = &gr_conn_table[index];
57679 + newent->next = *match;
57680 + *match = newent;
57681 +
57682 + return;
57683 +}
57684 +
57685 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
57686 +{
57687 + struct conn_table_entry *match, *last = NULL;
57688 + unsigned int index;
57689 +
57690 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
57691 + sig->gr_sport, sig->gr_dport,
57692 + gr_conn_table_size);
57693 +
57694 + match = gr_conn_table[index];
57695 + while (match && !conn_match(match->sig,
57696 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
57697 + sig->gr_dport)) {
57698 + last = match;
57699 + match = match->next;
57700 + }
57701 +
57702 + if (match) {
57703 + if (last)
57704 + last->next = match->next;
57705 + else
57706 + gr_conn_table[index] = NULL;
57707 + kfree(match);
57708 + }
57709 +
57710 + return;
57711 +}
57712 +
57713 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
57714 + __u16 sport, __u16 dport)
57715 +{
57716 + struct conn_table_entry *match;
57717 + unsigned int index;
57718 +
57719 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
57720 +
57721 + match = gr_conn_table[index];
57722 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
57723 + match = match->next;
57724 +
57725 + if (match)
57726 + return match->sig;
57727 + else
57728 + return NULL;
57729 +}
57730 +
57731 +#endif
57732 +
57733 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
57734 +{
57735 +#ifdef CONFIG_GRKERNSEC
57736 + struct signal_struct *sig = task->signal;
57737 + struct conn_table_entry *newent;
57738 +
57739 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
57740 + if (newent == NULL)
57741 + return;
57742 + /* no bh lock needed since we are called with bh disabled */
57743 + spin_lock(&gr_conn_table_lock);
57744 + gr_del_task_from_ip_table_nolock(sig);
57745 + sig->gr_saddr = inet->inet_rcv_saddr;
57746 + sig->gr_daddr = inet->inet_daddr;
57747 + sig->gr_sport = inet->inet_sport;
57748 + sig->gr_dport = inet->inet_dport;
57749 + gr_add_to_task_ip_table_nolock(sig, newent);
57750 + spin_unlock(&gr_conn_table_lock);
57751 +#endif
57752 + return;
57753 +}
57754 +
57755 +void gr_del_task_from_ip_table(struct task_struct *task)
57756 +{
57757 +#ifdef CONFIG_GRKERNSEC
57758 + spin_lock_bh(&gr_conn_table_lock);
57759 + gr_del_task_from_ip_table_nolock(task->signal);
57760 + spin_unlock_bh(&gr_conn_table_lock);
57761 +#endif
57762 + return;
57763 +}
57764 +
57765 +void
57766 +gr_attach_curr_ip(const struct sock *sk)
57767 +{
57768 +#ifdef CONFIG_GRKERNSEC
57769 + struct signal_struct *p, *set;
57770 + const struct inet_sock *inet = inet_sk(sk);
57771 +
57772 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
57773 + return;
57774 +
57775 + set = current->signal;
57776 +
57777 + spin_lock_bh(&gr_conn_table_lock);
57778 + p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
57779 + inet->inet_dport, inet->inet_sport);
57780 + if (unlikely(p != NULL)) {
57781 + set->curr_ip = p->curr_ip;
57782 + set->used_accept = 1;
57783 + gr_del_task_from_ip_table_nolock(p);
57784 + spin_unlock_bh(&gr_conn_table_lock);
57785 + return;
57786 + }
57787 + spin_unlock_bh(&gr_conn_table_lock);
57788 +
57789 + set->curr_ip = inet->inet_daddr;
57790 + set->used_accept = 1;
57791 +#endif
57792 + return;
57793 +}
57794 +
57795 +int
57796 +gr_handle_sock_all(const int family, const int type, const int protocol)
57797 +{
57798 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
57799 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
57800 + (family != AF_UNIX)) {
57801 + if (family == AF_INET)
57802 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
57803 + else
57804 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
57805 + return -EACCES;
57806 + }
57807 +#endif
57808 + return 0;
57809 +}
57810 +
57811 +int
57812 +gr_handle_sock_server(const struct sockaddr *sck)
57813 +{
57814 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
57815 + if (grsec_enable_socket_server &&
57816 + in_group_p(grsec_socket_server_gid) &&
57817 + sck && (sck->sa_family != AF_UNIX) &&
57818 + (sck->sa_family != AF_LOCAL)) {
57819 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
57820 + return -EACCES;
57821 + }
57822 +#endif
57823 + return 0;
57824 +}
57825 +
57826 +int
57827 +gr_handle_sock_server_other(const struct sock *sck)
57828 +{
57829 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
57830 + if (grsec_enable_socket_server &&
57831 + in_group_p(grsec_socket_server_gid) &&
57832 + sck && (sck->sk_family != AF_UNIX) &&
57833 + (sck->sk_family != AF_LOCAL)) {
57834 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
57835 + return -EACCES;
57836 + }
57837 +#endif
57838 + return 0;
57839 +}
57840 +
57841 +int
57842 +gr_handle_sock_client(const struct sockaddr *sck)
57843 +{
57844 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
57845 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
57846 + sck && (sck->sa_family != AF_UNIX) &&
57847 + (sck->sa_family != AF_LOCAL)) {
57848 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
57849 + return -EACCES;
57850 + }
57851 +#endif
57852 + return 0;
57853 +}
57854 diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
57855 new file mode 100644
57856 index 0000000..a1aedd7
57857 --- /dev/null
57858 +++ b/grsecurity/grsec_sysctl.c
57859 @@ -0,0 +1,451 @@
57860 +#include <linux/kernel.h>
57861 +#include <linux/sched.h>
57862 +#include <linux/sysctl.h>
57863 +#include <linux/grsecurity.h>
57864 +#include <linux/grinternal.h>
57865 +
57866 +int
57867 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
57868 +{
57869 +#ifdef CONFIG_GRKERNSEC_SYSCTL
57870 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
57871 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
57872 + return -EACCES;
57873 + }
57874 +#endif
57875 + return 0;
57876 +}
57877 +
57878 +#ifdef CONFIG_GRKERNSEC_ROFS
57879 +static int __maybe_unused one = 1;
57880 +#endif
57881 +
57882 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
57883 +struct ctl_table grsecurity_table[] = {
57884 +#ifdef CONFIG_GRKERNSEC_SYSCTL
57885 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
57886 +#ifdef CONFIG_GRKERNSEC_IO
57887 + {
57888 + .procname = "disable_priv_io",
57889 + .data = &grsec_disable_privio,
57890 + .maxlen = sizeof(int),
57891 + .mode = 0600,
57892 + .proc_handler = &proc_dointvec,
57893 + },
57894 +#endif
57895 +#endif
57896 +#ifdef CONFIG_GRKERNSEC_LINK
57897 + {
57898 + .procname = "linking_restrictions",
57899 + .data = &grsec_enable_link,
57900 + .maxlen = sizeof(int),
57901 + .mode = 0600,
57902 + .proc_handler = &proc_dointvec,
57903 + },
57904 +#endif
57905 +#ifdef CONFIG_GRKERNSEC_BRUTE
57906 + {
57907 + .procname = "deter_bruteforce",
57908 + .data = &grsec_enable_brute,
57909 + .maxlen = sizeof(int),
57910 + .mode = 0600,
57911 + .proc_handler = &proc_dointvec,
57912 + },
57913 +#endif
57914 +#ifdef CONFIG_GRKERNSEC_FIFO
57915 + {
57916 + .procname = "fifo_restrictions",
57917 + .data = &grsec_enable_fifo,
57918 + .maxlen = sizeof(int),
57919 + .mode = 0600,
57920 + .proc_handler = &proc_dointvec,
57921 + },
57922 +#endif
57923 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
57924 + {
57925 + .procname = "ptrace_readexec",
57926 + .data = &grsec_enable_ptrace_readexec,
57927 + .maxlen = sizeof(int),
57928 + .mode = 0600,
57929 + .proc_handler = &proc_dointvec,
57930 + },
57931 +#endif
57932 +#ifdef CONFIG_GRKERNSEC_SETXID
57933 + {
57934 + .procname = "consistent_setxid",
57935 + .data = &grsec_enable_setxid,
57936 + .maxlen = sizeof(int),
57937 + .mode = 0600,
57938 + .proc_handler = &proc_dointvec,
57939 + },
57940 +#endif
57941 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
57942 + {
57943 + .procname = "ip_blackhole",
57944 + .data = &grsec_enable_blackhole,
57945 + .maxlen = sizeof(int),
57946 + .mode = 0600,
57947 + .proc_handler = &proc_dointvec,
57948 + },
57949 + {
57950 + .procname = "lastack_retries",
57951 + .data = &grsec_lastack_retries,
57952 + .maxlen = sizeof(int),
57953 + .mode = 0600,
57954 + .proc_handler = &proc_dointvec,
57955 + },
57956 +#endif
57957 +#ifdef CONFIG_GRKERNSEC_EXECLOG
57958 + {
57959 + .procname = "exec_logging",
57960 + .data = &grsec_enable_execlog,
57961 + .maxlen = sizeof(int),
57962 + .mode = 0600,
57963 + .proc_handler = &proc_dointvec,
57964 + },
57965 +#endif
57966 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
57967 + {
57968 + .procname = "rwxmap_logging",
57969 + .data = &grsec_enable_log_rwxmaps,
57970 + .maxlen = sizeof(int),
57971 + .mode = 0600,
57972 + .proc_handler = &proc_dointvec,
57973 + },
57974 +#endif
57975 +#ifdef CONFIG_GRKERNSEC_SIGNAL
57976 + {
57977 + .procname = "signal_logging",
57978 + .data = &grsec_enable_signal,
57979 + .maxlen = sizeof(int),
57980 + .mode = 0600,
57981 + .proc_handler = &proc_dointvec,
57982 + },
57983 +#endif
57984 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
57985 + {
57986 + .procname = "forkfail_logging",
57987 + .data = &grsec_enable_forkfail,
57988 + .maxlen = sizeof(int),
57989 + .mode = 0600,
57990 + .proc_handler = &proc_dointvec,
57991 + },
57992 +#endif
57993 +#ifdef CONFIG_GRKERNSEC_TIME
57994 + {
57995 + .procname = "timechange_logging",
57996 + .data = &grsec_enable_time,
57997 + .maxlen = sizeof(int),
57998 + .mode = 0600,
57999 + .proc_handler = &proc_dointvec,
58000 + },
58001 +#endif
58002 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
58003 + {
58004 + .procname = "chroot_deny_shmat",
58005 + .data = &grsec_enable_chroot_shmat,
58006 + .maxlen = sizeof(int),
58007 + .mode = 0600,
58008 + .proc_handler = &proc_dointvec,
58009 + },
58010 +#endif
58011 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
58012 + {
58013 + .procname = "chroot_deny_unix",
58014 + .data = &grsec_enable_chroot_unix,
58015 + .maxlen = sizeof(int),
58016 + .mode = 0600,
58017 + .proc_handler = &proc_dointvec,
58018 + },
58019 +#endif
58020 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
58021 + {
58022 + .procname = "chroot_deny_mount",
58023 + .data = &grsec_enable_chroot_mount,
58024 + .maxlen = sizeof(int),
58025 + .mode = 0600,
58026 + .proc_handler = &proc_dointvec,
58027 + },
58028 +#endif
58029 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
58030 + {
58031 + .procname = "chroot_deny_fchdir",
58032 + .data = &grsec_enable_chroot_fchdir,
58033 + .maxlen = sizeof(int),
58034 + .mode = 0600,
58035 + .proc_handler = &proc_dointvec,
58036 + },
58037 +#endif
58038 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
58039 + {
58040 + .procname = "chroot_deny_chroot",
58041 + .data = &grsec_enable_chroot_double,
58042 + .maxlen = sizeof(int),
58043 + .mode = 0600,
58044 + .proc_handler = &proc_dointvec,
58045 + },
58046 +#endif
58047 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
58048 + {
58049 + .procname = "chroot_deny_pivot",
58050 + .data = &grsec_enable_chroot_pivot,
58051 + .maxlen = sizeof(int),
58052 + .mode = 0600,
58053 + .proc_handler = &proc_dointvec,
58054 + },
58055 +#endif
58056 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
58057 + {
58058 + .procname = "chroot_enforce_chdir",
58059 + .data = &grsec_enable_chroot_chdir,
58060 + .maxlen = sizeof(int),
58061 + .mode = 0600,
58062 + .proc_handler = &proc_dointvec,
58063 + },
58064 +#endif
58065 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
58066 + {
58067 + .procname = "chroot_deny_chmod",
58068 + .data = &grsec_enable_chroot_chmod,
58069 + .maxlen = sizeof(int),
58070 + .mode = 0600,
58071 + .proc_handler = &proc_dointvec,
58072 + },
58073 +#endif
58074 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
58075 + {
58076 + .procname = "chroot_deny_mknod",
58077 + .data = &grsec_enable_chroot_mknod,
58078 + .maxlen = sizeof(int),
58079 + .mode = 0600,
58080 + .proc_handler = &proc_dointvec,
58081 + },
58082 +#endif
58083 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
58084 + {
58085 + .procname = "chroot_restrict_nice",
58086 + .data = &grsec_enable_chroot_nice,
58087 + .maxlen = sizeof(int),
58088 + .mode = 0600,
58089 + .proc_handler = &proc_dointvec,
58090 + },
58091 +#endif
58092 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
58093 + {
58094 + .procname = "chroot_execlog",
58095 + .data = &grsec_enable_chroot_execlog,
58096 + .maxlen = sizeof(int),
58097 + .mode = 0600,
58098 + .proc_handler = &proc_dointvec,
58099 + },
58100 +#endif
58101 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
58102 + {
58103 + .procname = "chroot_caps",
58104 + .data = &grsec_enable_chroot_caps,
58105 + .maxlen = sizeof(int),
58106 + .mode = 0600,
58107 + .proc_handler = &proc_dointvec,
58108 + },
58109 +#endif
58110 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
58111 + {
58112 + .procname = "chroot_deny_sysctl",
58113 + .data = &grsec_enable_chroot_sysctl,
58114 + .maxlen = sizeof(int),
58115 + .mode = 0600,
58116 + .proc_handler = &proc_dointvec,
58117 + },
58118 +#endif
58119 +#ifdef CONFIG_GRKERNSEC_TPE
58120 + {
58121 + .procname = "tpe",
58122 + .data = &grsec_enable_tpe,
58123 + .maxlen = sizeof(int),
58124 + .mode = 0600,
58125 + .proc_handler = &proc_dointvec,
58126 + },
58127 + {
58128 + .procname = "tpe_gid",
58129 + .data = &grsec_tpe_gid,
58130 + .maxlen = sizeof(int),
58131 + .mode = 0600,
58132 + .proc_handler = &proc_dointvec,
58133 + },
58134 +#endif
58135 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
58136 + {
58137 + .procname = "tpe_invert",
58138 + .data = &grsec_enable_tpe_invert,
58139 + .maxlen = sizeof(int),
58140 + .mode = 0600,
58141 + .proc_handler = &proc_dointvec,
58142 + },
58143 +#endif
58144 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
58145 + {
58146 + .procname = "tpe_restrict_all",
58147 + .data = &grsec_enable_tpe_all,
58148 + .maxlen = sizeof(int),
58149 + .mode = 0600,
58150 + .proc_handler = &proc_dointvec,
58151 + },
58152 +#endif
58153 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
58154 + {
58155 + .procname = "socket_all",
58156 + .data = &grsec_enable_socket_all,
58157 + .maxlen = sizeof(int),
58158 + .mode = 0600,
58159 + .proc_handler = &proc_dointvec,
58160 + },
58161 + {
58162 + .procname = "socket_all_gid",
58163 + .data = &grsec_socket_all_gid,
58164 + .maxlen = sizeof(int),
58165 + .mode = 0600,
58166 + .proc_handler = &proc_dointvec,
58167 + },
58168 +#endif
58169 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
58170 + {
58171 + .procname = "socket_client",
58172 + .data = &grsec_enable_socket_client,
58173 + .maxlen = sizeof(int),
58174 + .mode = 0600,
58175 + .proc_handler = &proc_dointvec,
58176 + },
58177 + {
58178 + .procname = "socket_client_gid",
58179 + .data = &grsec_socket_client_gid,
58180 + .maxlen = sizeof(int),
58181 + .mode = 0600,
58182 + .proc_handler = &proc_dointvec,
58183 + },
58184 +#endif
58185 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
58186 + {
58187 + .procname = "socket_server",
58188 + .data = &grsec_enable_socket_server,
58189 + .maxlen = sizeof(int),
58190 + .mode = 0600,
58191 + .proc_handler = &proc_dointvec,
58192 + },
58193 + {
58194 + .procname = "socket_server_gid",
58195 + .data = &grsec_socket_server_gid,
58196 + .maxlen = sizeof(int),
58197 + .mode = 0600,
58198 + .proc_handler = &proc_dointvec,
58199 + },
58200 +#endif
58201 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
58202 + {
58203 + .procname = "audit_group",
58204 + .data = &grsec_enable_group,
58205 + .maxlen = sizeof(int),
58206 + .mode = 0600,
58207 + .proc_handler = &proc_dointvec,
58208 + },
58209 + {
58210 + .procname = "audit_gid",
58211 + .data = &grsec_audit_gid,
58212 + .maxlen = sizeof(int),
58213 + .mode = 0600,
58214 + .proc_handler = &proc_dointvec,
58215 + },
58216 +#endif
58217 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
58218 + {
58219 + .procname = "audit_chdir",
58220 + .data = &grsec_enable_chdir,
58221 + .maxlen = sizeof(int),
58222 + .mode = 0600,
58223 + .proc_handler = &proc_dointvec,
58224 + },
58225 +#endif
58226 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
58227 + {
58228 + .procname = "audit_mount",
58229 + .data = &grsec_enable_mount,
58230 + .maxlen = sizeof(int),
58231 + .mode = 0600,
58232 + .proc_handler = &proc_dointvec,
58233 + },
58234 +#endif
58235 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
58236 + {
58237 + .procname = "audit_textrel",
58238 + .data = &grsec_enable_audit_textrel,
58239 + .maxlen = sizeof(int),
58240 + .mode = 0600,
58241 + .proc_handler = &proc_dointvec,
58242 + },
58243 +#endif
58244 +#ifdef CONFIG_GRKERNSEC_DMESG
58245 + {
58246 + .procname = "dmesg",
58247 + .data = &grsec_enable_dmesg,
58248 + .maxlen = sizeof(int),
58249 + .mode = 0600,
58250 + .proc_handler = &proc_dointvec,
58251 + },
58252 +#endif
58253 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
58254 + {
58255 + .procname = "chroot_findtask",
58256 + .data = &grsec_enable_chroot_findtask,
58257 + .maxlen = sizeof(int),
58258 + .mode = 0600,
58259 + .proc_handler = &proc_dointvec,
58260 + },
58261 +#endif
58262 +#ifdef CONFIG_GRKERNSEC_RESLOG
58263 + {
58264 + .procname = "resource_logging",
58265 + .data = &grsec_resource_logging,
58266 + .maxlen = sizeof(int),
58267 + .mode = 0600,
58268 + .proc_handler = &proc_dointvec,
58269 + },
58270 +#endif
58271 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
58272 + {
58273 + .procname = "audit_ptrace",
58274 + .data = &grsec_enable_audit_ptrace,
58275 + .maxlen = sizeof(int),
58276 + .mode = 0600,
58277 + .proc_handler = &proc_dointvec,
58278 + },
58279 +#endif
58280 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
58281 + {
58282 + .procname = "harden_ptrace",
58283 + .data = &grsec_enable_harden_ptrace,
58284 + .maxlen = sizeof(int),
58285 + .mode = 0600,
58286 + .proc_handler = &proc_dointvec,
58287 + },
58288 +#endif
58289 + {
58290 + .procname = "grsec_lock",
58291 + .data = &grsec_lock,
58292 + .maxlen = sizeof(int),
58293 + .mode = 0600,
58294 + .proc_handler = &proc_dointvec,
58295 + },
58296 +#endif
58297 +#ifdef CONFIG_GRKERNSEC_ROFS
58298 + {
58299 + .procname = "romount_protect",
58300 + .data = &grsec_enable_rofs,
58301 + .maxlen = sizeof(int),
58302 + .mode = 0600,
58303 + .proc_handler = &proc_dointvec_minmax,
58304 + .extra1 = &one,
58305 + .extra2 = &one,
58306 + },
58307 +#endif
58308 + { }
58309 +};
58310 +#endif
58311 diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
58312 new file mode 100644
58313 index 0000000..0dc13c3
58314 --- /dev/null
58315 +++ b/grsecurity/grsec_time.c
58316 @@ -0,0 +1,16 @@
58317 +#include <linux/kernel.h>
58318 +#include <linux/sched.h>
58319 +#include <linux/grinternal.h>
58320 +#include <linux/module.h>
58321 +
58322 +void
58323 +gr_log_timechange(void)
58324 +{
58325 +#ifdef CONFIG_GRKERNSEC_TIME
58326 + if (grsec_enable_time)
58327 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
58328 +#endif
58329 + return;
58330 +}
58331 +
58332 +EXPORT_SYMBOL(gr_log_timechange);
58333 diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
58334 new file mode 100644
58335 index 0000000..07e0dc0
58336 --- /dev/null
58337 +++ b/grsecurity/grsec_tpe.c
58338 @@ -0,0 +1,73 @@
58339 +#include <linux/kernel.h>
58340 +#include <linux/sched.h>
58341 +#include <linux/file.h>
58342 +#include <linux/fs.h>
58343 +#include <linux/grinternal.h>
58344 +
58345 +extern int gr_acl_tpe_check(void);
58346 +
58347 +int
58348 +gr_tpe_allow(const struct file *file)
58349 +{
58350 +#ifdef CONFIG_GRKERNSEC
58351 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
58352 + const struct cred *cred = current_cred();
58353 + char *msg = NULL;
58354 + char *msg2 = NULL;
58355 +
58356 + // never restrict root
58357 + if (!cred->uid)
58358 + return 1;
58359 +
58360 + if (grsec_enable_tpe) {
58361 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
58362 + if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
58363 + msg = "not being in trusted group";
58364 + else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
58365 + msg = "being in untrusted group";
58366 +#else
58367 + if (in_group_p(grsec_tpe_gid))
58368 + msg = "being in untrusted group";
58369 +#endif
58370 + }
58371 + if (!msg && gr_acl_tpe_check())
58372 + msg = "being in untrusted role";
58373 +
58374 + // not in any affected group/role
58375 + if (!msg)
58376 + goto next_check;
58377 +
58378 + if (inode->i_uid)
58379 + msg2 = "file in non-root-owned directory";
58380 + else if (inode->i_mode & S_IWOTH)
58381 + msg2 = "file in world-writable directory";
58382 + else if (inode->i_mode & S_IWGRP)
58383 + msg2 = "file in group-writable directory";
58384 +
58385 + if (msg && msg2) {
58386 + char fullmsg[70] = {0};
58387 + snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
58388 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
58389 + return 0;
58390 + }
58391 + msg = NULL;
58392 +next_check:
58393 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
58394 + if (!grsec_enable_tpe || !grsec_enable_tpe_all)
58395 + return 1;
58396 +
58397 + if (inode->i_uid && (inode->i_uid != cred->uid))
58398 + msg = "directory not owned by user";
58399 + else if (inode->i_mode & S_IWOTH)
58400 + msg = "file in world-writable directory";
58401 + else if (inode->i_mode & S_IWGRP)
58402 + msg = "file in group-writable directory";
58403 +
58404 + if (msg) {
58405 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
58406 + return 0;
58407 + }
58408 +#endif
58409 +#endif
58410 + return 1;
58411 +}
58412 diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
58413 new file mode 100644
58414 index 0000000..9f7b1ac
58415 --- /dev/null
58416 +++ b/grsecurity/grsum.c
58417 @@ -0,0 +1,61 @@
58418 +#include <linux/err.h>
58419 +#include <linux/kernel.h>
58420 +#include <linux/sched.h>
58421 +#include <linux/mm.h>
58422 +#include <linux/scatterlist.h>
58423 +#include <linux/crypto.h>
58424 +#include <linux/gracl.h>
58425 +
58426 +
58427 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
58428 +#error "crypto and sha256 must be built into the kernel"
58429 +#endif
58430 +
58431 +int
58432 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
58433 +{
58434 + char *p;
58435 + struct crypto_hash *tfm;
58436 + struct hash_desc desc;
58437 + struct scatterlist sg;
58438 + unsigned char temp_sum[GR_SHA_LEN];
58439 + volatile int retval = 0;
58440 + volatile int dummy = 0;
58441 + unsigned int i;
58442 +
58443 + sg_init_table(&sg, 1);
58444 +
58445 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
58446 + if (IS_ERR(tfm)) {
58447 + /* should never happen, since sha256 should be built in */
58448 + return 1;
58449 + }
58450 +
58451 + desc.tfm = tfm;
58452 + desc.flags = 0;
58453 +
58454 + crypto_hash_init(&desc);
58455 +
58456 + p = salt;
58457 + sg_set_buf(&sg, p, GR_SALT_LEN);
58458 + crypto_hash_update(&desc, &sg, sg.length);
58459 +
58460 + p = entry->pw;
58461 + sg_set_buf(&sg, p, strlen(p));
58462 +
58463 + crypto_hash_update(&desc, &sg, sg.length);
58464 +
58465 + crypto_hash_final(&desc, temp_sum);
58466 +
58467 + memset(entry->pw, 0, GR_PW_LEN);
58468 +
58469 + for (i = 0; i < GR_SHA_LEN; i++)
58470 + if (sum[i] != temp_sum[i])
58471 + retval = 1;
58472 + else
58473 + dummy = 1; // waste a cycle
58474 +
58475 + crypto_free_hash(tfm);
58476 +
58477 + return retval;
58478 +}
58479 diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
58480 index 6cd5b64..f620d2d 100644
58481 --- a/include/acpi/acpi_bus.h
58482 +++ b/include/acpi/acpi_bus.h
58483 @@ -107,7 +107,7 @@ struct acpi_device_ops {
58484 acpi_op_bind bind;
58485 acpi_op_unbind unbind;
58486 acpi_op_notify notify;
58487 -};
58488 +} __no_const;
58489
58490 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
58491
58492 diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
58493 index b7babf0..71e4e74 100644
58494 --- a/include/asm-generic/atomic-long.h
58495 +++ b/include/asm-generic/atomic-long.h
58496 @@ -22,6 +22,12 @@
58497
58498 typedef atomic64_t atomic_long_t;
58499
58500 +#ifdef CONFIG_PAX_REFCOUNT
58501 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
58502 +#else
58503 +typedef atomic64_t atomic_long_unchecked_t;
58504 +#endif
58505 +
58506 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
58507
58508 static inline long atomic_long_read(atomic_long_t *l)
58509 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
58510 return (long)atomic64_read(v);
58511 }
58512
58513 +#ifdef CONFIG_PAX_REFCOUNT
58514 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
58515 +{
58516 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58517 +
58518 + return (long)atomic64_read_unchecked(v);
58519 +}
58520 +#endif
58521 +
58522 static inline void atomic_long_set(atomic_long_t *l, long i)
58523 {
58524 atomic64_t *v = (atomic64_t *)l;
58525 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
58526 atomic64_set(v, i);
58527 }
58528
58529 +#ifdef CONFIG_PAX_REFCOUNT
58530 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
58531 +{
58532 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58533 +
58534 + atomic64_set_unchecked(v, i);
58535 +}
58536 +#endif
58537 +
58538 static inline void atomic_long_inc(atomic_long_t *l)
58539 {
58540 atomic64_t *v = (atomic64_t *)l;
58541 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
58542 atomic64_inc(v);
58543 }
58544
58545 +#ifdef CONFIG_PAX_REFCOUNT
58546 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
58547 +{
58548 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58549 +
58550 + atomic64_inc_unchecked(v);
58551 +}
58552 +#endif
58553 +
58554 static inline void atomic_long_dec(atomic_long_t *l)
58555 {
58556 atomic64_t *v = (atomic64_t *)l;
58557 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
58558 atomic64_dec(v);
58559 }
58560
58561 +#ifdef CONFIG_PAX_REFCOUNT
58562 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
58563 +{
58564 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58565 +
58566 + atomic64_dec_unchecked(v);
58567 +}
58568 +#endif
58569 +
58570 static inline void atomic_long_add(long i, atomic_long_t *l)
58571 {
58572 atomic64_t *v = (atomic64_t *)l;
58573 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
58574 atomic64_add(i, v);
58575 }
58576
58577 +#ifdef CONFIG_PAX_REFCOUNT
58578 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
58579 +{
58580 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58581 +
58582 + atomic64_add_unchecked(i, v);
58583 +}
58584 +#endif
58585 +
58586 static inline void atomic_long_sub(long i, atomic_long_t *l)
58587 {
58588 atomic64_t *v = (atomic64_t *)l;
58589 @@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
58590 atomic64_sub(i, v);
58591 }
58592
58593 +#ifdef CONFIG_PAX_REFCOUNT
58594 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
58595 +{
58596 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58597 +
58598 + atomic64_sub_unchecked(i, v);
58599 +}
58600 +#endif
58601 +
58602 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
58603 {
58604 atomic64_t *v = (atomic64_t *)l;
58605 @@ -115,6 +175,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
58606 return (long)atomic64_inc_return(v);
58607 }
58608
58609 +#ifdef CONFIG_PAX_REFCOUNT
58610 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
58611 +{
58612 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58613 +
58614 + return (long)atomic64_inc_return_unchecked(v);
58615 +}
58616 +#endif
58617 +
58618 static inline long atomic_long_dec_return(atomic_long_t *l)
58619 {
58620 atomic64_t *v = (atomic64_t *)l;
58621 @@ -140,6 +209,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
58622
58623 typedef atomic_t atomic_long_t;
58624
58625 +#ifdef CONFIG_PAX_REFCOUNT
58626 +typedef atomic_unchecked_t atomic_long_unchecked_t;
58627 +#else
58628 +typedef atomic_t atomic_long_unchecked_t;
58629 +#endif
58630 +
58631 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
58632 static inline long atomic_long_read(atomic_long_t *l)
58633 {
58634 @@ -148,6 +223,15 @@ static inline long atomic_long_read(atomic_long_t *l)
58635 return (long)atomic_read(v);
58636 }
58637
58638 +#ifdef CONFIG_PAX_REFCOUNT
58639 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
58640 +{
58641 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
58642 +
58643 + return (long)atomic_read_unchecked(v);
58644 +}
58645 +#endif
58646 +
58647 static inline void atomic_long_set(atomic_long_t *l, long i)
58648 {
58649 atomic_t *v = (atomic_t *)l;
58650 @@ -155,6 +239,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
58651 atomic_set(v, i);
58652 }
58653
58654 +#ifdef CONFIG_PAX_REFCOUNT
58655 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
58656 +{
58657 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
58658 +
58659 + atomic_set_unchecked(v, i);
58660 +}
58661 +#endif
58662 +
58663 static inline void atomic_long_inc(atomic_long_t *l)
58664 {
58665 atomic_t *v = (atomic_t *)l;
58666 @@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
58667 atomic_inc(v);
58668 }
58669
58670 +#ifdef CONFIG_PAX_REFCOUNT
58671 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
58672 +{
58673 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
58674 +
58675 + atomic_inc_unchecked(v);
58676 +}
58677 +#endif
58678 +
58679 static inline void atomic_long_dec(atomic_long_t *l)
58680 {
58681 atomic_t *v = (atomic_t *)l;
58682 @@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
58683 atomic_dec(v);
58684 }
58685
58686 +#ifdef CONFIG_PAX_REFCOUNT
58687 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
58688 +{
58689 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
58690 +
58691 + atomic_dec_unchecked(v);
58692 +}
58693 +#endif
58694 +
58695 static inline void atomic_long_add(long i, atomic_long_t *l)
58696 {
58697 atomic_t *v = (atomic_t *)l;
58698 @@ -176,6 +287,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
58699 atomic_add(i, v);
58700 }
58701
58702 +#ifdef CONFIG_PAX_REFCOUNT
58703 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
58704 +{
58705 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
58706 +
58707 + atomic_add_unchecked(i, v);
58708 +}
58709 +#endif
58710 +
58711 static inline void atomic_long_sub(long i, atomic_long_t *l)
58712 {
58713 atomic_t *v = (atomic_t *)l;
58714 @@ -183,6 +303,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
58715 atomic_sub(i, v);
58716 }
58717
58718 +#ifdef CONFIG_PAX_REFCOUNT
58719 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
58720 +{
58721 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
58722 +
58723 + atomic_sub_unchecked(i, v);
58724 +}
58725 +#endif
58726 +
58727 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
58728 {
58729 atomic_t *v = (atomic_t *)l;
58730 @@ -232,6 +361,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
58731 return (long)atomic_inc_return(v);
58732 }
58733
58734 +#ifdef CONFIG_PAX_REFCOUNT
58735 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
58736 +{
58737 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
58738 +
58739 + return (long)atomic_inc_return_unchecked(v);
58740 +}
58741 +#endif
58742 +
58743 static inline long atomic_long_dec_return(atomic_long_t *l)
58744 {
58745 atomic_t *v = (atomic_t *)l;
58746 @@ -255,4 +393,49 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
58747
58748 #endif /* BITS_PER_LONG == 64 */
58749
58750 +#ifdef CONFIG_PAX_REFCOUNT
58751 +static inline void pax_refcount_needs_these_functions(void)
58752 +{
58753 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
58754 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
58755 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
58756 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
58757 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
58758 + (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
58759 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
58760 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
58761 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
58762 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
58763 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
58764 +
58765 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
58766 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
58767 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
58768 + atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
58769 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
58770 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
58771 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
58772 +}
58773 +#else
58774 +#define atomic_read_unchecked(v) atomic_read(v)
58775 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
58776 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
58777 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
58778 +#define atomic_inc_unchecked(v) atomic_inc(v)
58779 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
58780 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
58781 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
58782 +#define atomic_dec_unchecked(v) atomic_dec(v)
58783 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
58784 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
58785 +
58786 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
58787 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
58788 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
58789 +#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
58790 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
58791 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
58792 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
58793 +#endif
58794 +
58795 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
58796 diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
58797 index b18ce4f..2ee2843 100644
58798 --- a/include/asm-generic/atomic64.h
58799 +++ b/include/asm-generic/atomic64.h
58800 @@ -16,6 +16,8 @@ typedef struct {
58801 long long counter;
58802 } atomic64_t;
58803
58804 +typedef atomic64_t atomic64_unchecked_t;
58805 +
58806 #define ATOMIC64_INIT(i) { (i) }
58807
58808 extern long long atomic64_read(const atomic64_t *v);
58809 @@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
58810 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
58811 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
58812
58813 +#define atomic64_read_unchecked(v) atomic64_read(v)
58814 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
58815 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
58816 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
58817 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
58818 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
58819 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
58820 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
58821 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
58822 +
58823 #endif /* _ASM_GENERIC_ATOMIC64_H */
58824 diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
58825 index 1bfcfe5..e04c5c9 100644
58826 --- a/include/asm-generic/cache.h
58827 +++ b/include/asm-generic/cache.h
58828 @@ -6,7 +6,7 @@
58829 * cache lines need to provide their own cache.h.
58830 */
58831
58832 -#define L1_CACHE_SHIFT 5
58833 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
58834 +#define L1_CACHE_SHIFT 5UL
58835 +#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
58836
58837 #endif /* __ASM_GENERIC_CACHE_H */
58838 diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
58839 index 0d68a1e..b74a761 100644
58840 --- a/include/asm-generic/emergency-restart.h
58841 +++ b/include/asm-generic/emergency-restart.h
58842 @@ -1,7 +1,7 @@
58843 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
58844 #define _ASM_GENERIC_EMERGENCY_RESTART_H
58845
58846 -static inline void machine_emergency_restart(void)
58847 +static inline __noreturn void machine_emergency_restart(void)
58848 {
58849 machine_restart(NULL);
58850 }
58851 diff --git a/include/asm-generic/int-l64.h b/include/asm-generic/int-l64.h
58852 index 1ca3efc..e3dc852 100644
58853 --- a/include/asm-generic/int-l64.h
58854 +++ b/include/asm-generic/int-l64.h
58855 @@ -46,6 +46,8 @@ typedef unsigned int u32;
58856 typedef signed long s64;
58857 typedef unsigned long u64;
58858
58859 +typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
58860 +
58861 #define S8_C(x) x
58862 #define U8_C(x) x ## U
58863 #define S16_C(x) x
58864 diff --git a/include/asm-generic/int-ll64.h b/include/asm-generic/int-ll64.h
58865 index f394147..b6152b9 100644
58866 --- a/include/asm-generic/int-ll64.h
58867 +++ b/include/asm-generic/int-ll64.h
58868 @@ -51,6 +51,8 @@ typedef unsigned int u32;
58869 typedef signed long long s64;
58870 typedef unsigned long long u64;
58871
58872 +typedef unsigned long long intoverflow_t;
58873 +
58874 #define S8_C(x) x
58875 #define U8_C(x) x ## U
58876 #define S16_C(x) x
58877 diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
58878 index 0232ccb..13d9165 100644
58879 --- a/include/asm-generic/kmap_types.h
58880 +++ b/include/asm-generic/kmap_types.h
58881 @@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
58882 KMAP_D(17) KM_NMI,
58883 KMAP_D(18) KM_NMI_PTE,
58884 KMAP_D(19) KM_KDB,
58885 +KMAP_D(20) KM_CLEARPAGE,
58886 /*
58887 * Remember to update debug_kmap_atomic() when adding new kmap types!
58888 */
58889 -KMAP_D(20) KM_TYPE_NR
58890 +KMAP_D(21) KM_TYPE_NR
58891 };
58892
58893 #undef KMAP_D
58894 diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
58895 index 9ceb03b..2efbcbd 100644
58896 --- a/include/asm-generic/local.h
58897 +++ b/include/asm-generic/local.h
58898 @@ -39,6 +39,7 @@ typedef struct
58899 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
58900 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
58901 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
58902 +#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
58903
58904 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
58905 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
58906 diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
58907 index 725612b..9cc513a 100644
58908 --- a/include/asm-generic/pgtable-nopmd.h
58909 +++ b/include/asm-generic/pgtable-nopmd.h
58910 @@ -1,14 +1,19 @@
58911 #ifndef _PGTABLE_NOPMD_H
58912 #define _PGTABLE_NOPMD_H
58913
58914 -#ifndef __ASSEMBLY__
58915 -
58916 #include <asm-generic/pgtable-nopud.h>
58917
58918 -struct mm_struct;
58919 -
58920 #define __PAGETABLE_PMD_FOLDED
58921
58922 +#define PMD_SHIFT PUD_SHIFT
58923 +#define PTRS_PER_PMD 1
58924 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
58925 +#define PMD_MASK (~(PMD_SIZE-1))
58926 +
58927 +#ifndef __ASSEMBLY__
58928 +
58929 +struct mm_struct;
58930 +
58931 /*
58932 * Having the pmd type consist of a pud gets the size right, and allows
58933 * us to conceptually access the pud entry that this pmd is folded into
58934 @@ -16,11 +21,6 @@ struct mm_struct;
58935 */
58936 typedef struct { pud_t pud; } pmd_t;
58937
58938 -#define PMD_SHIFT PUD_SHIFT
58939 -#define PTRS_PER_PMD 1
58940 -#define PMD_SIZE (1UL << PMD_SHIFT)
58941 -#define PMD_MASK (~(PMD_SIZE-1))
58942 -
58943 /*
58944 * The "pud_xxx()" functions here are trivial for a folded two-level
58945 * setup: the pmd is never bad, and a pmd always exists (as it's folded
58946 diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
58947 index 810431d..ccc3638 100644
58948 --- a/include/asm-generic/pgtable-nopud.h
58949 +++ b/include/asm-generic/pgtable-nopud.h
58950 @@ -1,10 +1,15 @@
58951 #ifndef _PGTABLE_NOPUD_H
58952 #define _PGTABLE_NOPUD_H
58953
58954 -#ifndef __ASSEMBLY__
58955 -
58956 #define __PAGETABLE_PUD_FOLDED
58957
58958 +#define PUD_SHIFT PGDIR_SHIFT
58959 +#define PTRS_PER_PUD 1
58960 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
58961 +#define PUD_MASK (~(PUD_SIZE-1))
58962 +
58963 +#ifndef __ASSEMBLY__
58964 +
58965 /*
58966 * Having the pud type consist of a pgd gets the size right, and allows
58967 * us to conceptually access the pgd entry that this pud is folded into
58968 @@ -12,11 +17,6 @@
58969 */
58970 typedef struct { pgd_t pgd; } pud_t;
58971
58972 -#define PUD_SHIFT PGDIR_SHIFT
58973 -#define PTRS_PER_PUD 1
58974 -#define PUD_SIZE (1UL << PUD_SHIFT)
58975 -#define PUD_MASK (~(PUD_SIZE-1))
58976 -
58977 /*
58978 * The "pgd_xxx()" functions here are trivial for a folded two-level
58979 * setup: the pud is never bad, and a pud always exists (as it's folded
58980 diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
58981 index 76bff2b..c7a14e2 100644
58982 --- a/include/asm-generic/pgtable.h
58983 +++ b/include/asm-generic/pgtable.h
58984 @@ -443,6 +443,14 @@ static inline int pmd_write(pmd_t pmd)
58985 #endif /* __HAVE_ARCH_PMD_WRITE */
58986 #endif
58987
58988 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
58989 +static inline unsigned long pax_open_kernel(void) { return 0; }
58990 +#endif
58991 +
58992 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
58993 +static inline unsigned long pax_close_kernel(void) { return 0; }
58994 +#endif
58995 +
58996 #endif /* !__ASSEMBLY__ */
58997
58998 #endif /* _ASM_GENERIC_PGTABLE_H */
58999 diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
59000 index b5e2e4c..6a5373e 100644
59001 --- a/include/asm-generic/vmlinux.lds.h
59002 +++ b/include/asm-generic/vmlinux.lds.h
59003 @@ -217,6 +217,7 @@
59004 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
59005 VMLINUX_SYMBOL(__start_rodata) = .; \
59006 *(.rodata) *(.rodata.*) \
59007 + *(.data..read_only) \
59008 *(__vermagic) /* Kernel version magic */ \
59009 . = ALIGN(8); \
59010 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
59011 @@ -722,17 +723,18 @@
59012 * section in the linker script will go there too. @phdr should have
59013 * a leading colon.
59014 *
59015 - * Note that this macros defines __per_cpu_load as an absolute symbol.
59016 + * Note that this macros defines per_cpu_load as an absolute symbol.
59017 * If there is no need to put the percpu section at a predetermined
59018 * address, use PERCPU_SECTION.
59019 */
59020 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
59021 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
59022 - .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
59023 + per_cpu_load = .; \
59024 + .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
59025 - LOAD_OFFSET) { \
59026 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
59027 PERCPU_INPUT(cacheline) \
59028 } phdr \
59029 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
59030 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
59031
59032 /**
59033 * PERCPU_SECTION - define output section for percpu area, simple version
59034 diff --git a/include/drm/drmP.h b/include/drm/drmP.h
59035 index 92f0981..d44a37c 100644
59036 --- a/include/drm/drmP.h
59037 +++ b/include/drm/drmP.h
59038 @@ -72,6 +72,7 @@
59039 #include <linux/workqueue.h>
59040 #include <linux/poll.h>
59041 #include <asm/pgalloc.h>
59042 +#include <asm/local.h>
59043 #include "drm.h"
59044
59045 #include <linux/idr.h>
59046 @@ -1038,7 +1039,7 @@ struct drm_device {
59047
59048 /** \name Usage Counters */
59049 /*@{ */
59050 - int open_count; /**< Outstanding files open */
59051 + local_t open_count; /**< Outstanding files open */
59052 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
59053 atomic_t vma_count; /**< Outstanding vma areas open */
59054 int buf_use; /**< Buffers in use -- cannot alloc */
59055 @@ -1049,7 +1050,7 @@ struct drm_device {
59056 /*@{ */
59057 unsigned long counters;
59058 enum drm_stat_type types[15];
59059 - atomic_t counts[15];
59060 + atomic_unchecked_t counts[15];
59061 /*@} */
59062
59063 struct list_head filelist;
59064 diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
59065 index 37515d1..34fa8b0 100644
59066 --- a/include/drm/drm_crtc_helper.h
59067 +++ b/include/drm/drm_crtc_helper.h
59068 @@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
59069
59070 /* disable crtc when not in use - more explicit than dpms off */
59071 void (*disable)(struct drm_crtc *crtc);
59072 -};
59073 +} __no_const;
59074
59075 struct drm_encoder_helper_funcs {
59076 void (*dpms)(struct drm_encoder *encoder, int mode);
59077 @@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
59078 struct drm_connector *connector);
59079 /* disable encoder when not in use - more explicit than dpms off */
59080 void (*disable)(struct drm_encoder *encoder);
59081 -};
59082 +} __no_const;
59083
59084 struct drm_connector_helper_funcs {
59085 int (*get_modes)(struct drm_connector *connector);
59086 diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
59087 index 26c1f78..6722682 100644
59088 --- a/include/drm/ttm/ttm_memory.h
59089 +++ b/include/drm/ttm/ttm_memory.h
59090 @@ -47,7 +47,7 @@
59091
59092 struct ttm_mem_shrink {
59093 int (*do_shrink) (struct ttm_mem_shrink *);
59094 -};
59095 +} __no_const;
59096
59097 /**
59098 * struct ttm_mem_global - Global memory accounting structure.
59099 diff --git a/include/linux/a.out.h b/include/linux/a.out.h
59100 index e86dfca..40cc55f 100644
59101 --- a/include/linux/a.out.h
59102 +++ b/include/linux/a.out.h
59103 @@ -39,6 +39,14 @@ enum machine_type {
59104 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
59105 };
59106
59107 +/* Constants for the N_FLAGS field */
59108 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
59109 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
59110 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
59111 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
59112 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
59113 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
59114 +
59115 #if !defined (N_MAGIC)
59116 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
59117 #endif
59118 diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
59119 index f4ff882..84b53a6 100644
59120 --- a/include/linux/atmdev.h
59121 +++ b/include/linux/atmdev.h
59122 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
59123 #endif
59124
59125 struct k_atm_aal_stats {
59126 -#define __HANDLE_ITEM(i) atomic_t i
59127 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
59128 __AAL_STAT_ITEMS
59129 #undef __HANDLE_ITEM
59130 };
59131 diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
59132 index 0092102..8a801b4 100644
59133 --- a/include/linux/binfmts.h
59134 +++ b/include/linux/binfmts.h
59135 @@ -89,6 +89,7 @@ struct linux_binfmt {
59136 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
59137 int (*load_shlib)(struct file *);
59138 int (*core_dump)(struct coredump_params *cprm);
59139 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
59140 unsigned long min_coredump; /* minimal dump size */
59141 };
59142
59143 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
59144 index 606cf33..b72c577 100644
59145 --- a/include/linux/blkdev.h
59146 +++ b/include/linux/blkdev.h
59147 @@ -1379,7 +1379,7 @@ struct block_device_operations {
59148 /* this callback is with swap_lock and sometimes page table lock held */
59149 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
59150 struct module *owner;
59151 -};
59152 +} __do_const;
59153
59154 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
59155 unsigned long);
59156 diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
59157 index 4d1a074..88f929a 100644
59158 --- a/include/linux/blktrace_api.h
59159 +++ b/include/linux/blktrace_api.h
59160 @@ -162,7 +162,7 @@ struct blk_trace {
59161 struct dentry *dir;
59162 struct dentry *dropped_file;
59163 struct dentry *msg_file;
59164 - atomic_t dropped;
59165 + atomic_unchecked_t dropped;
59166 };
59167
59168 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
59169 diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
59170 index 83195fb..0b0f77d 100644
59171 --- a/include/linux/byteorder/little_endian.h
59172 +++ b/include/linux/byteorder/little_endian.h
59173 @@ -42,51 +42,51 @@
59174
59175 static inline __le64 __cpu_to_le64p(const __u64 *p)
59176 {
59177 - return (__force __le64)*p;
59178 + return (__force const __le64)*p;
59179 }
59180 static inline __u64 __le64_to_cpup(const __le64 *p)
59181 {
59182 - return (__force __u64)*p;
59183 + return (__force const __u64)*p;
59184 }
59185 static inline __le32 __cpu_to_le32p(const __u32 *p)
59186 {
59187 - return (__force __le32)*p;
59188 + return (__force const __le32)*p;
59189 }
59190 static inline __u32 __le32_to_cpup(const __le32 *p)
59191 {
59192 - return (__force __u32)*p;
59193 + return (__force const __u32)*p;
59194 }
59195 static inline __le16 __cpu_to_le16p(const __u16 *p)
59196 {
59197 - return (__force __le16)*p;
59198 + return (__force const __le16)*p;
59199 }
59200 static inline __u16 __le16_to_cpup(const __le16 *p)
59201 {
59202 - return (__force __u16)*p;
59203 + return (__force const __u16)*p;
59204 }
59205 static inline __be64 __cpu_to_be64p(const __u64 *p)
59206 {
59207 - return (__force __be64)__swab64p(p);
59208 + return (__force const __be64)__swab64p(p);
59209 }
59210 static inline __u64 __be64_to_cpup(const __be64 *p)
59211 {
59212 - return __swab64p((__u64 *)p);
59213 + return __swab64p((const __u64 *)p);
59214 }
59215 static inline __be32 __cpu_to_be32p(const __u32 *p)
59216 {
59217 - return (__force __be32)__swab32p(p);
59218 + return (__force const __be32)__swab32p(p);
59219 }
59220 static inline __u32 __be32_to_cpup(const __be32 *p)
59221 {
59222 - return __swab32p((__u32 *)p);
59223 + return __swab32p((const __u32 *)p);
59224 }
59225 static inline __be16 __cpu_to_be16p(const __u16 *p)
59226 {
59227 - return (__force __be16)__swab16p(p);
59228 + return (__force const __be16)__swab16p(p);
59229 }
59230 static inline __u16 __be16_to_cpup(const __be16 *p)
59231 {
59232 - return __swab16p((__u16 *)p);
59233 + return __swab16p((const __u16 *)p);
59234 }
59235 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
59236 #define __le64_to_cpus(x) do { (void)(x); } while (0)
59237 diff --git a/include/linux/cache.h b/include/linux/cache.h
59238 index 4c57065..4307975 100644
59239 --- a/include/linux/cache.h
59240 +++ b/include/linux/cache.h
59241 @@ -16,6 +16,10 @@
59242 #define __read_mostly
59243 #endif
59244
59245 +#ifndef __read_only
59246 +#define __read_only __read_mostly
59247 +#endif
59248 +
59249 #ifndef ____cacheline_aligned
59250 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
59251 #endif
59252 diff --git a/include/linux/capability.h b/include/linux/capability.h
59253 index 12d52de..b5f7fa7 100644
59254 --- a/include/linux/capability.h
59255 +++ b/include/linux/capability.h
59256 @@ -548,6 +548,8 @@ extern bool has_ns_capability_noaudit(struct task_struct *t,
59257 extern bool capable(int cap);
59258 extern bool ns_capable(struct user_namespace *ns, int cap);
59259 extern bool nsown_capable(int cap);
59260 +extern bool capable_nolog(int cap);
59261 +extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
59262
59263 /* audit system wants to get cap info from files as well */
59264 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
59265 diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
59266 index 04ffb2e..6799180 100644
59267 --- a/include/linux/cleancache.h
59268 +++ b/include/linux/cleancache.h
59269 @@ -31,7 +31,7 @@ struct cleancache_ops {
59270 void (*flush_page)(int, struct cleancache_filekey, pgoff_t);
59271 void (*flush_inode)(int, struct cleancache_filekey);
59272 void (*flush_fs)(int);
59273 -};
59274 +} __no_const;
59275
59276 extern struct cleancache_ops
59277 cleancache_register_ops(struct cleancache_ops *ops);
59278 diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
59279 index 2f40791..89a56fd 100644
59280 --- a/include/linux/compiler-gcc4.h
59281 +++ b/include/linux/compiler-gcc4.h
59282 @@ -32,6 +32,12 @@
59283 #define __linktime_error(message) __attribute__((__error__(message)))
59284
59285 #if __GNUC_MINOR__ >= 5
59286 +
59287 +#ifdef CONSTIFY_PLUGIN
59288 +#define __no_const __attribute__((no_const))
59289 +#define __do_const __attribute__((do_const))
59290 +#endif
59291 +
59292 /*
59293 * Mark a position in code as unreachable. This can be used to
59294 * suppress control flow warnings after asm blocks that transfer
59295 @@ -47,6 +53,11 @@
59296 #define __noclone __attribute__((__noclone__))
59297
59298 #endif
59299 +
59300 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
59301 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
59302 +#define __bos0(ptr) __bos((ptr), 0)
59303 +#define __bos1(ptr) __bos((ptr), 1)
59304 #endif
59305
59306 #if __GNUC_MINOR__ > 0
59307 diff --git a/include/linux/compiler.h b/include/linux/compiler.h
59308 index 4a24354..9570c1b 100644
59309 --- a/include/linux/compiler.h
59310 +++ b/include/linux/compiler.h
59311 @@ -5,31 +5,62 @@
59312
59313 #ifdef __CHECKER__
59314 # define __user __attribute__((noderef, address_space(1)))
59315 +# define __force_user __force __user
59316 # define __kernel __attribute__((address_space(0)))
59317 +# define __force_kernel __force __kernel
59318 # define __safe __attribute__((safe))
59319 # define __force __attribute__((force))
59320 # define __nocast __attribute__((nocast))
59321 # define __iomem __attribute__((noderef, address_space(2)))
59322 +# define __force_iomem __force __iomem
59323 # define __acquires(x) __attribute__((context(x,0,1)))
59324 # define __releases(x) __attribute__((context(x,1,0)))
59325 # define __acquire(x) __context__(x,1)
59326 # define __release(x) __context__(x,-1)
59327 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
59328 # define __percpu __attribute__((noderef, address_space(3)))
59329 +# define __force_percpu __force __percpu
59330 #ifdef CONFIG_SPARSE_RCU_POINTER
59331 # define __rcu __attribute__((noderef, address_space(4)))
59332 +# define __force_rcu __force __rcu
59333 #else
59334 # define __rcu
59335 +# define __force_rcu
59336 #endif
59337 extern void __chk_user_ptr(const volatile void __user *);
59338 extern void __chk_io_ptr(const volatile void __iomem *);
59339 +#elif defined(CHECKER_PLUGIN)
59340 +//# define __user
59341 +//# define __force_user
59342 +//# define __kernel
59343 +//# define __force_kernel
59344 +# define __safe
59345 +# define __force
59346 +# define __nocast
59347 +# define __iomem
59348 +# define __force_iomem
59349 +# define __chk_user_ptr(x) (void)0
59350 +# define __chk_io_ptr(x) (void)0
59351 +# define __builtin_warning(x, y...) (1)
59352 +# define __acquires(x)
59353 +# define __releases(x)
59354 +# define __acquire(x) (void)0
59355 +# define __release(x) (void)0
59356 +# define __cond_lock(x,c) (c)
59357 +# define __percpu
59358 +# define __force_percpu
59359 +# define __rcu
59360 +# define __force_rcu
59361 #else
59362 # define __user
59363 +# define __force_user
59364 # define __kernel
59365 +# define __force_kernel
59366 # define __safe
59367 # define __force
59368 # define __nocast
59369 # define __iomem
59370 +# define __force_iomem
59371 # define __chk_user_ptr(x) (void)0
59372 # define __chk_io_ptr(x) (void)0
59373 # define __builtin_warning(x, y...) (1)
59374 @@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
59375 # define __release(x) (void)0
59376 # define __cond_lock(x,c) (c)
59377 # define __percpu
59378 +# define __force_percpu
59379 # define __rcu
59380 +# define __force_rcu
59381 #endif
59382
59383 #ifdef __KERNEL__
59384 @@ -264,6 +297,14 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
59385 # define __attribute_const__ /* unimplemented */
59386 #endif
59387
59388 +#ifndef __no_const
59389 +# define __no_const
59390 +#endif
59391 +
59392 +#ifndef __do_const
59393 +# define __do_const
59394 +#endif
59395 +
59396 /*
59397 * Tell gcc if a function is cold. The compiler will assume any path
59398 * directly leading to the call is unlikely.
59399 @@ -273,6 +314,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
59400 #define __cold
59401 #endif
59402
59403 +#ifndef __alloc_size
59404 +#define __alloc_size(...)
59405 +#endif
59406 +
59407 +#ifndef __bos
59408 +#define __bos(ptr, arg)
59409 +#endif
59410 +
59411 +#ifndef __bos0
59412 +#define __bos0(ptr)
59413 +#endif
59414 +
59415 +#ifndef __bos1
59416 +#define __bos1(ptr)
59417 +#endif
59418 +
59419 /* Simple shorthand for a section definition */
59420 #ifndef __section
59421 # define __section(S) __attribute__ ((__section__(#S)))
59422 @@ -308,6 +365,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
59423 * use is to mediate communication between process-level code and irq/NMI
59424 * handlers, all running on the same CPU.
59425 */
59426 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
59427 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
59428 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
59429
59430 #endif /* __LINUX_COMPILER_H */
59431 diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
59432 index e9eaec5..bfeb9bb 100644
59433 --- a/include/linux/cpuset.h
59434 +++ b/include/linux/cpuset.h
59435 @@ -118,7 +118,7 @@ static inline void put_mems_allowed(void)
59436 * nodemask.
59437 */
59438 smp_mb();
59439 - --ACCESS_ONCE(current->mems_allowed_change_disable);
59440 + --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
59441 }
59442
59443 static inline void set_mems_allowed(nodemask_t nodemask)
59444 diff --git a/include/linux/cred.h b/include/linux/cred.h
59445 index adadf71..6af5560 100644
59446 --- a/include/linux/cred.h
59447 +++ b/include/linux/cred.h
59448 @@ -207,6 +207,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
59449 static inline void validate_process_creds(void)
59450 {
59451 }
59452 +static inline void validate_task_creds(struct task_struct *task)
59453 +{
59454 +}
59455 #endif
59456
59457 /**
59458 diff --git a/include/linux/crypto.h b/include/linux/crypto.h
59459 index 8a94217..15d49e3 100644
59460 --- a/include/linux/crypto.h
59461 +++ b/include/linux/crypto.h
59462 @@ -365,7 +365,7 @@ struct cipher_tfm {
59463 const u8 *key, unsigned int keylen);
59464 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
59465 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
59466 -};
59467 +} __no_const;
59468
59469 struct hash_tfm {
59470 int (*init)(struct hash_desc *desc);
59471 @@ -386,13 +386,13 @@ struct compress_tfm {
59472 int (*cot_decompress)(struct crypto_tfm *tfm,
59473 const u8 *src, unsigned int slen,
59474 u8 *dst, unsigned int *dlen);
59475 -};
59476 +} __no_const;
59477
59478 struct rng_tfm {
59479 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
59480 unsigned int dlen);
59481 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
59482 -};
59483 +} __no_const;
59484
59485 #define crt_ablkcipher crt_u.ablkcipher
59486 #define crt_aead crt_u.aead
59487 diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
59488 index 7925bf0..d5143d2 100644
59489 --- a/include/linux/decompress/mm.h
59490 +++ b/include/linux/decompress/mm.h
59491 @@ -77,7 +77,7 @@ static void free(void *where)
59492 * warnings when not needed (indeed large_malloc / large_free are not
59493 * needed by inflate */
59494
59495 -#define malloc(a) kmalloc(a, GFP_KERNEL)
59496 +#define malloc(a) kmalloc((a), GFP_KERNEL)
59497 #define free(a) kfree(a)
59498
59499 #define large_malloc(a) vmalloc(a)
59500 diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
59501 index e13117c..e9fc938 100644
59502 --- a/include/linux/dma-mapping.h
59503 +++ b/include/linux/dma-mapping.h
59504 @@ -46,7 +46,7 @@ struct dma_map_ops {
59505 u64 (*get_required_mask)(struct device *dev);
59506 #endif
59507 int is_phys;
59508 -};
59509 +} __do_const;
59510
59511 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
59512
59513 diff --git a/include/linux/efi.h b/include/linux/efi.h
59514 index 37c3007..92ab679 100644
59515 --- a/include/linux/efi.h
59516 +++ b/include/linux/efi.h
59517 @@ -580,7 +580,7 @@ struct efivar_operations {
59518 efi_get_variable_t *get_variable;
59519 efi_get_next_variable_t *get_next_variable;
59520 efi_set_variable_t *set_variable;
59521 -};
59522 +} __no_const;
59523
59524 struct efivars {
59525 /*
59526 diff --git a/include/linux/elf.h b/include/linux/elf.h
59527 index 999b4f5..57753b4 100644
59528 --- a/include/linux/elf.h
59529 +++ b/include/linux/elf.h
59530 @@ -40,6 +40,17 @@ typedef __s64 Elf64_Sxword;
59531 #define PT_GNU_EH_FRAME 0x6474e550
59532
59533 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
59534 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
59535 +
59536 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
59537 +
59538 +/* Constants for the e_flags field */
59539 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
59540 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
59541 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
59542 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
59543 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
59544 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
59545
59546 /*
59547 * Extended Numbering
59548 @@ -97,6 +108,8 @@ typedef __s64 Elf64_Sxword;
59549 #define DT_DEBUG 21
59550 #define DT_TEXTREL 22
59551 #define DT_JMPREL 23
59552 +#define DT_FLAGS 30
59553 + #define DF_TEXTREL 0x00000004
59554 #define DT_ENCODING 32
59555 #define OLD_DT_LOOS 0x60000000
59556 #define DT_LOOS 0x6000000d
59557 @@ -243,6 +256,19 @@ typedef struct elf64_hdr {
59558 #define PF_W 0x2
59559 #define PF_X 0x1
59560
59561 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
59562 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
59563 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
59564 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
59565 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
59566 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
59567 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
59568 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
59569 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
59570 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
59571 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
59572 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
59573 +
59574 typedef struct elf32_phdr{
59575 Elf32_Word p_type;
59576 Elf32_Off p_offset;
59577 @@ -335,6 +361,8 @@ typedef struct elf64_shdr {
59578 #define EI_OSABI 7
59579 #define EI_PAD 8
59580
59581 +#define EI_PAX 14
59582 +
59583 #define ELFMAG0 0x7f /* EI_MAG */
59584 #define ELFMAG1 'E'
59585 #define ELFMAG2 'L'
59586 @@ -421,6 +449,7 @@ extern Elf32_Dyn _DYNAMIC [];
59587 #define elf_note elf32_note
59588 #define elf_addr_t Elf32_Off
59589 #define Elf_Half Elf32_Half
59590 +#define elf_dyn Elf32_Dyn
59591
59592 #else
59593
59594 @@ -431,6 +460,7 @@ extern Elf64_Dyn _DYNAMIC [];
59595 #define elf_note elf64_note
59596 #define elf_addr_t Elf64_Off
59597 #define Elf_Half Elf64_Half
59598 +#define elf_dyn Elf64_Dyn
59599
59600 #endif
59601
59602 diff --git a/include/linux/filter.h b/include/linux/filter.h
59603 index 8eeb205..d59bfa2 100644
59604 --- a/include/linux/filter.h
59605 +++ b/include/linux/filter.h
59606 @@ -134,6 +134,7 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
59607
59608 struct sk_buff;
59609 struct sock;
59610 +struct bpf_jit_work;
59611
59612 struct sk_filter
59613 {
59614 @@ -141,6 +142,9 @@ struct sk_filter
59615 unsigned int len; /* Number of filter blocks */
59616 unsigned int (*bpf_func)(const struct sk_buff *skb,
59617 const struct sock_filter *filter);
59618 +#ifdef CONFIG_BPF_JIT
59619 + struct bpf_jit_work *work;
59620 +#endif
59621 struct rcu_head rcu;
59622 struct sock_filter insns[0];
59623 };
59624 diff --git a/include/linux/firewire.h b/include/linux/firewire.h
59625 index 84ccf8e..2e9b14c 100644
59626 --- a/include/linux/firewire.h
59627 +++ b/include/linux/firewire.h
59628 @@ -428,7 +428,7 @@ struct fw_iso_context {
59629 union {
59630 fw_iso_callback_t sc;
59631 fw_iso_mc_callback_t mc;
59632 - } callback;
59633 + } __no_const callback;
59634 void *callback_data;
59635 };
59636
59637 diff --git a/include/linux/fs.h b/include/linux/fs.h
59638 index 69cd5bb..58425c2 100644
59639 --- a/include/linux/fs.h
59640 +++ b/include/linux/fs.h
59641 @@ -1623,7 +1623,8 @@ struct file_operations {
59642 int (*setlease)(struct file *, long, struct file_lock **);
59643 long (*fallocate)(struct file *file, int mode, loff_t offset,
59644 loff_t len);
59645 -};
59646 +} __do_const;
59647 +typedef struct file_operations __no_const file_operations_no_const;
59648
59649 struct inode_operations {
59650 struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
59651 diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
59652 index 003dc0f..3c4ea97 100644
59653 --- a/include/linux/fs_struct.h
59654 +++ b/include/linux/fs_struct.h
59655 @@ -6,7 +6,7 @@
59656 #include <linux/seqlock.h>
59657
59658 struct fs_struct {
59659 - int users;
59660 + atomic_t users;
59661 spinlock_t lock;
59662 seqcount_t seq;
59663 int umask;
59664 diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
59665 index ce31408..b1ad003 100644
59666 --- a/include/linux/fscache-cache.h
59667 +++ b/include/linux/fscache-cache.h
59668 @@ -102,7 +102,7 @@ struct fscache_operation {
59669 fscache_operation_release_t release;
59670 };
59671
59672 -extern atomic_t fscache_op_debug_id;
59673 +extern atomic_unchecked_t fscache_op_debug_id;
59674 extern void fscache_op_work_func(struct work_struct *work);
59675
59676 extern void fscache_enqueue_operation(struct fscache_operation *);
59677 @@ -122,7 +122,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
59678 {
59679 INIT_WORK(&op->work, fscache_op_work_func);
59680 atomic_set(&op->usage, 1);
59681 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
59682 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
59683 op->processor = processor;
59684 op->release = release;
59685 INIT_LIST_HEAD(&op->pend_link);
59686 diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
59687 index 2a53f10..0187fdf 100644
59688 --- a/include/linux/fsnotify.h
59689 +++ b/include/linux/fsnotify.h
59690 @@ -314,7 +314,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
59691 */
59692 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
59693 {
59694 - return kstrdup(name, GFP_KERNEL);
59695 + return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
59696 }
59697
59698 /*
59699 diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
59700 index 91d0e0a3..035666b 100644
59701 --- a/include/linux/fsnotify_backend.h
59702 +++ b/include/linux/fsnotify_backend.h
59703 @@ -105,6 +105,7 @@ struct fsnotify_ops {
59704 void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
59705 void (*free_event_priv)(struct fsnotify_event_private_data *priv);
59706 };
59707 +typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
59708
59709 /*
59710 * A group is a "thing" that wants to receive notification about filesystem
59711 diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
59712 index c3da42d..c70e0df 100644
59713 --- a/include/linux/ftrace_event.h
59714 +++ b/include/linux/ftrace_event.h
59715 @@ -97,7 +97,7 @@ struct trace_event_functions {
59716 trace_print_func raw;
59717 trace_print_func hex;
59718 trace_print_func binary;
59719 -};
59720 +} __no_const;
59721
59722 struct trace_event {
59723 struct hlist_node node;
59724 @@ -254,7 +254,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
59725 extern int trace_add_event_call(struct ftrace_event_call *call);
59726 extern void trace_remove_event_call(struct ftrace_event_call *call);
59727
59728 -#define is_signed_type(type) (((type)(-1)) < 0)
59729 +#define is_signed_type(type) (((type)(-1)) < (type)1)
59730
59731 int trace_set_clr_event(const char *system, const char *event, int set);
59732
59733 diff --git a/include/linux/genhd.h b/include/linux/genhd.h
59734 index e61d319..0da8505 100644
59735 --- a/include/linux/genhd.h
59736 +++ b/include/linux/genhd.h
59737 @@ -185,7 +185,7 @@ struct gendisk {
59738 struct kobject *slave_dir;
59739
59740 struct timer_rand_state *random;
59741 - atomic_t sync_io; /* RAID */
59742 + atomic_unchecked_t sync_io; /* RAID */
59743 struct disk_events *ev;
59744 #ifdef CONFIG_BLK_DEV_INTEGRITY
59745 struct blk_integrity *integrity;
59746 diff --git a/include/linux/gracl.h b/include/linux/gracl.h
59747 new file mode 100644
59748 index 0000000..8a130b6
59749 --- /dev/null
59750 +++ b/include/linux/gracl.h
59751 @@ -0,0 +1,319 @@
59752 +#ifndef GR_ACL_H
59753 +#define GR_ACL_H
59754 +
59755 +#include <linux/grdefs.h>
59756 +#include <linux/resource.h>
59757 +#include <linux/capability.h>
59758 +#include <linux/dcache.h>
59759 +#include <asm/resource.h>
59760 +
59761 +/* Major status information */
59762 +
59763 +#define GR_VERSION "grsecurity 2.9"
59764 +#define GRSECURITY_VERSION 0x2900
59765 +
59766 +enum {
59767 + GR_SHUTDOWN = 0,
59768 + GR_ENABLE = 1,
59769 + GR_SPROLE = 2,
59770 + GR_RELOAD = 3,
59771 + GR_SEGVMOD = 4,
59772 + GR_STATUS = 5,
59773 + GR_UNSPROLE = 6,
59774 + GR_PASSSET = 7,
59775 + GR_SPROLEPAM = 8,
59776 +};
59777 +
59778 +/* Password setup definitions
59779 + * kernel/grhash.c */
59780 +enum {
59781 + GR_PW_LEN = 128,
59782 + GR_SALT_LEN = 16,
59783 + GR_SHA_LEN = 32,
59784 +};
59785 +
59786 +enum {
59787 + GR_SPROLE_LEN = 64,
59788 +};
59789 +
59790 +enum {
59791 + GR_NO_GLOB = 0,
59792 + GR_REG_GLOB,
59793 + GR_CREATE_GLOB
59794 +};
59795 +
59796 +#define GR_NLIMITS 32
59797 +
59798 +/* Begin Data Structures */
59799 +
59800 +struct sprole_pw {
59801 + unsigned char *rolename;
59802 + unsigned char salt[GR_SALT_LEN];
59803 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
59804 +};
59805 +
59806 +struct name_entry {
59807 + __u32 key;
59808 + ino_t inode;
59809 + dev_t device;
59810 + char *name;
59811 + __u16 len;
59812 + __u8 deleted;
59813 + struct name_entry *prev;
59814 + struct name_entry *next;
59815 +};
59816 +
59817 +struct inodev_entry {
59818 + struct name_entry *nentry;
59819 + struct inodev_entry *prev;
59820 + struct inodev_entry *next;
59821 +};
59822 +
59823 +struct acl_role_db {
59824 + struct acl_role_label **r_hash;
59825 + __u32 r_size;
59826 +};
59827 +
59828 +struct inodev_db {
59829 + struct inodev_entry **i_hash;
59830 + __u32 i_size;
59831 +};
59832 +
59833 +struct name_db {
59834 + struct name_entry **n_hash;
59835 + __u32 n_size;
59836 +};
59837 +
59838 +struct crash_uid {
59839 + uid_t uid;
59840 + unsigned long expires;
59841 +};
59842 +
59843 +struct gr_hash_struct {
59844 + void **table;
59845 + void **nametable;
59846 + void *first;
59847 + __u32 table_size;
59848 + __u32 used_size;
59849 + int type;
59850 +};
59851 +
59852 +/* Userspace Grsecurity ACL data structures */
59853 +
59854 +struct acl_subject_label {
59855 + char *filename;
59856 + ino_t inode;
59857 + dev_t device;
59858 + __u32 mode;
59859 + kernel_cap_t cap_mask;
59860 + kernel_cap_t cap_lower;
59861 + kernel_cap_t cap_invert_audit;
59862 +
59863 + struct rlimit res[GR_NLIMITS];
59864 + __u32 resmask;
59865 +
59866 + __u8 user_trans_type;
59867 + __u8 group_trans_type;
59868 + uid_t *user_transitions;
59869 + gid_t *group_transitions;
59870 + __u16 user_trans_num;
59871 + __u16 group_trans_num;
59872 +
59873 + __u32 sock_families[2];
59874 + __u32 ip_proto[8];
59875 + __u32 ip_type;
59876 + struct acl_ip_label **ips;
59877 + __u32 ip_num;
59878 + __u32 inaddr_any_override;
59879 +
59880 + __u32 crashes;
59881 + unsigned long expires;
59882 +
59883 + struct acl_subject_label *parent_subject;
59884 + struct gr_hash_struct *hash;
59885 + struct acl_subject_label *prev;
59886 + struct acl_subject_label *next;
59887 +
59888 + struct acl_object_label **obj_hash;
59889 + __u32 obj_hash_size;
59890 + __u16 pax_flags;
59891 +};
59892 +
59893 +struct role_allowed_ip {
59894 + __u32 addr;
59895 + __u32 netmask;
59896 +
59897 + struct role_allowed_ip *prev;
59898 + struct role_allowed_ip *next;
59899 +};
59900 +
59901 +struct role_transition {
59902 + char *rolename;
59903 +
59904 + struct role_transition *prev;
59905 + struct role_transition *next;
59906 +};
59907 +
59908 +struct acl_role_label {
59909 + char *rolename;
59910 + uid_t uidgid;
59911 + __u16 roletype;
59912 +
59913 + __u16 auth_attempts;
59914 + unsigned long expires;
59915 +
59916 + struct acl_subject_label *root_label;
59917 + struct gr_hash_struct *hash;
59918 +
59919 + struct acl_role_label *prev;
59920 + struct acl_role_label *next;
59921 +
59922 + struct role_transition *transitions;
59923 + struct role_allowed_ip *allowed_ips;
59924 + uid_t *domain_children;
59925 + __u16 domain_child_num;
59926 +
59927 + umode_t umask;
59928 +
59929 + struct acl_subject_label **subj_hash;
59930 + __u32 subj_hash_size;
59931 +};
59932 +
59933 +struct user_acl_role_db {
59934 + struct acl_role_label **r_table;
59935 + __u32 num_pointers; /* Number of allocations to track */
59936 + __u32 num_roles; /* Number of roles */
59937 + __u32 num_domain_children; /* Number of domain children */
59938 + __u32 num_subjects; /* Number of subjects */
59939 + __u32 num_objects; /* Number of objects */
59940 +};
59941 +
59942 +struct acl_object_label {
59943 + char *filename;
59944 + ino_t inode;
59945 + dev_t device;
59946 + __u32 mode;
59947 +
59948 + struct acl_subject_label *nested;
59949 + struct acl_object_label *globbed;
59950 +
59951 + /* next two structures not used */
59952 +
59953 + struct acl_object_label *prev;
59954 + struct acl_object_label *next;
59955 +};
59956 +
59957 +struct acl_ip_label {
59958 + char *iface;
59959 + __u32 addr;
59960 + __u32 netmask;
59961 + __u16 low, high;
59962 + __u8 mode;
59963 + __u32 type;
59964 + __u32 proto[8];
59965 +
59966 + /* next two structures not used */
59967 +
59968 + struct acl_ip_label *prev;
59969 + struct acl_ip_label *next;
59970 +};
59971 +
59972 +struct gr_arg {
59973 + struct user_acl_role_db role_db;
59974 + unsigned char pw[GR_PW_LEN];
59975 + unsigned char salt[GR_SALT_LEN];
59976 + unsigned char sum[GR_SHA_LEN];
59977 + unsigned char sp_role[GR_SPROLE_LEN];
59978 + struct sprole_pw *sprole_pws;
59979 + dev_t segv_device;
59980 + ino_t segv_inode;
59981 + uid_t segv_uid;
59982 + __u16 num_sprole_pws;
59983 + __u16 mode;
59984 +};
59985 +
59986 +struct gr_arg_wrapper {
59987 + struct gr_arg *arg;
59988 + __u32 version;
59989 + __u32 size;
59990 +};
59991 +
59992 +struct subject_map {
59993 + struct acl_subject_label *user;
59994 + struct acl_subject_label *kernel;
59995 + struct subject_map *prev;
59996 + struct subject_map *next;
59997 +};
59998 +
59999 +struct acl_subj_map_db {
60000 + struct subject_map **s_hash;
60001 + __u32 s_size;
60002 +};
60003 +
60004 +/* End Data Structures Section */
60005 +
60006 +/* Hash functions generated by empirical testing by Brad Spengler
60007 + Makes good use of the low bits of the inode. Generally 0-1 times
60008 + in loop for successful match. 0-3 for unsuccessful match.
60009 + Shift/add algorithm with modulus of table size and an XOR*/
60010 +
60011 +static __inline__ unsigned int
60012 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
60013 +{
60014 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
60015 +}
60016 +
60017 + static __inline__ unsigned int
60018 +shash(const struct acl_subject_label *userp, const unsigned int sz)
60019 +{
60020 + return ((const unsigned long)userp % sz);
60021 +}
60022 +
60023 +static __inline__ unsigned int
60024 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
60025 +{
60026 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
60027 +}
60028 +
60029 +static __inline__ unsigned int
60030 +nhash(const char *name, const __u16 len, const unsigned int sz)
60031 +{
60032 + return full_name_hash((const unsigned char *)name, len) % sz;
60033 +}
60034 +
60035 +#define FOR_EACH_ROLE_START(role) \
60036 + role = role_list; \
60037 + while (role) {
60038 +
60039 +#define FOR_EACH_ROLE_END(role) \
60040 + role = role->prev; \
60041 + }
60042 +
60043 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
60044 + subj = NULL; \
60045 + iter = 0; \
60046 + while (iter < role->subj_hash_size) { \
60047 + if (subj == NULL) \
60048 + subj = role->subj_hash[iter]; \
60049 + if (subj == NULL) { \
60050 + iter++; \
60051 + continue; \
60052 + }
60053 +
60054 +#define FOR_EACH_SUBJECT_END(subj,iter) \
60055 + subj = subj->next; \
60056 + if (subj == NULL) \
60057 + iter++; \
60058 + }
60059 +
60060 +
60061 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
60062 + subj = role->hash->first; \
60063 + while (subj != NULL) {
60064 +
60065 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
60066 + subj = subj->next; \
60067 + }
60068 +
60069 +#endif
60070 +
60071 diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
60072 new file mode 100644
60073 index 0000000..323ecf2
60074 --- /dev/null
60075 +++ b/include/linux/gralloc.h
60076 @@ -0,0 +1,9 @@
60077 +#ifndef __GRALLOC_H
60078 +#define __GRALLOC_H
60079 +
60080 +void acl_free_all(void);
60081 +int acl_alloc_stack_init(unsigned long size);
60082 +void *acl_alloc(unsigned long len);
60083 +void *acl_alloc_num(unsigned long num, unsigned long len);
60084 +
60085 +#endif
60086 diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
60087 new file mode 100644
60088 index 0000000..b30e9bc
60089 --- /dev/null
60090 +++ b/include/linux/grdefs.h
60091 @@ -0,0 +1,140 @@
60092 +#ifndef GRDEFS_H
60093 +#define GRDEFS_H
60094 +
60095 +/* Begin grsecurity status declarations */
60096 +
60097 +enum {
60098 + GR_READY = 0x01,
60099 + GR_STATUS_INIT = 0x00 // disabled state
60100 +};
60101 +
60102 +/* Begin ACL declarations */
60103 +
60104 +/* Role flags */
60105 +
60106 +enum {
60107 + GR_ROLE_USER = 0x0001,
60108 + GR_ROLE_GROUP = 0x0002,
60109 + GR_ROLE_DEFAULT = 0x0004,
60110 + GR_ROLE_SPECIAL = 0x0008,
60111 + GR_ROLE_AUTH = 0x0010,
60112 + GR_ROLE_NOPW = 0x0020,
60113 + GR_ROLE_GOD = 0x0040,
60114 + GR_ROLE_LEARN = 0x0080,
60115 + GR_ROLE_TPE = 0x0100,
60116 + GR_ROLE_DOMAIN = 0x0200,
60117 + GR_ROLE_PAM = 0x0400,
60118 + GR_ROLE_PERSIST = 0x0800
60119 +};
60120 +
60121 +/* ACL Subject and Object mode flags */
60122 +enum {
60123 + GR_DELETED = 0x80000000
60124 +};
60125 +
60126 +/* ACL Object-only mode flags */
60127 +enum {
60128 + GR_READ = 0x00000001,
60129 + GR_APPEND = 0x00000002,
60130 + GR_WRITE = 0x00000004,
60131 + GR_EXEC = 0x00000008,
60132 + GR_FIND = 0x00000010,
60133 + GR_INHERIT = 0x00000020,
60134 + GR_SETID = 0x00000040,
60135 + GR_CREATE = 0x00000080,
60136 + GR_DELETE = 0x00000100,
60137 + GR_LINK = 0x00000200,
60138 + GR_AUDIT_READ = 0x00000400,
60139 + GR_AUDIT_APPEND = 0x00000800,
60140 + GR_AUDIT_WRITE = 0x00001000,
60141 + GR_AUDIT_EXEC = 0x00002000,
60142 + GR_AUDIT_FIND = 0x00004000,
60143 + GR_AUDIT_INHERIT= 0x00008000,
60144 + GR_AUDIT_SETID = 0x00010000,
60145 + GR_AUDIT_CREATE = 0x00020000,
60146 + GR_AUDIT_DELETE = 0x00040000,
60147 + GR_AUDIT_LINK = 0x00080000,
60148 + GR_PTRACERD = 0x00100000,
60149 + GR_NOPTRACE = 0x00200000,
60150 + GR_SUPPRESS = 0x00400000,
60151 + GR_NOLEARN = 0x00800000,
60152 + GR_INIT_TRANSFER= 0x01000000
60153 +};
60154 +
60155 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
60156 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
60157 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
60158 +
60159 +/* ACL subject-only mode flags */
60160 +enum {
60161 + GR_KILL = 0x00000001,
60162 + GR_VIEW = 0x00000002,
60163 + GR_PROTECTED = 0x00000004,
60164 + GR_LEARN = 0x00000008,
60165 + GR_OVERRIDE = 0x00000010,
60166 + /* just a placeholder, this mode is only used in userspace */
60167 + GR_DUMMY = 0x00000020,
60168 + GR_PROTSHM = 0x00000040,
60169 + GR_KILLPROC = 0x00000080,
60170 + GR_KILLIPPROC = 0x00000100,
60171 + /* just a placeholder, this mode is only used in userspace */
60172 + GR_NOTROJAN = 0x00000200,
60173 + GR_PROTPROCFD = 0x00000400,
60174 + GR_PROCACCT = 0x00000800,
60175 + GR_RELAXPTRACE = 0x00001000,
60176 + GR_NESTED = 0x00002000,
60177 + GR_INHERITLEARN = 0x00004000,
60178 + GR_PROCFIND = 0x00008000,
60179 + GR_POVERRIDE = 0x00010000,
60180 + GR_KERNELAUTH = 0x00020000,
60181 + GR_ATSECURE = 0x00040000,
60182 + GR_SHMEXEC = 0x00080000
60183 +};
60184 +
60185 +enum {
60186 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
60187 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
60188 + GR_PAX_ENABLE_MPROTECT = 0x0004,
60189 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
60190 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
60191 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
60192 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
60193 + GR_PAX_DISABLE_MPROTECT = 0x0400,
60194 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
60195 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
60196 +};
60197 +
60198 +enum {
60199 + GR_ID_USER = 0x01,
60200 + GR_ID_GROUP = 0x02,
60201 +};
60202 +
60203 +enum {
60204 + GR_ID_ALLOW = 0x01,
60205 + GR_ID_DENY = 0x02,
60206 +};
60207 +
60208 +#define GR_CRASH_RES 31
60209 +#define GR_UIDTABLE_MAX 500
60210 +
60211 +/* begin resource learning section */
60212 +enum {
60213 + GR_RLIM_CPU_BUMP = 60,
60214 + GR_RLIM_FSIZE_BUMP = 50000,
60215 + GR_RLIM_DATA_BUMP = 10000,
60216 + GR_RLIM_STACK_BUMP = 1000,
60217 + GR_RLIM_CORE_BUMP = 10000,
60218 + GR_RLIM_RSS_BUMP = 500000,
60219 + GR_RLIM_NPROC_BUMP = 1,
60220 + GR_RLIM_NOFILE_BUMP = 5,
60221 + GR_RLIM_MEMLOCK_BUMP = 50000,
60222 + GR_RLIM_AS_BUMP = 500000,
60223 + GR_RLIM_LOCKS_BUMP = 2,
60224 + GR_RLIM_SIGPENDING_BUMP = 5,
60225 + GR_RLIM_MSGQUEUE_BUMP = 10000,
60226 + GR_RLIM_NICE_BUMP = 1,
60227 + GR_RLIM_RTPRIO_BUMP = 1,
60228 + GR_RLIM_RTTIME_BUMP = 1000000
60229 +};
60230 +
60231 +#endif
60232 diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
60233 new file mode 100644
60234 index 0000000..da390f1
60235 --- /dev/null
60236 +++ b/include/linux/grinternal.h
60237 @@ -0,0 +1,221 @@
60238 +#ifndef __GRINTERNAL_H
60239 +#define __GRINTERNAL_H
60240 +
60241 +#ifdef CONFIG_GRKERNSEC
60242 +
60243 +#include <linux/fs.h>
60244 +#include <linux/mnt_namespace.h>
60245 +#include <linux/nsproxy.h>
60246 +#include <linux/gracl.h>
60247 +#include <linux/grdefs.h>
60248 +#include <linux/grmsg.h>
60249 +
60250 +void gr_add_learn_entry(const char *fmt, ...)
60251 + __attribute__ ((format (printf, 1, 2)));
60252 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
60253 + const struct vfsmount *mnt);
60254 +__u32 gr_check_create(const struct dentry *new_dentry,
60255 + const struct dentry *parent,
60256 + const struct vfsmount *mnt, const __u32 mode);
60257 +int gr_check_protected_task(const struct task_struct *task);
60258 +__u32 to_gr_audit(const __u32 reqmode);
60259 +int gr_set_acls(const int type);
60260 +int gr_apply_subject_to_task(struct task_struct *task);
60261 +int gr_acl_is_enabled(void);
60262 +char gr_roletype_to_char(void);
60263 +
60264 +void gr_handle_alertkill(struct task_struct *task);
60265 +char *gr_to_filename(const struct dentry *dentry,
60266 + const struct vfsmount *mnt);
60267 +char *gr_to_filename1(const struct dentry *dentry,
60268 + const struct vfsmount *mnt);
60269 +char *gr_to_filename2(const struct dentry *dentry,
60270 + const struct vfsmount *mnt);
60271 +char *gr_to_filename3(const struct dentry *dentry,
60272 + const struct vfsmount *mnt);
60273 +
60274 +extern int grsec_enable_ptrace_readexec;
60275 +extern int grsec_enable_harden_ptrace;
60276 +extern int grsec_enable_link;
60277 +extern int grsec_enable_fifo;
60278 +extern int grsec_enable_execve;
60279 +extern int grsec_enable_shm;
60280 +extern int grsec_enable_execlog;
60281 +extern int grsec_enable_signal;
60282 +extern int grsec_enable_audit_ptrace;
60283 +extern int grsec_enable_forkfail;
60284 +extern int grsec_enable_time;
60285 +extern int grsec_enable_rofs;
60286 +extern int grsec_enable_chroot_shmat;
60287 +extern int grsec_enable_chroot_mount;
60288 +extern int grsec_enable_chroot_double;
60289 +extern int grsec_enable_chroot_pivot;
60290 +extern int grsec_enable_chroot_chdir;
60291 +extern int grsec_enable_chroot_chmod;
60292 +extern int grsec_enable_chroot_mknod;
60293 +extern int grsec_enable_chroot_fchdir;
60294 +extern int grsec_enable_chroot_nice;
60295 +extern int grsec_enable_chroot_execlog;
60296 +extern int grsec_enable_chroot_caps;
60297 +extern int grsec_enable_chroot_sysctl;
60298 +extern int grsec_enable_chroot_unix;
60299 +extern int grsec_enable_tpe;
60300 +extern int grsec_tpe_gid;
60301 +extern int grsec_enable_tpe_all;
60302 +extern int grsec_enable_tpe_invert;
60303 +extern int grsec_enable_socket_all;
60304 +extern int grsec_socket_all_gid;
60305 +extern int grsec_enable_socket_client;
60306 +extern int grsec_socket_client_gid;
60307 +extern int grsec_enable_socket_server;
60308 +extern int grsec_socket_server_gid;
60309 +extern int grsec_audit_gid;
60310 +extern int grsec_enable_group;
60311 +extern int grsec_enable_audit_textrel;
60312 +extern int grsec_enable_log_rwxmaps;
60313 +extern int grsec_enable_mount;
60314 +extern int grsec_enable_chdir;
60315 +extern int grsec_resource_logging;
60316 +extern int grsec_enable_blackhole;
60317 +extern int grsec_lastack_retries;
60318 +extern int grsec_enable_brute;
60319 +extern int grsec_lock;
60320 +
60321 +extern spinlock_t grsec_alert_lock;
60322 +extern unsigned long grsec_alert_wtime;
60323 +extern unsigned long grsec_alert_fyet;
60324 +
60325 +extern spinlock_t grsec_audit_lock;
60326 +
60327 +extern rwlock_t grsec_exec_file_lock;
60328 +
60329 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
60330 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
60331 + (tsk)->exec_file->f_vfsmnt) : "/")
60332 +
60333 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
60334 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
60335 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
60336 +
60337 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
60338 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
60339 + (tsk)->exec_file->f_vfsmnt) : "/")
60340 +
60341 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
60342 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
60343 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
60344 +
60345 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
60346 +
60347 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
60348 +
60349 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
60350 + (task)->pid, (cred)->uid, \
60351 + (cred)->euid, (cred)->gid, (cred)->egid, \
60352 + gr_parent_task_fullpath(task), \
60353 + (task)->real_parent->comm, (task)->real_parent->pid, \
60354 + (pcred)->uid, (pcred)->euid, \
60355 + (pcred)->gid, (pcred)->egid
60356 +
60357 +#define GR_CHROOT_CAPS {{ \
60358 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
60359 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
60360 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
60361 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
60362 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
60363 + CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
60364 + CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
60365 +
60366 +#define security_learn(normal_msg,args...) \
60367 +({ \
60368 + read_lock(&grsec_exec_file_lock); \
60369 + gr_add_learn_entry(normal_msg "\n", ## args); \
60370 + read_unlock(&grsec_exec_file_lock); \
60371 +})
60372 +
60373 +enum {
60374 + GR_DO_AUDIT,
60375 + GR_DONT_AUDIT,
60376 + /* used for non-audit messages that we shouldn't kill the task on */
60377 + GR_DONT_AUDIT_GOOD
60378 +};
60379 +
60380 +enum {
60381 + GR_TTYSNIFF,
60382 + GR_RBAC,
60383 + GR_RBAC_STR,
60384 + GR_STR_RBAC,
60385 + GR_RBAC_MODE2,
60386 + GR_RBAC_MODE3,
60387 + GR_FILENAME,
60388 + GR_SYSCTL_HIDDEN,
60389 + GR_NOARGS,
60390 + GR_ONE_INT,
60391 + GR_ONE_INT_TWO_STR,
60392 + GR_ONE_STR,
60393 + GR_STR_INT,
60394 + GR_TWO_STR_INT,
60395 + GR_TWO_INT,
60396 + GR_TWO_U64,
60397 + GR_THREE_INT,
60398 + GR_FIVE_INT_TWO_STR,
60399 + GR_TWO_STR,
60400 + GR_THREE_STR,
60401 + GR_FOUR_STR,
60402 + GR_STR_FILENAME,
60403 + GR_FILENAME_STR,
60404 + GR_FILENAME_TWO_INT,
60405 + GR_FILENAME_TWO_INT_STR,
60406 + GR_TEXTREL,
60407 + GR_PTRACE,
60408 + GR_RESOURCE,
60409 + GR_CAP,
60410 + GR_SIG,
60411 + GR_SIG2,
60412 + GR_CRASH1,
60413 + GR_CRASH2,
60414 + GR_PSACCT,
60415 + GR_RWXMAP
60416 +};
60417 +
60418 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
60419 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
60420 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
60421 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
60422 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
60423 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
60424 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
60425 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
60426 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
60427 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
60428 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
60429 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
60430 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
60431 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
60432 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
60433 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
60434 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
60435 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
60436 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
60437 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
60438 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
60439 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
60440 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
60441 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
60442 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
60443 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
60444 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
60445 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
60446 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
60447 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
60448 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
60449 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
60450 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
60451 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
60452 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
60453 +
60454 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
60455 +
60456 +#endif
60457 +
60458 +#endif
60459 diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
60460 new file mode 100644
60461 index 0000000..ae576a1
60462 --- /dev/null
60463 +++ b/include/linux/grmsg.h
60464 @@ -0,0 +1,109 @@
60465 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
60466 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
60467 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
60468 +#define GR_STOPMOD_MSG "denied modification of module state by "
60469 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
60470 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
60471 +#define GR_IOPERM_MSG "denied use of ioperm() by "
60472 +#define GR_IOPL_MSG "denied use of iopl() by "
60473 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
60474 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
60475 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
60476 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
60477 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
60478 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
60479 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
60480 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
60481 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
60482 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
60483 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
60484 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
60485 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
60486 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
60487 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
60488 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
60489 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
60490 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
60491 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
60492 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
60493 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
60494 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
60495 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
60496 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
60497 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
60498 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
60499 +#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
60500 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
60501 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
60502 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
60503 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
60504 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
60505 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
60506 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
60507 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
60508 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
60509 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
60510 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
60511 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
60512 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
60513 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
60514 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
60515 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
60516 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
60517 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
60518 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
60519 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
60520 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
60521 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
60522 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
60523 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
60524 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
60525 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
60526 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
60527 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
60528 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
60529 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
60530 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
60531 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
60532 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
60533 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
60534 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
60535 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
60536 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
60537 +#define GR_NICE_CHROOT_MSG "denied priority change by "
60538 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
60539 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
60540 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
60541 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
60542 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
60543 +#define GR_TIME_MSG "time set by "
60544 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
60545 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
60546 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
60547 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
60548 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
60549 +#define GR_BIND_MSG "denied bind() by "
60550 +#define GR_CONNECT_MSG "denied connect() by "
60551 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
60552 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
60553 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
60554 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
60555 +#define GR_CAP_ACL_MSG "use of %s denied for "
60556 +#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
60557 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
60558 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
60559 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
60560 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
60561 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
60562 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
60563 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
60564 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
60565 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
60566 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
60567 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
60568 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
60569 +#define GR_VM86_MSG "denied use of vm86 by "
60570 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
60571 +#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
60572 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
60573 +#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
60574 diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
60575 new file mode 100644
60576 index 0000000..acd05db
60577 --- /dev/null
60578 +++ b/include/linux/grsecurity.h
60579 @@ -0,0 +1,232 @@
60580 +#ifndef GR_SECURITY_H
60581 +#define GR_SECURITY_H
60582 +#include <linux/fs.h>
60583 +#include <linux/fs_struct.h>
60584 +#include <linux/binfmts.h>
60585 +#include <linux/gracl.h>
60586 +
60587 +/* notify of brain-dead configs */
60588 +#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60589 +#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
60590 +#endif
60591 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
60592 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
60593 +#endif
60594 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
60595 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
60596 +#endif
60597 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
60598 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
60599 +#endif
60600 +
60601 +#include <linux/compat.h>
60602 +
60603 +struct user_arg_ptr {
60604 +#ifdef CONFIG_COMPAT
60605 + bool is_compat;
60606 +#endif
60607 + union {
60608 + const char __user *const __user *native;
60609 +#ifdef CONFIG_COMPAT
60610 + compat_uptr_t __user *compat;
60611 +#endif
60612 + } ptr;
60613 +};
60614 +
60615 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
60616 +void gr_handle_brute_check(void);
60617 +void gr_handle_kernel_exploit(void);
60618 +int gr_process_user_ban(void);
60619 +
60620 +char gr_roletype_to_char(void);
60621 +
60622 +int gr_acl_enable_at_secure(void);
60623 +
60624 +int gr_check_user_change(int real, int effective, int fs);
60625 +int gr_check_group_change(int real, int effective, int fs);
60626 +
60627 +void gr_del_task_from_ip_table(struct task_struct *p);
60628 +
60629 +int gr_pid_is_chrooted(struct task_struct *p);
60630 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
60631 +int gr_handle_chroot_nice(void);
60632 +int gr_handle_chroot_sysctl(const int op);
60633 +int gr_handle_chroot_setpriority(struct task_struct *p,
60634 + const int niceval);
60635 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
60636 +int gr_handle_chroot_chroot(const struct dentry *dentry,
60637 + const struct vfsmount *mnt);
60638 +void gr_handle_chroot_chdir(struct path *path);
60639 +int gr_handle_chroot_chmod(const struct dentry *dentry,
60640 + const struct vfsmount *mnt, const int mode);
60641 +int gr_handle_chroot_mknod(const struct dentry *dentry,
60642 + const struct vfsmount *mnt, const int mode);
60643 +int gr_handle_chroot_mount(const struct dentry *dentry,
60644 + const struct vfsmount *mnt,
60645 + const char *dev_name);
60646 +int gr_handle_chroot_pivot(void);
60647 +int gr_handle_chroot_unix(const pid_t pid);
60648 +
60649 +int gr_handle_rawio(const struct inode *inode);
60650 +
60651 +void gr_handle_ioperm(void);
60652 +void gr_handle_iopl(void);
60653 +
60654 +umode_t gr_acl_umask(void);
60655 +
60656 +int gr_tpe_allow(const struct file *file);
60657 +
60658 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
60659 +void gr_clear_chroot_entries(struct task_struct *task);
60660 +
60661 +void gr_log_forkfail(const int retval);
60662 +void gr_log_timechange(void);
60663 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
60664 +void gr_log_chdir(const struct dentry *dentry,
60665 + const struct vfsmount *mnt);
60666 +void gr_log_chroot_exec(const struct dentry *dentry,
60667 + const struct vfsmount *mnt);
60668 +void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
60669 +void gr_log_remount(const char *devname, const int retval);
60670 +void gr_log_unmount(const char *devname, const int retval);
60671 +void gr_log_mount(const char *from, const char *to, const int retval);
60672 +void gr_log_textrel(struct vm_area_struct *vma);
60673 +void gr_log_rwxmmap(struct file *file);
60674 +void gr_log_rwxmprotect(struct file *file);
60675 +
60676 +int gr_handle_follow_link(const struct inode *parent,
60677 + const struct inode *inode,
60678 + const struct dentry *dentry,
60679 + const struct vfsmount *mnt);
60680 +int gr_handle_fifo(const struct dentry *dentry,
60681 + const struct vfsmount *mnt,
60682 + const struct dentry *dir, const int flag,
60683 + const int acc_mode);
60684 +int gr_handle_hardlink(const struct dentry *dentry,
60685 + const struct vfsmount *mnt,
60686 + struct inode *inode,
60687 + const int mode, const char *to);
60688 +
60689 +int gr_is_capable(const int cap);
60690 +int gr_is_capable_nolog(const int cap);
60691 +int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
60692 +int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
60693 +
60694 +void gr_learn_resource(const struct task_struct *task, const int limit,
60695 + const unsigned long wanted, const int gt);
60696 +void gr_copy_label(struct task_struct *tsk);
60697 +void gr_handle_crash(struct task_struct *task, const int sig);
60698 +int gr_handle_signal(const struct task_struct *p, const int sig);
60699 +int gr_check_crash_uid(const uid_t uid);
60700 +int gr_check_protected_task(const struct task_struct *task);
60701 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
60702 +int gr_acl_handle_mmap(const struct file *file,
60703 + const unsigned long prot);
60704 +int gr_acl_handle_mprotect(const struct file *file,
60705 + const unsigned long prot);
60706 +int gr_check_hidden_task(const struct task_struct *tsk);
60707 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
60708 + const struct vfsmount *mnt);
60709 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
60710 + const struct vfsmount *mnt);
60711 +__u32 gr_acl_handle_access(const struct dentry *dentry,
60712 + const struct vfsmount *mnt, const int fmode);
60713 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
60714 + const struct vfsmount *mnt, umode_t *mode);
60715 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
60716 + const struct vfsmount *mnt);
60717 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
60718 + const struct vfsmount *mnt);
60719 +int gr_handle_ptrace(struct task_struct *task, const long request);
60720 +int gr_handle_proc_ptrace(struct task_struct *task);
60721 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
60722 + const struct vfsmount *mnt);
60723 +int gr_check_crash_exec(const struct file *filp);
60724 +int gr_acl_is_enabled(void);
60725 +void gr_set_kernel_label(struct task_struct *task);
60726 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
60727 + const gid_t gid);
60728 +int gr_set_proc_label(const struct dentry *dentry,
60729 + const struct vfsmount *mnt,
60730 + const int unsafe_flags);
60731 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
60732 + const struct vfsmount *mnt);
60733 +__u32 gr_acl_handle_open(const struct dentry *dentry,
60734 + const struct vfsmount *mnt, int acc_mode);
60735 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
60736 + const struct dentry *p_dentry,
60737 + const struct vfsmount *p_mnt,
60738 + int open_flags, int acc_mode, const int imode);
60739 +void gr_handle_create(const struct dentry *dentry,
60740 + const struct vfsmount *mnt);
60741 +void gr_handle_proc_create(const struct dentry *dentry,
60742 + const struct inode *inode);
60743 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
60744 + const struct dentry *parent_dentry,
60745 + const struct vfsmount *parent_mnt,
60746 + const int mode);
60747 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
60748 + const struct dentry *parent_dentry,
60749 + const struct vfsmount *parent_mnt);
60750 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
60751 + const struct vfsmount *mnt);
60752 +void gr_handle_delete(const ino_t ino, const dev_t dev);
60753 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
60754 + const struct vfsmount *mnt);
60755 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
60756 + const struct dentry *parent_dentry,
60757 + const struct vfsmount *parent_mnt,
60758 + const char *from);
60759 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
60760 + const struct dentry *parent_dentry,
60761 + const struct vfsmount *parent_mnt,
60762 + const struct dentry *old_dentry,
60763 + const struct vfsmount *old_mnt, const char *to);
60764 +int gr_acl_handle_rename(struct dentry *new_dentry,
60765 + struct dentry *parent_dentry,
60766 + const struct vfsmount *parent_mnt,
60767 + struct dentry *old_dentry,
60768 + struct inode *old_parent_inode,
60769 + struct vfsmount *old_mnt, const char *newname);
60770 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
60771 + struct dentry *old_dentry,
60772 + struct dentry *new_dentry,
60773 + struct vfsmount *mnt, const __u8 replace);
60774 +__u32 gr_check_link(const struct dentry *new_dentry,
60775 + const struct dentry *parent_dentry,
60776 + const struct vfsmount *parent_mnt,
60777 + const struct dentry *old_dentry,
60778 + const struct vfsmount *old_mnt);
60779 +int gr_acl_handle_filldir(const struct file *file, const char *name,
60780 + const unsigned int namelen, const ino_t ino);
60781 +
60782 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
60783 + const struct vfsmount *mnt);
60784 +void gr_acl_handle_exit(void);
60785 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
60786 +int gr_acl_handle_procpidmem(const struct task_struct *task);
60787 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
60788 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
60789 +void gr_audit_ptrace(struct task_struct *task);
60790 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
60791 +
60792 +int gr_ptrace_readexec(struct file *file, int unsafe_flags);
60793 +
60794 +#ifdef CONFIG_GRKERNSEC
60795 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
60796 +void gr_handle_vm86(void);
60797 +void gr_handle_mem_readwrite(u64 from, u64 to);
60798 +
60799 +void gr_log_badprocpid(const char *entry);
60800 +
60801 +extern int grsec_enable_dmesg;
60802 +extern int grsec_disable_privio;
60803 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
60804 +extern int grsec_enable_chroot_findtask;
60805 +#endif
60806 +#ifdef CONFIG_GRKERNSEC_SETXID
60807 +extern int grsec_enable_setxid;
60808 +#endif
60809 +#endif
60810 +
60811 +#endif
60812 diff --git a/include/linux/grsock.h b/include/linux/grsock.h
60813 new file mode 100644
60814 index 0000000..e7ffaaf
60815 --- /dev/null
60816 +++ b/include/linux/grsock.h
60817 @@ -0,0 +1,19 @@
60818 +#ifndef __GRSOCK_H
60819 +#define __GRSOCK_H
60820 +
60821 +extern void gr_attach_curr_ip(const struct sock *sk);
60822 +extern int gr_handle_sock_all(const int family, const int type,
60823 + const int protocol);
60824 +extern int gr_handle_sock_server(const struct sockaddr *sck);
60825 +extern int gr_handle_sock_server_other(const struct sock *sck);
60826 +extern int gr_handle_sock_client(const struct sockaddr *sck);
60827 +extern int gr_search_connect(struct socket * sock,
60828 + struct sockaddr_in * addr);
60829 +extern int gr_search_bind(struct socket * sock,
60830 + struct sockaddr_in * addr);
60831 +extern int gr_search_listen(struct socket * sock);
60832 +extern int gr_search_accept(struct socket * sock);
60833 +extern int gr_search_socket(const int domain, const int type,
60834 + const int protocol);
60835 +
60836 +#endif
60837 diff --git a/include/linux/hid.h b/include/linux/hid.h
60838 index 3a95da6..51986f1 100644
60839 --- a/include/linux/hid.h
60840 +++ b/include/linux/hid.h
60841 @@ -696,7 +696,7 @@ struct hid_ll_driver {
60842 unsigned int code, int value);
60843
60844 int (*parse)(struct hid_device *hdev);
60845 -};
60846 +} __no_const;
60847
60848 #define PM_HINT_FULLON 1<<5
60849 #define PM_HINT_NORMAL 1<<1
60850 diff --git a/include/linux/highmem.h b/include/linux/highmem.h
60851 index 3a93f73..b19d0b3 100644
60852 --- a/include/linux/highmem.h
60853 +++ b/include/linux/highmem.h
60854 @@ -185,6 +185,18 @@ static inline void clear_highpage(struct page *page)
60855 kunmap_atomic(kaddr, KM_USER0);
60856 }
60857
60858 +static inline void sanitize_highpage(struct page *page)
60859 +{
60860 + void *kaddr;
60861 + unsigned long flags;
60862 +
60863 + local_irq_save(flags);
60864 + kaddr = kmap_atomic(page, KM_CLEARPAGE);
60865 + clear_page(kaddr);
60866 + kunmap_atomic(kaddr, KM_CLEARPAGE);
60867 + local_irq_restore(flags);
60868 +}
60869 +
60870 static inline void zero_user_segments(struct page *page,
60871 unsigned start1, unsigned end1,
60872 unsigned start2, unsigned end2)
60873 diff --git a/include/linux/i2c.h b/include/linux/i2c.h
60874 index 8e25a91..551b161 100644
60875 --- a/include/linux/i2c.h
60876 +++ b/include/linux/i2c.h
60877 @@ -364,6 +364,7 @@ struct i2c_algorithm {
60878 /* To determine what the adapter supports */
60879 u32 (*functionality) (struct i2c_adapter *);
60880 };
60881 +typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
60882
60883 /*
60884 * i2c_adapter is the structure used to identify a physical i2c bus along
60885 diff --git a/include/linux/i2o.h b/include/linux/i2o.h
60886 index a6deef4..c56a7f2 100644
60887 --- a/include/linux/i2o.h
60888 +++ b/include/linux/i2o.h
60889 @@ -564,7 +564,7 @@ struct i2o_controller {
60890 struct i2o_device *exec; /* Executive */
60891 #if BITS_PER_LONG == 64
60892 spinlock_t context_list_lock; /* lock for context_list */
60893 - atomic_t context_list_counter; /* needed for unique contexts */
60894 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
60895 struct list_head context_list; /* list of context id's
60896 and pointers */
60897 #endif
60898 diff --git a/include/linux/if_team.h b/include/linux/if_team.h
60899 index 58404b0..439ed95 100644
60900 --- a/include/linux/if_team.h
60901 +++ b/include/linux/if_team.h
60902 @@ -64,6 +64,7 @@ struct team_mode_ops {
60903 void (*port_leave)(struct team *team, struct team_port *port);
60904 void (*port_change_mac)(struct team *team, struct team_port *port);
60905 };
60906 +typedef struct team_mode_ops __no_const team_mode_ops_no_const;
60907
60908 enum team_option_type {
60909 TEAM_OPTION_TYPE_U32,
60910 @@ -112,7 +113,7 @@ struct team {
60911 struct list_head option_list;
60912
60913 const struct team_mode *mode;
60914 - struct team_mode_ops ops;
60915 + team_mode_ops_no_const ops;
60916 long mode_priv[TEAM_MODE_PRIV_LONGS];
60917 };
60918
60919 diff --git a/include/linux/init.h b/include/linux/init.h
60920 index 6b95109..4aca62c 100644
60921 --- a/include/linux/init.h
60922 +++ b/include/linux/init.h
60923 @@ -294,13 +294,13 @@ void __init parse_early_options(char *cmdline);
60924
60925 /* Each module must use one module_init(). */
60926 #define module_init(initfn) \
60927 - static inline initcall_t __inittest(void) \
60928 + static inline __used initcall_t __inittest(void) \
60929 { return initfn; } \
60930 int init_module(void) __attribute__((alias(#initfn)));
60931
60932 /* This is only required if you want to be unloadable. */
60933 #define module_exit(exitfn) \
60934 - static inline exitcall_t __exittest(void) \
60935 + static inline __used exitcall_t __exittest(void) \
60936 { return exitfn; } \
60937 void cleanup_module(void) __attribute__((alias(#exitfn)));
60938
60939 diff --git a/include/linux/init_task.h b/include/linux/init_task.h
60940 index 9c66b1a..a3fdded 100644
60941 --- a/include/linux/init_task.h
60942 +++ b/include/linux/init_task.h
60943 @@ -127,6 +127,12 @@ extern struct cred init_cred;
60944
60945 #define INIT_TASK_COMM "swapper"
60946
60947 +#ifdef CONFIG_X86
60948 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
60949 +#else
60950 +#define INIT_TASK_THREAD_INFO
60951 +#endif
60952 +
60953 /*
60954 * INIT_TASK is used to set up the first task table, touch at
60955 * your own risk!. Base=0, limit=0x1fffff (=2MB)
60956 @@ -165,6 +171,7 @@ extern struct cred init_cred;
60957 RCU_INIT_POINTER(.cred, &init_cred), \
60958 .comm = INIT_TASK_COMM, \
60959 .thread = INIT_THREAD, \
60960 + INIT_TASK_THREAD_INFO \
60961 .fs = &init_fs, \
60962 .files = &init_files, \
60963 .signal = &init_signals, \
60964 diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
60965 index e6ca56d..8583707 100644
60966 --- a/include/linux/intel-iommu.h
60967 +++ b/include/linux/intel-iommu.h
60968 @@ -296,7 +296,7 @@ struct iommu_flush {
60969 u8 fm, u64 type);
60970 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
60971 unsigned int size_order, u64 type);
60972 -};
60973 +} __no_const;
60974
60975 enum {
60976 SR_DMAR_FECTL_REG,
60977 diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
60978 index a64b00e..464d8bc 100644
60979 --- a/include/linux/interrupt.h
60980 +++ b/include/linux/interrupt.h
60981 @@ -441,7 +441,7 @@ enum
60982 /* map softirq index to softirq name. update 'softirq_to_name' in
60983 * kernel/softirq.c when adding a new softirq.
60984 */
60985 -extern char *softirq_to_name[NR_SOFTIRQS];
60986 +extern const char * const softirq_to_name[NR_SOFTIRQS];
60987
60988 /* softirq mask and active fields moved to irq_cpustat_t in
60989 * asm/hardirq.h to get better cache usage. KAO
60990 @@ -449,12 +449,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
60991
60992 struct softirq_action
60993 {
60994 - void (*action)(struct softirq_action *);
60995 + void (*action)(void);
60996 };
60997
60998 asmlinkage void do_softirq(void);
60999 asmlinkage void __do_softirq(void);
61000 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
61001 +extern void open_softirq(int nr, void (*action)(void));
61002 extern void softirq_init(void);
61003 static inline void __raise_softirq_irqoff(unsigned int nr)
61004 {
61005 diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
61006 index 3875719..4cd454c 100644
61007 --- a/include/linux/kallsyms.h
61008 +++ b/include/linux/kallsyms.h
61009 @@ -15,7 +15,8 @@
61010
61011 struct module;
61012
61013 -#ifdef CONFIG_KALLSYMS
61014 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
61015 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
61016 /* Lookup the address for a symbol. Returns 0 if not found. */
61017 unsigned long kallsyms_lookup_name(const char *name);
61018
61019 @@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
61020 /* Stupid that this does nothing, but I didn't create this mess. */
61021 #define __print_symbol(fmt, addr)
61022 #endif /*CONFIG_KALLSYMS*/
61023 +#else /* when included by kallsyms.c, vsnprintf.c, or
61024 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
61025 +extern void __print_symbol(const char *fmt, unsigned long address);
61026 +extern int sprint_backtrace(char *buffer, unsigned long address);
61027 +extern int sprint_symbol(char *buffer, unsigned long address);
61028 +const char *kallsyms_lookup(unsigned long addr,
61029 + unsigned long *symbolsize,
61030 + unsigned long *offset,
61031 + char **modname, char *namebuf);
61032 +#endif
61033
61034 /* This macro allows us to keep printk typechecking */
61035 static __printf(1, 2)
61036 diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
61037 index fa39183..40160be 100644
61038 --- a/include/linux/kgdb.h
61039 +++ b/include/linux/kgdb.h
61040 @@ -53,7 +53,7 @@ extern int kgdb_connected;
61041 extern int kgdb_io_module_registered;
61042
61043 extern atomic_t kgdb_setting_breakpoint;
61044 -extern atomic_t kgdb_cpu_doing_single_step;
61045 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
61046
61047 extern struct task_struct *kgdb_usethread;
61048 extern struct task_struct *kgdb_contthread;
61049 @@ -251,7 +251,7 @@ struct kgdb_arch {
61050 void (*disable_hw_break)(struct pt_regs *regs);
61051 void (*remove_all_hw_break)(void);
61052 void (*correct_hw_break)(void);
61053 -};
61054 +} __do_const;
61055
61056 /**
61057 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
61058 @@ -276,7 +276,7 @@ struct kgdb_io {
61059 void (*pre_exception) (void);
61060 void (*post_exception) (void);
61061 int is_console;
61062 -};
61063 +} __do_const;
61064
61065 extern struct kgdb_arch arch_kgdb_ops;
61066
61067 diff --git a/include/linux/kmod.h b/include/linux/kmod.h
61068 index 722f477..eef2a27 100644
61069 --- a/include/linux/kmod.h
61070 +++ b/include/linux/kmod.h
61071 @@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
61072 * usually useless though. */
61073 extern __printf(2, 3)
61074 int __request_module(bool wait, const char *name, ...);
61075 +extern __printf(3, 4)
61076 +int ___request_module(bool wait, char *param_name, const char *name, ...);
61077 #define request_module(mod...) __request_module(true, mod)
61078 #define request_module_nowait(mod...) __request_module(false, mod)
61079 #define try_then_request_module(x, mod...) \
61080 diff --git a/include/linux/kref.h b/include/linux/kref.h
61081 index 9c07dce..a92fa71 100644
61082 --- a/include/linux/kref.h
61083 +++ b/include/linux/kref.h
61084 @@ -63,7 +63,7 @@ static inline void kref_get(struct kref *kref)
61085 static inline int kref_sub(struct kref *kref, unsigned int count,
61086 void (*release)(struct kref *kref))
61087 {
61088 - WARN_ON(release == NULL);
61089 + BUG_ON(release == NULL);
61090
61091 if (atomic_sub_and_test((int) count, &kref->refcount)) {
61092 release(kref);
61093 diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
61094 index 900c763..43260cf 100644
61095 --- a/include/linux/kvm_host.h
61096 +++ b/include/linux/kvm_host.h
61097 @@ -326,7 +326,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
61098 void vcpu_load(struct kvm_vcpu *vcpu);
61099 void vcpu_put(struct kvm_vcpu *vcpu);
61100
61101 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
61102 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
61103 struct module *module);
61104 void kvm_exit(void);
61105
61106 @@ -485,7 +485,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
61107 struct kvm_guest_debug *dbg);
61108 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
61109
61110 -int kvm_arch_init(void *opaque);
61111 +int kvm_arch_init(const void *opaque);
61112 void kvm_arch_exit(void);
61113
61114 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
61115 diff --git a/include/linux/libata.h b/include/linux/libata.h
61116 index cafc09a..d7e7829 100644
61117 --- a/include/linux/libata.h
61118 +++ b/include/linux/libata.h
61119 @@ -909,7 +909,7 @@ struct ata_port_operations {
61120 * fields must be pointers.
61121 */
61122 const struct ata_port_operations *inherits;
61123 -};
61124 +} __do_const;
61125
61126 struct ata_port_info {
61127 unsigned long flags;
61128 diff --git a/include/linux/mca.h b/include/linux/mca.h
61129 index 3797270..7765ede 100644
61130 --- a/include/linux/mca.h
61131 +++ b/include/linux/mca.h
61132 @@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
61133 int region);
61134 void * (*mca_transform_memory)(struct mca_device *,
61135 void *memory);
61136 -};
61137 +} __no_const;
61138
61139 struct mca_bus {
61140 u64 default_dma_mask;
61141 diff --git a/include/linux/memory.h b/include/linux/memory.h
61142 index 1ac7f6e..a5794d0 100644
61143 --- a/include/linux/memory.h
61144 +++ b/include/linux/memory.h
61145 @@ -143,7 +143,7 @@ struct memory_accessor {
61146 size_t count);
61147 ssize_t (*write)(struct memory_accessor *, const char *buf,
61148 off_t offset, size_t count);
61149 -};
61150 +} __no_const;
61151
61152 /*
61153 * Kernel text modification mutex, used for code patching. Users of this lock
61154 diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
61155 index 9970337..9444122 100644
61156 --- a/include/linux/mfd/abx500.h
61157 +++ b/include/linux/mfd/abx500.h
61158 @@ -188,6 +188,7 @@ struct abx500_ops {
61159 int (*event_registers_startup_state_get) (struct device *, u8 *);
61160 int (*startup_irq_enabled) (struct device *, unsigned int);
61161 };
61162 +typedef struct abx500_ops __no_const abx500_ops_no_const;
61163
61164 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
61165 void abx500_remove_ops(struct device *dev);
61166 diff --git a/include/linux/mm.h b/include/linux/mm.h
61167 index 17b27cd..467ba2f 100644
61168 --- a/include/linux/mm.h
61169 +++ b/include/linux/mm.h
61170 @@ -115,7 +115,14 @@ extern unsigned int kobjsize(const void *objp);
61171
61172 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
61173 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
61174 +
61175 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
61176 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
61177 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
61178 +#else
61179 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
61180 +#endif
61181 +
61182 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
61183 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
61184
61185 @@ -1012,34 +1019,6 @@ int set_page_dirty(struct page *page);
61186 int set_page_dirty_lock(struct page *page);
61187 int clear_page_dirty_for_io(struct page *page);
61188
61189 -/* Is the vma a continuation of the stack vma above it? */
61190 -static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
61191 -{
61192 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
61193 -}
61194 -
61195 -static inline int stack_guard_page_start(struct vm_area_struct *vma,
61196 - unsigned long addr)
61197 -{
61198 - return (vma->vm_flags & VM_GROWSDOWN) &&
61199 - (vma->vm_start == addr) &&
61200 - !vma_growsdown(vma->vm_prev, addr);
61201 -}
61202 -
61203 -/* Is the vma a continuation of the stack vma below it? */
61204 -static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
61205 -{
61206 - return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
61207 -}
61208 -
61209 -static inline int stack_guard_page_end(struct vm_area_struct *vma,
61210 - unsigned long addr)
61211 -{
61212 - return (vma->vm_flags & VM_GROWSUP) &&
61213 - (vma->vm_end == addr) &&
61214 - !vma_growsup(vma->vm_next, addr);
61215 -}
61216 -
61217 extern unsigned long move_page_tables(struct vm_area_struct *vma,
61218 unsigned long old_addr, struct vm_area_struct *new_vma,
61219 unsigned long new_addr, unsigned long len);
61220 @@ -1134,6 +1113,15 @@ static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
61221 }
61222 #endif
61223
61224 +#ifdef CONFIG_MMU
61225 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
61226 +#else
61227 +static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
61228 +{
61229 + return __pgprot(0);
61230 +}
61231 +#endif
61232 +
61233 int vma_wants_writenotify(struct vm_area_struct *vma);
61234
61235 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
61236 @@ -1409,6 +1397,7 @@ out:
61237 }
61238
61239 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
61240 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
61241
61242 extern unsigned long do_brk(unsigned long, unsigned long);
61243
61244 @@ -1466,6 +1455,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
61245 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
61246 struct vm_area_struct **pprev);
61247
61248 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
61249 +extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
61250 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
61251 +
61252 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
61253 NULL if none. Assume start_addr < end_addr. */
61254 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
61255 @@ -1494,15 +1487,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
61256 return vma;
61257 }
61258
61259 -#ifdef CONFIG_MMU
61260 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
61261 -#else
61262 -static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
61263 -{
61264 - return __pgprot(0);
61265 -}
61266 -#endif
61267 -
61268 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
61269 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
61270 unsigned long pfn, unsigned long size, pgprot_t);
61271 @@ -1606,7 +1590,7 @@ extern int unpoison_memory(unsigned long pfn);
61272 extern int sysctl_memory_failure_early_kill;
61273 extern int sysctl_memory_failure_recovery;
61274 extern void shake_page(struct page *p, int access);
61275 -extern atomic_long_t mce_bad_pages;
61276 +extern atomic_long_unchecked_t mce_bad_pages;
61277 extern int soft_offline_page(struct page *page, int flags);
61278
61279 extern void dump_page(struct page *page);
61280 @@ -1637,5 +1621,11 @@ static inline unsigned int debug_guardpage_minorder(void) { return 0; }
61281 static inline bool page_is_guard(struct page *page) { return false; }
61282 #endif /* CONFIG_DEBUG_PAGEALLOC */
61283
61284 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
61285 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
61286 +#else
61287 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
61288 +#endif
61289 +
61290 #endif /* __KERNEL__ */
61291 #endif /* _LINUX_MM_H */
61292 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
61293 index 3cc3062..8947a82 100644
61294 --- a/include/linux/mm_types.h
61295 +++ b/include/linux/mm_types.h
61296 @@ -252,6 +252,8 @@ struct vm_area_struct {
61297 #ifdef CONFIG_NUMA
61298 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
61299 #endif
61300 +
61301 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
61302 };
61303
61304 struct core_thread {
61305 @@ -388,6 +390,24 @@ struct mm_struct {
61306 #ifdef CONFIG_CPUMASK_OFFSTACK
61307 struct cpumask cpumask_allocation;
61308 #endif
61309 +
61310 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
61311 + unsigned long pax_flags;
61312 +#endif
61313 +
61314 +#ifdef CONFIG_PAX_DLRESOLVE
61315 + unsigned long call_dl_resolve;
61316 +#endif
61317 +
61318 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
61319 + unsigned long call_syscall;
61320 +#endif
61321 +
61322 +#ifdef CONFIG_PAX_ASLR
61323 + unsigned long delta_mmap; /* randomized offset */
61324 + unsigned long delta_stack; /* randomized offset */
61325 +#endif
61326 +
61327 };
61328
61329 static inline void mm_init_cpumask(struct mm_struct *mm)
61330 diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
61331 index 1d1b1e1..2a13c78 100644
61332 --- a/include/linux/mmu_notifier.h
61333 +++ b/include/linux/mmu_notifier.h
61334 @@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
61335 */
61336 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
61337 ({ \
61338 - pte_t __pte; \
61339 + pte_t ___pte; \
61340 struct vm_area_struct *___vma = __vma; \
61341 unsigned long ___address = __address; \
61342 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
61343 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
61344 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
61345 - __pte; \
61346 + ___pte; \
61347 })
61348
61349 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
61350 diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
61351 index 650ba2f..af0a58c 100644
61352 --- a/include/linux/mmzone.h
61353 +++ b/include/linux/mmzone.h
61354 @@ -379,7 +379,7 @@ struct zone {
61355 unsigned long flags; /* zone flags, see below */
61356
61357 /* Zone statistics */
61358 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61359 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61360
61361 /*
61362 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
61363 diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
61364 index 83ac071..2656e0e 100644
61365 --- a/include/linux/mod_devicetable.h
61366 +++ b/include/linux/mod_devicetable.h
61367 @@ -12,7 +12,7 @@
61368 typedef unsigned long kernel_ulong_t;
61369 #endif
61370
61371 -#define PCI_ANY_ID (~0)
61372 +#define PCI_ANY_ID ((__u16)~0)
61373
61374 struct pci_device_id {
61375 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
61376 @@ -131,7 +131,7 @@ struct usb_device_id {
61377 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
61378 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
61379
61380 -#define HID_ANY_ID (~0)
61381 +#define HID_ANY_ID (~0U)
61382
61383 struct hid_device_id {
61384 __u16 bus;
61385 diff --git a/include/linux/module.h b/include/linux/module.h
61386 index 4598bf0..e069d7f 100644
61387 --- a/include/linux/module.h
61388 +++ b/include/linux/module.h
61389 @@ -17,6 +17,7 @@
61390 #include <linux/moduleparam.h>
61391 #include <linux/tracepoint.h>
61392 #include <linux/export.h>
61393 +#include <linux/fs.h>
61394
61395 #include <linux/percpu.h>
61396 #include <asm/module.h>
61397 @@ -275,19 +276,16 @@ struct module
61398 int (*init)(void);
61399
61400 /* If this is non-NULL, vfree after init() returns */
61401 - void *module_init;
61402 + void *module_init_rx, *module_init_rw;
61403
61404 /* Here is the actual code + data, vfree'd on unload. */
61405 - void *module_core;
61406 + void *module_core_rx, *module_core_rw;
61407
61408 /* Here are the sizes of the init and core sections */
61409 - unsigned int init_size, core_size;
61410 + unsigned int init_size_rw, core_size_rw;
61411
61412 /* The size of the executable code in each section. */
61413 - unsigned int init_text_size, core_text_size;
61414 -
61415 - /* Size of RO sections of the module (text+rodata) */
61416 - unsigned int init_ro_size, core_ro_size;
61417 + unsigned int init_size_rx, core_size_rx;
61418
61419 /* Arch-specific module values */
61420 struct mod_arch_specific arch;
61421 @@ -343,6 +341,10 @@ struct module
61422 #ifdef CONFIG_EVENT_TRACING
61423 struct ftrace_event_call **trace_events;
61424 unsigned int num_trace_events;
61425 + struct file_operations trace_id;
61426 + struct file_operations trace_enable;
61427 + struct file_operations trace_format;
61428 + struct file_operations trace_filter;
61429 #endif
61430 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
61431 unsigned int num_ftrace_callsites;
61432 @@ -390,16 +392,46 @@ bool is_module_address(unsigned long addr);
61433 bool is_module_percpu_address(unsigned long addr);
61434 bool is_module_text_address(unsigned long addr);
61435
61436 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
61437 +{
61438 +
61439 +#ifdef CONFIG_PAX_KERNEXEC
61440 + if (ktla_ktva(addr) >= (unsigned long)start &&
61441 + ktla_ktva(addr) < (unsigned long)start + size)
61442 + return 1;
61443 +#endif
61444 +
61445 + return ((void *)addr >= start && (void *)addr < start + size);
61446 +}
61447 +
61448 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
61449 +{
61450 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
61451 +}
61452 +
61453 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
61454 +{
61455 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
61456 +}
61457 +
61458 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
61459 +{
61460 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
61461 +}
61462 +
61463 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
61464 +{
61465 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
61466 +}
61467 +
61468 static inline int within_module_core(unsigned long addr, struct module *mod)
61469 {
61470 - return (unsigned long)mod->module_core <= addr &&
61471 - addr < (unsigned long)mod->module_core + mod->core_size;
61472 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
61473 }
61474
61475 static inline int within_module_init(unsigned long addr, struct module *mod)
61476 {
61477 - return (unsigned long)mod->module_init <= addr &&
61478 - addr < (unsigned long)mod->module_init + mod->init_size;
61479 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
61480 }
61481
61482 /* Search for module by name: must hold module_mutex. */
61483 diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
61484 index b2be02e..6a9fdb1 100644
61485 --- a/include/linux/moduleloader.h
61486 +++ b/include/linux/moduleloader.h
61487 @@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
61488 sections. Returns NULL on failure. */
61489 void *module_alloc(unsigned long size);
61490
61491 +#ifdef CONFIG_PAX_KERNEXEC
61492 +void *module_alloc_exec(unsigned long size);
61493 +#else
61494 +#define module_alloc_exec(x) module_alloc(x)
61495 +#endif
61496 +
61497 /* Free memory returned from module_alloc. */
61498 void module_free(struct module *mod, void *module_region);
61499
61500 +#ifdef CONFIG_PAX_KERNEXEC
61501 +void module_free_exec(struct module *mod, void *module_region);
61502 +#else
61503 +#define module_free_exec(x, y) module_free((x), (y))
61504 +#endif
61505 +
61506 /* Apply the given relocation to the (simplified) ELF. Return -error
61507 or 0. */
61508 int apply_relocate(Elf_Shdr *sechdrs,
61509 diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
61510 index c47f4d6..23f9bdb 100644
61511 --- a/include/linux/moduleparam.h
61512 +++ b/include/linux/moduleparam.h
61513 @@ -260,7 +260,7 @@ static inline void __kernel_param_unlock(void)
61514 * @len is usually just sizeof(string).
61515 */
61516 #define module_param_string(name, string, len, perm) \
61517 - static const struct kparam_string __param_string_##name \
61518 + static const struct kparam_string __param_string_##name __used \
61519 = { len, string }; \
61520 __module_param_call(MODULE_PARAM_PREFIX, name, \
61521 &param_ops_string, \
61522 @@ -396,7 +396,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
61523 */
61524 #define module_param_array_named(name, array, type, nump, perm) \
61525 param_check_##type(name, &(array)[0]); \
61526 - static const struct kparam_array __param_arr_##name \
61527 + static const struct kparam_array __param_arr_##name __used \
61528 = { .max = ARRAY_SIZE(array), .num = nump, \
61529 .ops = &param_ops_##type, \
61530 .elemsize = sizeof(array[0]), .elem = array }; \
61531 diff --git a/include/linux/namei.h b/include/linux/namei.h
61532 index ffc0213..2c1f2cb 100644
61533 --- a/include/linux/namei.h
61534 +++ b/include/linux/namei.h
61535 @@ -24,7 +24,7 @@ struct nameidata {
61536 unsigned seq;
61537 int last_type;
61538 unsigned depth;
61539 - char *saved_names[MAX_NESTED_LINKS + 1];
61540 + const char *saved_names[MAX_NESTED_LINKS + 1];
61541
61542 /* Intent data */
61543 union {
61544 @@ -94,12 +94,12 @@ extern int follow_up(struct path *);
61545 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
61546 extern void unlock_rename(struct dentry *, struct dentry *);
61547
61548 -static inline void nd_set_link(struct nameidata *nd, char *path)
61549 +static inline void nd_set_link(struct nameidata *nd, const char *path)
61550 {
61551 nd->saved_names[nd->depth] = path;
61552 }
61553
61554 -static inline char *nd_get_link(struct nameidata *nd)
61555 +static inline const char *nd_get_link(const struct nameidata *nd)
61556 {
61557 return nd->saved_names[nd->depth];
61558 }
61559 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
61560 index 0eac07c..a59f6a8 100644
61561 --- a/include/linux/netdevice.h
61562 +++ b/include/linux/netdevice.h
61563 @@ -1002,6 +1002,7 @@ struct net_device_ops {
61564 int (*ndo_neigh_construct)(struct neighbour *n);
61565 void (*ndo_neigh_destroy)(struct neighbour *n);
61566 };
61567 +typedef struct net_device_ops __no_const net_device_ops_no_const;
61568
61569 /*
61570 * The DEVICE structure.
61571 @@ -1063,7 +1064,7 @@ struct net_device {
61572 int iflink;
61573
61574 struct net_device_stats stats;
61575 - atomic_long_t rx_dropped; /* dropped packets by core network
61576 + atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
61577 * Do not use this in drivers.
61578 */
61579
61580 diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
61581 new file mode 100644
61582 index 0000000..33f4af8
61583 --- /dev/null
61584 +++ b/include/linux/netfilter/xt_gradm.h
61585 @@ -0,0 +1,9 @@
61586 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
61587 +#define _LINUX_NETFILTER_XT_GRADM_H 1
61588 +
61589 +struct xt_gradm_mtinfo {
61590 + __u16 flags;
61591 + __u16 invflags;
61592 +};
61593 +
61594 +#endif
61595 diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h
61596 index c65a18a..0c05f3a 100644
61597 --- a/include/linux/of_pdt.h
61598 +++ b/include/linux/of_pdt.h
61599 @@ -32,7 +32,7 @@ struct of_pdt_ops {
61600
61601 /* return 0 on success; fill in 'len' with number of bytes in path */
61602 int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
61603 -};
61604 +} __no_const;
61605
61606 extern void *prom_early_alloc(unsigned long size);
61607
61608 diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
61609 index a4c5624..79d6d88 100644
61610 --- a/include/linux/oprofile.h
61611 +++ b/include/linux/oprofile.h
61612 @@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
61613 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
61614 char const * name, ulong * val);
61615
61616 -/** Create a file for read-only access to an atomic_t. */
61617 +/** Create a file for read-only access to an atomic_unchecked_t. */
61618 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
61619 - char const * name, atomic_t * val);
61620 + char const * name, atomic_unchecked_t * val);
61621
61622 /** create a directory */
61623 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
61624 diff --git a/include/linux/padata.h b/include/linux/padata.h
61625 index 4633b2f..988bc08 100644
61626 --- a/include/linux/padata.h
61627 +++ b/include/linux/padata.h
61628 @@ -129,7 +129,7 @@ struct parallel_data {
61629 struct padata_instance *pinst;
61630 struct padata_parallel_queue __percpu *pqueue;
61631 struct padata_serial_queue __percpu *squeue;
61632 - atomic_t seq_nr;
61633 + atomic_unchecked_t seq_nr;
61634 atomic_t reorder_objects;
61635 atomic_t refcnt;
61636 unsigned int max_seq_nr;
61637 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
61638 index abb2776..d8b8e15 100644
61639 --- a/include/linux/perf_event.h
61640 +++ b/include/linux/perf_event.h
61641 @@ -750,8 +750,8 @@ struct perf_event {
61642
61643 enum perf_event_active_state state;
61644 unsigned int attach_state;
61645 - local64_t count;
61646 - atomic64_t child_count;
61647 + local64_t count; /* PaX: fix it one day */
61648 + atomic64_unchecked_t child_count;
61649
61650 /*
61651 * These are the total time in nanoseconds that the event
61652 @@ -802,8 +802,8 @@ struct perf_event {
61653 * These accumulate total time (in nanoseconds) that children
61654 * events have been enabled and running, respectively.
61655 */
61656 - atomic64_t child_total_time_enabled;
61657 - atomic64_t child_total_time_running;
61658 + atomic64_unchecked_t child_total_time_enabled;
61659 + atomic64_unchecked_t child_total_time_running;
61660
61661 /*
61662 * Protect attach/detach and child_list:
61663 diff --git a/include/linux/personality.h b/include/linux/personality.h
61664 index 8fc7dd1a..c19d89e 100644
61665 --- a/include/linux/personality.h
61666 +++ b/include/linux/personality.h
61667 @@ -44,6 +44,7 @@ enum {
61668 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
61669 ADDR_NO_RANDOMIZE | \
61670 ADDR_COMPAT_LAYOUT | \
61671 + ADDR_LIMIT_3GB | \
61672 MMAP_PAGE_ZERO)
61673
61674 /*
61675 diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
61676 index 77257c9..51d473a 100644
61677 --- a/include/linux/pipe_fs_i.h
61678 +++ b/include/linux/pipe_fs_i.h
61679 @@ -46,9 +46,9 @@ struct pipe_buffer {
61680 struct pipe_inode_info {
61681 wait_queue_head_t wait;
61682 unsigned int nrbufs, curbuf, buffers;
61683 - unsigned int readers;
61684 - unsigned int writers;
61685 - unsigned int waiting_writers;
61686 + atomic_t readers;
61687 + atomic_t writers;
61688 + atomic_t waiting_writers;
61689 unsigned int r_counter;
61690 unsigned int w_counter;
61691 struct page *tmp_page;
61692 diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
61693 index 609daae..5392427 100644
61694 --- a/include/linux/pm_runtime.h
61695 +++ b/include/linux/pm_runtime.h
61696 @@ -97,7 +97,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
61697
61698 static inline void pm_runtime_mark_last_busy(struct device *dev)
61699 {
61700 - ACCESS_ONCE(dev->power.last_busy) = jiffies;
61701 + ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
61702 }
61703
61704 #else /* !CONFIG_PM_RUNTIME */
61705 diff --git a/include/linux/poison.h b/include/linux/poison.h
61706 index 2110a81..13a11bb 100644
61707 --- a/include/linux/poison.h
61708 +++ b/include/linux/poison.h
61709 @@ -19,8 +19,8 @@
61710 * under normal circumstances, used to verify that nobody uses
61711 * non-initialized list entries.
61712 */
61713 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
61714 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
61715 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
61716 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
61717
61718 /********** include/linux/timer.h **********/
61719 /*
61720 diff --git a/include/linux/preempt.h b/include/linux/preempt.h
61721 index 58969b2..ead129b 100644
61722 --- a/include/linux/preempt.h
61723 +++ b/include/linux/preempt.h
61724 @@ -123,7 +123,7 @@ struct preempt_ops {
61725 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
61726 void (*sched_out)(struct preempt_notifier *notifier,
61727 struct task_struct *next);
61728 -};
61729 +} __no_const;
61730
61731 /**
61732 * preempt_notifier - key for installing preemption notifiers
61733 diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
61734 index 85c5073..51fac8b 100644
61735 --- a/include/linux/proc_fs.h
61736 +++ b/include/linux/proc_fs.h
61737 @@ -155,6 +155,18 @@ static inline struct proc_dir_entry *proc_create(const char *name, umode_t mode,
61738 return proc_create_data(name, mode, parent, proc_fops, NULL);
61739 }
61740
61741 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
61742 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
61743 +{
61744 +#ifdef CONFIG_GRKERNSEC_PROC_USER
61745 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
61746 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
61747 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
61748 +#else
61749 + return proc_create_data(name, mode, parent, proc_fops, NULL);
61750 +#endif
61751 +}
61752 +
61753 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
61754 umode_t mode, struct proc_dir_entry *base,
61755 read_proc_t *read_proc, void * data)
61756 @@ -258,7 +270,7 @@ union proc_op {
61757 int (*proc_show)(struct seq_file *m,
61758 struct pid_namespace *ns, struct pid *pid,
61759 struct task_struct *task);
61760 -};
61761 +} __no_const;
61762
61763 struct ctl_table_header;
61764 struct ctl_table;
61765 diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
61766 index c2f1f6a..6fdb196 100644
61767 --- a/include/linux/ptrace.h
61768 +++ b/include/linux/ptrace.h
61769 @@ -199,9 +199,10 @@ static inline void ptrace_event(int event, unsigned long message)
61770 if (unlikely(ptrace_event_enabled(current, event))) {
61771 current->ptrace_message = message;
61772 ptrace_notify((event << 8) | SIGTRAP);
61773 - } else if (event == PTRACE_EVENT_EXEC && unlikely(current->ptrace)) {
61774 + } else if (event == PTRACE_EVENT_EXEC) {
61775 /* legacy EXEC report via SIGTRAP */
61776 - send_sig(SIGTRAP, current, 0);
61777 + if ((current->ptrace & (PT_PTRACED|PT_SEIZED)) == PT_PTRACED)
61778 + send_sig(SIGTRAP, current, 0);
61779 }
61780 }
61781
61782 diff --git a/include/linux/random.h b/include/linux/random.h
61783 index 8f74538..02a1012 100644
61784 --- a/include/linux/random.h
61785 +++ b/include/linux/random.h
61786 @@ -69,12 +69,17 @@ void srandom32(u32 seed);
61787
61788 u32 prandom32(struct rnd_state *);
61789
61790 +static inline unsigned long pax_get_random_long(void)
61791 +{
61792 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
61793 +}
61794 +
61795 /*
61796 * Handle minimum values for seeds
61797 */
61798 static inline u32 __seed(u32 x, u32 m)
61799 {
61800 - return (x < m) ? x + m : x;
61801 + return (x <= m) ? x + m + 1 : x;
61802 }
61803
61804 /**
61805 diff --git a/include/linux/reboot.h b/include/linux/reboot.h
61806 index e0879a7..a12f962 100644
61807 --- a/include/linux/reboot.h
61808 +++ b/include/linux/reboot.h
61809 @@ -52,9 +52,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
61810 * Architecture-specific implementations of sys_reboot commands.
61811 */
61812
61813 -extern void machine_restart(char *cmd);
61814 -extern void machine_halt(void);
61815 -extern void machine_power_off(void);
61816 +extern void machine_restart(char *cmd) __noreturn;
61817 +extern void machine_halt(void) __noreturn;
61818 +extern void machine_power_off(void) __noreturn;
61819
61820 extern void machine_shutdown(void);
61821 struct pt_regs;
61822 @@ -65,9 +65,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
61823 */
61824
61825 extern void kernel_restart_prepare(char *cmd);
61826 -extern void kernel_restart(char *cmd);
61827 -extern void kernel_halt(void);
61828 -extern void kernel_power_off(void);
61829 +extern void kernel_restart(char *cmd) __noreturn;
61830 +extern void kernel_halt(void) __noreturn;
61831 +extern void kernel_power_off(void) __noreturn;
61832
61833 extern int C_A_D; /* for sysctl */
61834 void ctrl_alt_del(void);
61835 @@ -81,7 +81,7 @@ extern int orderly_poweroff(bool force);
61836 * Emergency restart, callable from an interrupt handler.
61837 */
61838
61839 -extern void emergency_restart(void);
61840 +extern void emergency_restart(void) __noreturn;
61841 #include <asm/emergency-restart.h>
61842
61843 #endif
61844 diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
61845 index 2213ddc..650212a 100644
61846 --- a/include/linux/reiserfs_fs.h
61847 +++ b/include/linux/reiserfs_fs.h
61848 @@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
61849 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
61850
61851 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
61852 -#define get_generation(s) atomic_read (&fs_generation(s))
61853 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
61854 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
61855 #define __fs_changed(gen,s) (gen != get_generation (s))
61856 #define fs_changed(gen,s) \
61857 diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
61858 index 8c9e85c..1698e9a 100644
61859 --- a/include/linux/reiserfs_fs_sb.h
61860 +++ b/include/linux/reiserfs_fs_sb.h
61861 @@ -386,7 +386,7 @@ struct reiserfs_sb_info {
61862 /* Comment? -Hans */
61863 wait_queue_head_t s_wait;
61864 /* To be obsoleted soon by per buffer seals.. -Hans */
61865 - atomic_t s_generation_counter; // increased by one every time the
61866 + atomic_unchecked_t s_generation_counter; // increased by one every time the
61867 // tree gets re-balanced
61868 unsigned long s_properties; /* File system properties. Currently holds
61869 on-disk FS format */
61870 diff --git a/include/linux/relay.h b/include/linux/relay.h
61871 index a822fd7..62b70f6 100644
61872 --- a/include/linux/relay.h
61873 +++ b/include/linux/relay.h
61874 @@ -159,7 +159,7 @@ struct rchan_callbacks
61875 * The callback should return 0 if successful, negative if not.
61876 */
61877 int (*remove_buf_file)(struct dentry *dentry);
61878 -};
61879 +} __no_const;
61880
61881 /*
61882 * CONFIG_RELAY kernel API, kernel/relay.c
61883 diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
61884 index c6c6084..5bf1212 100644
61885 --- a/include/linux/rfkill.h
61886 +++ b/include/linux/rfkill.h
61887 @@ -147,6 +147,7 @@ struct rfkill_ops {
61888 void (*query)(struct rfkill *rfkill, void *data);
61889 int (*set_block)(void *data, bool blocked);
61890 };
61891 +typedef struct rfkill_ops __no_const rfkill_ops_no_const;
61892
61893 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
61894 /**
61895 diff --git a/include/linux/rio.h b/include/linux/rio.h
61896 index 4d50611..c6858a2 100644
61897 --- a/include/linux/rio.h
61898 +++ b/include/linux/rio.h
61899 @@ -315,7 +315,7 @@ struct rio_ops {
61900 int mbox, void *buffer, size_t len);
61901 int (*add_inb_buffer)(struct rio_mport *mport, int mbox, void *buf);
61902 void *(*get_inb_message)(struct rio_mport *mport, int mbox);
61903 -};
61904 +} __no_const;
61905
61906 #define RIO_RESOURCE_MEM 0x00000100
61907 #define RIO_RESOURCE_DOORBELL 0x00000200
61908 diff --git a/include/linux/rmap.h b/include/linux/rmap.h
61909 index 1cdd62a..e399f0d 100644
61910 --- a/include/linux/rmap.h
61911 +++ b/include/linux/rmap.h
61912 @@ -119,9 +119,9 @@ static inline void anon_vma_unlock(struct anon_vma *anon_vma)
61913 void anon_vma_init(void); /* create anon_vma_cachep */
61914 int anon_vma_prepare(struct vm_area_struct *);
61915 void unlink_anon_vmas(struct vm_area_struct *);
61916 -int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
61917 +int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
61918 void anon_vma_moveto_tail(struct vm_area_struct *);
61919 -int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
61920 +int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
61921 void __anon_vma_link(struct vm_area_struct *);
61922
61923 static inline void anon_vma_merge(struct vm_area_struct *vma,
61924 diff --git a/include/linux/sched.h b/include/linux/sched.h
61925 index 0657368..765f70f 100644
61926 --- a/include/linux/sched.h
61927 +++ b/include/linux/sched.h
61928 @@ -101,6 +101,7 @@ struct bio_list;
61929 struct fs_struct;
61930 struct perf_event_context;
61931 struct blk_plug;
61932 +struct linux_binprm;
61933
61934 /*
61935 * List of flags we want to share for kernel threads,
61936 @@ -382,10 +383,13 @@ struct user_namespace;
61937 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
61938
61939 extern int sysctl_max_map_count;
61940 +extern unsigned long sysctl_heap_stack_gap;
61941
61942 #include <linux/aio.h>
61943
61944 #ifdef CONFIG_MMU
61945 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
61946 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
61947 extern void arch_pick_mmap_layout(struct mm_struct *mm);
61948 extern unsigned long
61949 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
61950 @@ -631,6 +635,17 @@ struct signal_struct {
61951 #ifdef CONFIG_TASKSTATS
61952 struct taskstats *stats;
61953 #endif
61954 +
61955 +#ifdef CONFIG_GRKERNSEC
61956 + u32 curr_ip;
61957 + u32 saved_ip;
61958 + u32 gr_saddr;
61959 + u32 gr_daddr;
61960 + u16 gr_sport;
61961 + u16 gr_dport;
61962 + u8 used_accept:1;
61963 +#endif
61964 +
61965 #ifdef CONFIG_AUDIT
61966 unsigned audit_tty;
61967 struct tty_audit_buf *tty_audit_buf;
61968 @@ -714,6 +729,11 @@ struct user_struct {
61969 struct key *session_keyring; /* UID's default session keyring */
61970 #endif
61971
61972 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
61973 + unsigned int banned;
61974 + unsigned long ban_expires;
61975 +#endif
61976 +
61977 /* Hash table maintenance information */
61978 struct hlist_node uidhash_node;
61979 uid_t uid;
61980 @@ -1354,8 +1374,8 @@ struct task_struct {
61981 struct list_head thread_group;
61982
61983 struct completion *vfork_done; /* for vfork() */
61984 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
61985 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
61986 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
61987 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
61988
61989 cputime_t utime, stime, utimescaled, stimescaled;
61990 cputime_t gtime;
61991 @@ -1371,13 +1391,6 @@ struct task_struct {
61992 struct task_cputime cputime_expires;
61993 struct list_head cpu_timers[3];
61994
61995 -/* process credentials */
61996 - const struct cred __rcu *real_cred; /* objective and real subjective task
61997 - * credentials (COW) */
61998 - const struct cred __rcu *cred; /* effective (overridable) subjective task
61999 - * credentials (COW) */
62000 - struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
62001 -
62002 char comm[TASK_COMM_LEN]; /* executable name excluding path
62003 - access with [gs]et_task_comm (which lock
62004 it with task_lock())
62005 @@ -1394,8 +1407,16 @@ struct task_struct {
62006 #endif
62007 /* CPU-specific state of this task */
62008 struct thread_struct thread;
62009 +/* thread_info moved to task_struct */
62010 +#ifdef CONFIG_X86
62011 + struct thread_info tinfo;
62012 +#endif
62013 /* filesystem information */
62014 struct fs_struct *fs;
62015 +
62016 + const struct cred __rcu *cred; /* effective (overridable) subjective task
62017 + * credentials (COW) */
62018 +
62019 /* open file information */
62020 struct files_struct *files;
62021 /* namespaces */
62022 @@ -1442,6 +1463,11 @@ struct task_struct {
62023 struct rt_mutex_waiter *pi_blocked_on;
62024 #endif
62025
62026 +/* process credentials */
62027 + const struct cred __rcu *real_cred; /* objective and real subjective task
62028 + * credentials (COW) */
62029 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
62030 +
62031 #ifdef CONFIG_DEBUG_MUTEXES
62032 /* mutex deadlock detection */
62033 struct mutex_waiter *blocked_on;
62034 @@ -1558,6 +1584,27 @@ struct task_struct {
62035 unsigned long default_timer_slack_ns;
62036
62037 struct list_head *scm_work_list;
62038 +
62039 +#ifdef CONFIG_GRKERNSEC
62040 + /* grsecurity */
62041 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62042 + u64 exec_id;
62043 +#endif
62044 +#ifdef CONFIG_GRKERNSEC_SETXID
62045 + const struct cred *delayed_cred;
62046 +#endif
62047 + struct dentry *gr_chroot_dentry;
62048 + struct acl_subject_label *acl;
62049 + struct acl_role_label *role;
62050 + struct file *exec_file;
62051 + u16 acl_role_id;
62052 + /* is this the task that authenticated to the special role */
62053 + u8 acl_sp_role;
62054 + u8 is_writable;
62055 + u8 brute;
62056 + u8 gr_is_chrooted;
62057 +#endif
62058 +
62059 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
62060 /* Index of current stored address in ret_stack */
62061 int curr_ret_stack;
62062 @@ -1592,6 +1639,51 @@ struct task_struct {
62063 #endif
62064 };
62065
62066 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
62067 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
62068 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
62069 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
62070 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
62071 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
62072 +
62073 +#ifdef CONFIG_PAX_SOFTMODE
62074 +extern int pax_softmode;
62075 +#endif
62076 +
62077 +extern int pax_check_flags(unsigned long *);
62078 +
62079 +/* if tsk != current then task_lock must be held on it */
62080 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
62081 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
62082 +{
62083 + if (likely(tsk->mm))
62084 + return tsk->mm->pax_flags;
62085 + else
62086 + return 0UL;
62087 +}
62088 +
62089 +/* if tsk != current then task_lock must be held on it */
62090 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
62091 +{
62092 + if (likely(tsk->mm)) {
62093 + tsk->mm->pax_flags = flags;
62094 + return 0;
62095 + }
62096 + return -EINVAL;
62097 +}
62098 +#endif
62099 +
62100 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
62101 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
62102 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
62103 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
62104 +#endif
62105 +
62106 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
62107 +extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
62108 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
62109 +extern __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type);
62110 +
62111 /* Future-safe accessor for struct task_struct's cpus_allowed. */
62112 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
62113
62114 @@ -2104,7 +2196,9 @@ void yield(void);
62115 extern struct exec_domain default_exec_domain;
62116
62117 union thread_union {
62118 +#ifndef CONFIG_X86
62119 struct thread_info thread_info;
62120 +#endif
62121 unsigned long stack[THREAD_SIZE/sizeof(long)];
62122 };
62123
62124 @@ -2137,6 +2231,7 @@ extern struct pid_namespace init_pid_ns;
62125 */
62126
62127 extern struct task_struct *find_task_by_vpid(pid_t nr);
62128 +extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
62129 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
62130 struct pid_namespace *ns);
62131
62132 @@ -2280,7 +2375,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
62133 extern void exit_itimers(struct signal_struct *);
62134 extern void flush_itimer_signals(void);
62135
62136 -extern void do_group_exit(int);
62137 +extern __noreturn void do_group_exit(int);
62138
62139 extern void daemonize(const char *, ...);
62140 extern int allow_signal(int);
62141 @@ -2478,13 +2573,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
62142
62143 #endif
62144
62145 -static inline int object_is_on_stack(void *obj)
62146 +static inline int object_starts_on_stack(void *obj)
62147 {
62148 - void *stack = task_stack_page(current);
62149 + const void *stack = task_stack_page(current);
62150
62151 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
62152 }
62153
62154 +#ifdef CONFIG_PAX_USERCOPY
62155 +extern int object_is_on_stack(const void *obj, unsigned long len);
62156 +#endif
62157 +
62158 extern void thread_info_cache_init(void);
62159
62160 #ifdef CONFIG_DEBUG_STACK_USAGE
62161 diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
62162 index 899fbb4..1cb4138 100644
62163 --- a/include/linux/screen_info.h
62164 +++ b/include/linux/screen_info.h
62165 @@ -43,7 +43,8 @@ struct screen_info {
62166 __u16 pages; /* 0x32 */
62167 __u16 vesa_attributes; /* 0x34 */
62168 __u32 capabilities; /* 0x36 */
62169 - __u8 _reserved[6]; /* 0x3a */
62170 + __u16 vesapm_size; /* 0x3a */
62171 + __u8 _reserved[4]; /* 0x3c */
62172 } __attribute__((packed));
62173
62174 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
62175 diff --git a/include/linux/security.h b/include/linux/security.h
62176 index 83c18e8..2d98860 100644
62177 --- a/include/linux/security.h
62178 +++ b/include/linux/security.h
62179 @@ -37,6 +37,7 @@
62180 #include <linux/xfrm.h>
62181 #include <linux/slab.h>
62182 #include <linux/xattr.h>
62183 +#include <linux/grsecurity.h>
62184 #include <net/flow.h>
62185
62186 /* Maximum number of letters for an LSM name string */
62187 diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
62188 index 44f1514..2bbf6c1 100644
62189 --- a/include/linux/seq_file.h
62190 +++ b/include/linux/seq_file.h
62191 @@ -24,6 +24,9 @@ struct seq_file {
62192 struct mutex lock;
62193 const struct seq_operations *op;
62194 int poll_event;
62195 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62196 + u64 exec_id;
62197 +#endif
62198 void *private;
62199 };
62200
62201 @@ -33,6 +36,7 @@ struct seq_operations {
62202 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
62203 int (*show) (struct seq_file *m, void *v);
62204 };
62205 +typedef struct seq_operations __no_const seq_operations_no_const;
62206
62207 #define SEQ_SKIP 1
62208
62209 diff --git a/include/linux/shm.h b/include/linux/shm.h
62210 index 92808b8..c28cac4 100644
62211 --- a/include/linux/shm.h
62212 +++ b/include/linux/shm.h
62213 @@ -98,6 +98,10 @@ struct shmid_kernel /* private to the kernel */
62214
62215 /* The task created the shm object. NULL if the task is dead. */
62216 struct task_struct *shm_creator;
62217 +#ifdef CONFIG_GRKERNSEC
62218 + time_t shm_createtime;
62219 + pid_t shm_lapid;
62220 +#endif
62221 };
62222
62223 /* shm_mode upper byte flags */
62224 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
62225 index ae86ade..2b51468 100644
62226 --- a/include/linux/skbuff.h
62227 +++ b/include/linux/skbuff.h
62228 @@ -654,7 +654,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
62229 */
62230 static inline int skb_queue_empty(const struct sk_buff_head *list)
62231 {
62232 - return list->next == (struct sk_buff *)list;
62233 + return list->next == (const struct sk_buff *)list;
62234 }
62235
62236 /**
62237 @@ -667,7 +667,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
62238 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
62239 const struct sk_buff *skb)
62240 {
62241 - return skb->next == (struct sk_buff *)list;
62242 + return skb->next == (const struct sk_buff *)list;
62243 }
62244
62245 /**
62246 @@ -680,7 +680,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
62247 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
62248 const struct sk_buff *skb)
62249 {
62250 - return skb->prev == (struct sk_buff *)list;
62251 + return skb->prev == (const struct sk_buff *)list;
62252 }
62253
62254 /**
62255 @@ -1545,7 +1545,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
62256 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
62257 */
62258 #ifndef NET_SKB_PAD
62259 -#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
62260 +#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
62261 #endif
62262
62263 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
62264 diff --git a/include/linux/slab.h b/include/linux/slab.h
62265 index 573c809..e84c132 100644
62266 --- a/include/linux/slab.h
62267 +++ b/include/linux/slab.h
62268 @@ -11,12 +11,20 @@
62269
62270 #include <linux/gfp.h>
62271 #include <linux/types.h>
62272 +#include <linux/err.h>
62273
62274 /*
62275 * Flags to pass to kmem_cache_create().
62276 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
62277 */
62278 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
62279 +
62280 +#ifdef CONFIG_PAX_USERCOPY
62281 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
62282 +#else
62283 +#define SLAB_USERCOPY 0x00000000UL
62284 +#endif
62285 +
62286 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
62287 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
62288 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
62289 @@ -87,10 +95,13 @@
62290 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
62291 * Both make kfree a no-op.
62292 */
62293 -#define ZERO_SIZE_PTR ((void *)16)
62294 +#define ZERO_SIZE_PTR \
62295 +({ \
62296 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
62297 + (void *)(-MAX_ERRNO-1L); \
62298 +})
62299
62300 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
62301 - (unsigned long)ZERO_SIZE_PTR)
62302 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
62303
62304 /*
62305 * struct kmem_cache related prototypes
62306 @@ -161,6 +172,7 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
62307 void kfree(const void *);
62308 void kzfree(const void *);
62309 size_t ksize(const void *);
62310 +void check_object_size(const void *ptr, unsigned long n, bool to);
62311
62312 /*
62313 * Allocator specific definitions. These are mainly used to establish optimized
62314 @@ -353,4 +365,59 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
62315
62316 void __init kmem_cache_init_late(void);
62317
62318 +#define kmalloc(x, y) \
62319 +({ \
62320 + void *___retval; \
62321 + intoverflow_t ___x = (intoverflow_t)x; \
62322 + if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n")) \
62323 + ___retval = NULL; \
62324 + else \
62325 + ___retval = kmalloc((size_t)___x, (y)); \
62326 + ___retval; \
62327 +})
62328 +
62329 +#define kmalloc_node(x, y, z) \
62330 +({ \
62331 + void *___retval; \
62332 + intoverflow_t ___x = (intoverflow_t)x; \
62333 + if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
62334 + ___retval = NULL; \
62335 + else \
62336 + ___retval = kmalloc_node((size_t)___x, (y), (z));\
62337 + ___retval; \
62338 +})
62339 +
62340 +#define kzalloc(x, y) \
62341 +({ \
62342 + void *___retval; \
62343 + intoverflow_t ___x = (intoverflow_t)x; \
62344 + if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n")) \
62345 + ___retval = NULL; \
62346 + else \
62347 + ___retval = kzalloc((size_t)___x, (y)); \
62348 + ___retval; \
62349 +})
62350 +
62351 +#define __krealloc(x, y, z) \
62352 +({ \
62353 + void *___retval; \
62354 + intoverflow_t ___y = (intoverflow_t)y; \
62355 + if (WARN(___y > ULONG_MAX, "__krealloc size overflow\n"))\
62356 + ___retval = NULL; \
62357 + else \
62358 + ___retval = __krealloc((x), (size_t)___y, (z)); \
62359 + ___retval; \
62360 +})
62361 +
62362 +#define krealloc(x, y, z) \
62363 +({ \
62364 + void *___retval; \
62365 + intoverflow_t ___y = (intoverflow_t)y; \
62366 + if (WARN(___y > ULONG_MAX, "krealloc size overflow\n")) \
62367 + ___retval = NULL; \
62368 + else \
62369 + ___retval = krealloc((x), (size_t)___y, (z)); \
62370 + ___retval; \
62371 +})
62372 +
62373 #endif /* _LINUX_SLAB_H */
62374 diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
62375 index fbd1117..1e5e46c 100644
62376 --- a/include/linux/slab_def.h
62377 +++ b/include/linux/slab_def.h
62378 @@ -66,10 +66,10 @@ struct kmem_cache {
62379 unsigned long node_allocs;
62380 unsigned long node_frees;
62381 unsigned long node_overflow;
62382 - atomic_t allochit;
62383 - atomic_t allocmiss;
62384 - atomic_t freehit;
62385 - atomic_t freemiss;
62386 + atomic_unchecked_t allochit;
62387 + atomic_unchecked_t allocmiss;
62388 + atomic_unchecked_t freehit;
62389 + atomic_unchecked_t freemiss;
62390
62391 /*
62392 * If debugging is enabled, then the allocator can add additional
62393 diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
62394 index a32bcfd..53b71f4 100644
62395 --- a/include/linux/slub_def.h
62396 +++ b/include/linux/slub_def.h
62397 @@ -89,7 +89,7 @@ struct kmem_cache {
62398 struct kmem_cache_order_objects max;
62399 struct kmem_cache_order_objects min;
62400 gfp_t allocflags; /* gfp flags to use on each alloc */
62401 - int refcount; /* Refcount for slab cache destroy */
62402 + atomic_t refcount; /* Refcount for slab cache destroy */
62403 void (*ctor)(void *);
62404 int inuse; /* Offset to metadata */
62405 int align; /* Alignment */
62406 @@ -215,7 +215,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
62407 }
62408
62409 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
62410 -void *__kmalloc(size_t size, gfp_t flags);
62411 +void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
62412
62413 static __always_inline void *
62414 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
62415 diff --git a/include/linux/sonet.h b/include/linux/sonet.h
62416 index de8832d..0147b46 100644
62417 --- a/include/linux/sonet.h
62418 +++ b/include/linux/sonet.h
62419 @@ -61,7 +61,7 @@ struct sonet_stats {
62420 #include <linux/atomic.h>
62421
62422 struct k_sonet_stats {
62423 -#define __HANDLE_ITEM(i) atomic_t i
62424 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
62425 __SONET_ITEMS
62426 #undef __HANDLE_ITEM
62427 };
62428 diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
62429 index 2c5993a..b0e79f0 100644
62430 --- a/include/linux/sunrpc/clnt.h
62431 +++ b/include/linux/sunrpc/clnt.h
62432 @@ -172,9 +172,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
62433 {
62434 switch (sap->sa_family) {
62435 case AF_INET:
62436 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
62437 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
62438 case AF_INET6:
62439 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
62440 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
62441 }
62442 return 0;
62443 }
62444 @@ -207,7 +207,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
62445 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
62446 const struct sockaddr *src)
62447 {
62448 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
62449 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
62450 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
62451
62452 dsin->sin_family = ssin->sin_family;
62453 @@ -310,7 +310,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
62454 if (sa->sa_family != AF_INET6)
62455 return 0;
62456
62457 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
62458 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
62459 }
62460
62461 #endif /* __KERNEL__ */
62462 diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
62463 index e775689..9e206d9 100644
62464 --- a/include/linux/sunrpc/sched.h
62465 +++ b/include/linux/sunrpc/sched.h
62466 @@ -105,6 +105,7 @@ struct rpc_call_ops {
62467 void (*rpc_call_done)(struct rpc_task *, void *);
62468 void (*rpc_release)(void *);
62469 };
62470 +typedef struct rpc_call_ops __no_const rpc_call_ops_no_const;
62471
62472 struct rpc_task_setup {
62473 struct rpc_task *task;
62474 diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
62475 index c14fe86..393245e 100644
62476 --- a/include/linux/sunrpc/svc_rdma.h
62477 +++ b/include/linux/sunrpc/svc_rdma.h
62478 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
62479 extern unsigned int svcrdma_max_requests;
62480 extern unsigned int svcrdma_max_req_size;
62481
62482 -extern atomic_t rdma_stat_recv;
62483 -extern atomic_t rdma_stat_read;
62484 -extern atomic_t rdma_stat_write;
62485 -extern atomic_t rdma_stat_sq_starve;
62486 -extern atomic_t rdma_stat_rq_starve;
62487 -extern atomic_t rdma_stat_rq_poll;
62488 -extern atomic_t rdma_stat_rq_prod;
62489 -extern atomic_t rdma_stat_sq_poll;
62490 -extern atomic_t rdma_stat_sq_prod;
62491 +extern atomic_unchecked_t rdma_stat_recv;
62492 +extern atomic_unchecked_t rdma_stat_read;
62493 +extern atomic_unchecked_t rdma_stat_write;
62494 +extern atomic_unchecked_t rdma_stat_sq_starve;
62495 +extern atomic_unchecked_t rdma_stat_rq_starve;
62496 +extern atomic_unchecked_t rdma_stat_rq_poll;
62497 +extern atomic_unchecked_t rdma_stat_rq_prod;
62498 +extern atomic_unchecked_t rdma_stat_sq_poll;
62499 +extern atomic_unchecked_t rdma_stat_sq_prod;
62500
62501 #define RPCRDMA_VERSION 1
62502
62503 diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
62504 index bb9127d..34ab358 100644
62505 --- a/include/linux/sysctl.h
62506 +++ b/include/linux/sysctl.h
62507 @@ -155,7 +155,11 @@ enum
62508 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
62509 };
62510
62511 -
62512 +#ifdef CONFIG_PAX_SOFTMODE
62513 +enum {
62514 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
62515 +};
62516 +#endif
62517
62518 /* CTL_VM names: */
62519 enum
62520 @@ -968,6 +972,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
62521
62522 extern int proc_dostring(struct ctl_table *, int,
62523 void __user *, size_t *, loff_t *);
62524 +extern int proc_dostring_modpriv(struct ctl_table *, int,
62525 + void __user *, size_t *, loff_t *);
62526 extern int proc_dointvec(struct ctl_table *, int,
62527 void __user *, size_t *, loff_t *);
62528 extern int proc_dointvec_minmax(struct ctl_table *, int,
62529 diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
62530 index a71a292..51bd91d 100644
62531 --- a/include/linux/tracehook.h
62532 +++ b/include/linux/tracehook.h
62533 @@ -54,12 +54,12 @@ struct linux_binprm;
62534 /*
62535 * ptrace report for syscall entry and exit looks identical.
62536 */
62537 -static inline void ptrace_report_syscall(struct pt_regs *regs)
62538 +static inline int ptrace_report_syscall(struct pt_regs *regs)
62539 {
62540 int ptrace = current->ptrace;
62541
62542 if (!(ptrace & PT_PTRACED))
62543 - return;
62544 + return 0;
62545
62546 ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));
62547
62548 @@ -72,6 +72,8 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
62549 send_sig(current->exit_code, current, 1);
62550 current->exit_code = 0;
62551 }
62552 +
62553 + return fatal_signal_pending(current);
62554 }
62555
62556 /**
62557 @@ -96,8 +98,7 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
62558 static inline __must_check int tracehook_report_syscall_entry(
62559 struct pt_regs *regs)
62560 {
62561 - ptrace_report_syscall(regs);
62562 - return 0;
62563 + return ptrace_report_syscall(regs);
62564 }
62565
62566 /**
62567 diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
62568 index ff7dc08..893e1bd 100644
62569 --- a/include/linux/tty_ldisc.h
62570 +++ b/include/linux/tty_ldisc.h
62571 @@ -148,7 +148,7 @@ struct tty_ldisc_ops {
62572
62573 struct module *owner;
62574
62575 - int refcount;
62576 + atomic_t refcount;
62577 };
62578
62579 struct tty_ldisc {
62580 diff --git a/include/linux/types.h b/include/linux/types.h
62581 index e5fa503..df6e8a4 100644
62582 --- a/include/linux/types.h
62583 +++ b/include/linux/types.h
62584 @@ -214,10 +214,26 @@ typedef struct {
62585 int counter;
62586 } atomic_t;
62587
62588 +#ifdef CONFIG_PAX_REFCOUNT
62589 +typedef struct {
62590 + int counter;
62591 +} atomic_unchecked_t;
62592 +#else
62593 +typedef atomic_t atomic_unchecked_t;
62594 +#endif
62595 +
62596 #ifdef CONFIG_64BIT
62597 typedef struct {
62598 long counter;
62599 } atomic64_t;
62600 +
62601 +#ifdef CONFIG_PAX_REFCOUNT
62602 +typedef struct {
62603 + long counter;
62604 +} atomic64_unchecked_t;
62605 +#else
62606 +typedef atomic64_t atomic64_unchecked_t;
62607 +#endif
62608 #endif
62609
62610 struct list_head {
62611 diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
62612 index 5ca0951..ab496a5 100644
62613 --- a/include/linux/uaccess.h
62614 +++ b/include/linux/uaccess.h
62615 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
62616 long ret; \
62617 mm_segment_t old_fs = get_fs(); \
62618 \
62619 - set_fs(KERNEL_DS); \
62620 pagefault_disable(); \
62621 - ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
62622 - pagefault_enable(); \
62623 + set_fs(KERNEL_DS); \
62624 + ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
62625 set_fs(old_fs); \
62626 + pagefault_enable(); \
62627 ret; \
62628 })
62629
62630 diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
62631 index 99c1b4d..bb94261 100644
62632 --- a/include/linux/unaligned/access_ok.h
62633 +++ b/include/linux/unaligned/access_ok.h
62634 @@ -6,32 +6,32 @@
62635
62636 static inline u16 get_unaligned_le16(const void *p)
62637 {
62638 - return le16_to_cpup((__le16 *)p);
62639 + return le16_to_cpup((const __le16 *)p);
62640 }
62641
62642 static inline u32 get_unaligned_le32(const void *p)
62643 {
62644 - return le32_to_cpup((__le32 *)p);
62645 + return le32_to_cpup((const __le32 *)p);
62646 }
62647
62648 static inline u64 get_unaligned_le64(const void *p)
62649 {
62650 - return le64_to_cpup((__le64 *)p);
62651 + return le64_to_cpup((const __le64 *)p);
62652 }
62653
62654 static inline u16 get_unaligned_be16(const void *p)
62655 {
62656 - return be16_to_cpup((__be16 *)p);
62657 + return be16_to_cpup((const __be16 *)p);
62658 }
62659
62660 static inline u32 get_unaligned_be32(const void *p)
62661 {
62662 - return be32_to_cpup((__be32 *)p);
62663 + return be32_to_cpup((const __be32 *)p);
62664 }
62665
62666 static inline u64 get_unaligned_be64(const void *p)
62667 {
62668 - return be64_to_cpup((__be64 *)p);
62669 + return be64_to_cpup((const __be64 *)p);
62670 }
62671
62672 static inline void put_unaligned_le16(u16 val, void *p)
62673 diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
62674 index 0d3f988..000f101 100644
62675 --- a/include/linux/usb/renesas_usbhs.h
62676 +++ b/include/linux/usb/renesas_usbhs.h
62677 @@ -39,7 +39,7 @@ enum {
62678 */
62679 struct renesas_usbhs_driver_callback {
62680 int (*notify_hotplug)(struct platform_device *pdev);
62681 -};
62682 +} __no_const;
62683
62684 /*
62685 * callback functions for platform
62686 @@ -97,7 +97,7 @@ struct renesas_usbhs_platform_callback {
62687 * VBUS control is needed for Host
62688 */
62689 int (*set_vbus)(struct platform_device *pdev, int enable);
62690 -};
62691 +} __no_const;
62692
62693 /*
62694 * parameters for renesas usbhs
62695 diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
62696 index 6f8fbcf..8259001 100644
62697 --- a/include/linux/vermagic.h
62698 +++ b/include/linux/vermagic.h
62699 @@ -25,9 +25,35 @@
62700 #define MODULE_ARCH_VERMAGIC ""
62701 #endif
62702
62703 +#ifdef CONFIG_PAX_REFCOUNT
62704 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
62705 +#else
62706 +#define MODULE_PAX_REFCOUNT ""
62707 +#endif
62708 +
62709 +#ifdef CONSTIFY_PLUGIN
62710 +#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
62711 +#else
62712 +#define MODULE_CONSTIFY_PLUGIN ""
62713 +#endif
62714 +
62715 +#ifdef STACKLEAK_PLUGIN
62716 +#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
62717 +#else
62718 +#define MODULE_STACKLEAK_PLUGIN ""
62719 +#endif
62720 +
62721 +#ifdef CONFIG_GRKERNSEC
62722 +#define MODULE_GRSEC "GRSEC "
62723 +#else
62724 +#define MODULE_GRSEC ""
62725 +#endif
62726 +
62727 #define VERMAGIC_STRING \
62728 UTS_RELEASE " " \
62729 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
62730 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
62731 - MODULE_ARCH_VERMAGIC
62732 + MODULE_ARCH_VERMAGIC \
62733 + MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
62734 + MODULE_GRSEC
62735
62736 diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
62737 index dcdfc2b..f937197 100644
62738 --- a/include/linux/vmalloc.h
62739 +++ b/include/linux/vmalloc.h
62740 @@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
62741 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
62742 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
62743 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
62744 +
62745 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
62746 +#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
62747 +#endif
62748 +
62749 /* bits [20..32] reserved for arch specific ioremap internals */
62750
62751 /*
62752 @@ -157,4 +162,103 @@ pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
62753 # endif
62754 #endif
62755
62756 +#define vmalloc(x) \
62757 +({ \
62758 + void *___retval; \
62759 + intoverflow_t ___x = (intoverflow_t)x; \
62760 + if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
62761 + ___retval = NULL; \
62762 + else \
62763 + ___retval = vmalloc((unsigned long)___x); \
62764 + ___retval; \
62765 +})
62766 +
62767 +#define vzalloc(x) \
62768 +({ \
62769 + void *___retval; \
62770 + intoverflow_t ___x = (intoverflow_t)x; \
62771 + if (WARN(___x > ULONG_MAX, "vzalloc size overflow\n")) \
62772 + ___retval = NULL; \
62773 + else \
62774 + ___retval = vzalloc((unsigned long)___x); \
62775 + ___retval; \
62776 +})
62777 +
62778 +#define __vmalloc(x, y, z) \
62779 +({ \
62780 + void *___retval; \
62781 + intoverflow_t ___x = (intoverflow_t)x; \
62782 + if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
62783 + ___retval = NULL; \
62784 + else \
62785 + ___retval = __vmalloc((unsigned long)___x, (y), (z));\
62786 + ___retval; \
62787 +})
62788 +
62789 +#define vmalloc_user(x) \
62790 +({ \
62791 + void *___retval; \
62792 + intoverflow_t ___x = (intoverflow_t)x; \
62793 + if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
62794 + ___retval = NULL; \
62795 + else \
62796 + ___retval = vmalloc_user((unsigned long)___x); \
62797 + ___retval; \
62798 +})
62799 +
62800 +#define vmalloc_exec(x) \
62801 +({ \
62802 + void *___retval; \
62803 + intoverflow_t ___x = (intoverflow_t)x; \
62804 + if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
62805 + ___retval = NULL; \
62806 + else \
62807 + ___retval = vmalloc_exec((unsigned long)___x); \
62808 + ___retval; \
62809 +})
62810 +
62811 +#define vmalloc_node(x, y) \
62812 +({ \
62813 + void *___retval; \
62814 + intoverflow_t ___x = (intoverflow_t)x; \
62815 + if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
62816 + ___retval = NULL; \
62817 + else \
62818 + ___retval = vmalloc_node((unsigned long)___x, (y));\
62819 + ___retval; \
62820 +})
62821 +
62822 +#define vzalloc_node(x, y) \
62823 +({ \
62824 + void *___retval; \
62825 + intoverflow_t ___x = (intoverflow_t)x; \
62826 + if (WARN(___x > ULONG_MAX, "vzalloc_node size overflow\n"))\
62827 + ___retval = NULL; \
62828 + else \
62829 + ___retval = vzalloc_node((unsigned long)___x, (y));\
62830 + ___retval; \
62831 +})
62832 +
62833 +#define vmalloc_32(x) \
62834 +({ \
62835 + void *___retval; \
62836 + intoverflow_t ___x = (intoverflow_t)x; \
62837 + if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
62838 + ___retval = NULL; \
62839 + else \
62840 + ___retval = vmalloc_32((unsigned long)___x); \
62841 + ___retval; \
62842 +})
62843 +
62844 +#define vmalloc_32_user(x) \
62845 +({ \
62846 +void *___retval; \
62847 + intoverflow_t ___x = (intoverflow_t)x; \
62848 + if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
62849 + ___retval = NULL; \
62850 + else \
62851 + ___retval = vmalloc_32_user((unsigned long)___x);\
62852 + ___retval; \
62853 +})
62854 +
62855 #endif /* _LINUX_VMALLOC_H */
62856 diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
62857 index 65efb92..137adbb 100644
62858 --- a/include/linux/vmstat.h
62859 +++ b/include/linux/vmstat.h
62860 @@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(int cpu)
62861 /*
62862 * Zone based page accounting with per cpu differentials.
62863 */
62864 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
62865 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
62866
62867 static inline void zone_page_state_add(long x, struct zone *zone,
62868 enum zone_stat_item item)
62869 {
62870 - atomic_long_add(x, &zone->vm_stat[item]);
62871 - atomic_long_add(x, &vm_stat[item]);
62872 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
62873 + atomic_long_add_unchecked(x, &vm_stat[item]);
62874 }
62875
62876 static inline unsigned long global_page_state(enum zone_stat_item item)
62877 {
62878 - long x = atomic_long_read(&vm_stat[item]);
62879 + long x = atomic_long_read_unchecked(&vm_stat[item]);
62880 #ifdef CONFIG_SMP
62881 if (x < 0)
62882 x = 0;
62883 @@ -109,7 +109,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
62884 static inline unsigned long zone_page_state(struct zone *zone,
62885 enum zone_stat_item item)
62886 {
62887 - long x = atomic_long_read(&zone->vm_stat[item]);
62888 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
62889 #ifdef CONFIG_SMP
62890 if (x < 0)
62891 x = 0;
62892 @@ -126,7 +126,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
62893 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
62894 enum zone_stat_item item)
62895 {
62896 - long x = atomic_long_read(&zone->vm_stat[item]);
62897 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
62898
62899 #ifdef CONFIG_SMP
62900 int cpu;
62901 @@ -221,8 +221,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
62902
62903 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
62904 {
62905 - atomic_long_inc(&zone->vm_stat[item]);
62906 - atomic_long_inc(&vm_stat[item]);
62907 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
62908 + atomic_long_inc_unchecked(&vm_stat[item]);
62909 }
62910
62911 static inline void __inc_zone_page_state(struct page *page,
62912 @@ -233,8 +233,8 @@ static inline void __inc_zone_page_state(struct page *page,
62913
62914 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
62915 {
62916 - atomic_long_dec(&zone->vm_stat[item]);
62917 - atomic_long_dec(&vm_stat[item]);
62918 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
62919 + atomic_long_dec_unchecked(&vm_stat[item]);
62920 }
62921
62922 static inline void __dec_zone_page_state(struct page *page,
62923 diff --git a/include/linux/xattr.h b/include/linux/xattr.h
62924 index e5d1220..ef6e406 100644
62925 --- a/include/linux/xattr.h
62926 +++ b/include/linux/xattr.h
62927 @@ -57,6 +57,11 @@
62928 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
62929 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
62930
62931 +/* User namespace */
62932 +#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
62933 +#define XATTR_PAX_FLAGS_SUFFIX "flags"
62934 +#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
62935 +
62936 #ifdef __KERNEL__
62937
62938 #include <linux/types.h>
62939 diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
62940 index 4aeff96..b378cdc 100644
62941 --- a/include/media/saa7146_vv.h
62942 +++ b/include/media/saa7146_vv.h
62943 @@ -163,7 +163,7 @@ struct saa7146_ext_vv
62944 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
62945
62946 /* the extension can override this */
62947 - struct v4l2_ioctl_ops ops;
62948 + v4l2_ioctl_ops_no_const ops;
62949 /* pointer to the saa7146 core ops */
62950 const struct v4l2_ioctl_ops *core_ops;
62951
62952 diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
62953 index c7c40f1..4f01585 100644
62954 --- a/include/media/v4l2-dev.h
62955 +++ b/include/media/v4l2-dev.h
62956 @@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority local);
62957
62958
62959 struct v4l2_file_operations {
62960 - struct module *owner;
62961 + struct module * const owner;
62962 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
62963 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
62964 unsigned int (*poll) (struct file *, struct poll_table_struct *);
62965 @@ -68,6 +68,7 @@ struct v4l2_file_operations {
62966 int (*open) (struct file *);
62967 int (*release) (struct file *);
62968 };
62969 +typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
62970
62971 /*
62972 * Newer version of video_device, handled by videodev2.c
62973 diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
62974 index 3f5d60f..44210ed 100644
62975 --- a/include/media/v4l2-ioctl.h
62976 +++ b/include/media/v4l2-ioctl.h
62977 @@ -278,7 +278,7 @@ struct v4l2_ioctl_ops {
62978 long (*vidioc_default) (struct file *file, void *fh,
62979 bool valid_prio, int cmd, void *arg);
62980 };
62981 -
62982 +typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
62983
62984 /* v4l debugging and diagnostics */
62985
62986 diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
62987 index 8d55251..dfe5b0a 100644
62988 --- a/include/net/caif/caif_hsi.h
62989 +++ b/include/net/caif/caif_hsi.h
62990 @@ -98,7 +98,7 @@ struct cfhsi_drv {
62991 void (*rx_done_cb) (struct cfhsi_drv *drv);
62992 void (*wake_up_cb) (struct cfhsi_drv *drv);
62993 void (*wake_down_cb) (struct cfhsi_drv *drv);
62994 -};
62995 +} __no_const;
62996
62997 /* Structure implemented by HSI device. */
62998 struct cfhsi_dev {
62999 diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
63000 index 9e5425b..8136ffc 100644
63001 --- a/include/net/caif/cfctrl.h
63002 +++ b/include/net/caif/cfctrl.h
63003 @@ -52,7 +52,7 @@ struct cfctrl_rsp {
63004 void (*radioset_rsp)(void);
63005 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
63006 struct cflayer *client_layer);
63007 -};
63008 +} __no_const;
63009
63010 /* Link Setup Parameters for CAIF-Links. */
63011 struct cfctrl_link_param {
63012 @@ -101,8 +101,8 @@ struct cfctrl_request_info {
63013 struct cfctrl {
63014 struct cfsrvl serv;
63015 struct cfctrl_rsp res;
63016 - atomic_t req_seq_no;
63017 - atomic_t rsp_seq_no;
63018 + atomic_unchecked_t req_seq_no;
63019 + atomic_unchecked_t rsp_seq_no;
63020 struct list_head list;
63021 /* Protects from simultaneous access to first_req list */
63022 spinlock_t info_list_lock;
63023 diff --git a/include/net/flow.h b/include/net/flow.h
63024 index 6c469db..7743b8e 100644
63025 --- a/include/net/flow.h
63026 +++ b/include/net/flow.h
63027 @@ -221,6 +221,6 @@ extern struct flow_cache_object *flow_cache_lookup(
63028
63029 extern void flow_cache_flush(void);
63030 extern void flow_cache_flush_deferred(void);
63031 -extern atomic_t flow_cache_genid;
63032 +extern atomic_unchecked_t flow_cache_genid;
63033
63034 #endif
63035 diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
63036 index b94765e..053f68b 100644
63037 --- a/include/net/inetpeer.h
63038 +++ b/include/net/inetpeer.h
63039 @@ -48,8 +48,8 @@ struct inet_peer {
63040 */
63041 union {
63042 struct {
63043 - atomic_t rid; /* Frag reception counter */
63044 - atomic_t ip_id_count; /* IP ID for the next packet */
63045 + atomic_unchecked_t rid; /* Frag reception counter */
63046 + atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
63047 __u32 tcp_ts;
63048 __u32 tcp_ts_stamp;
63049 };
63050 @@ -115,11 +115,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
63051 more++;
63052 inet_peer_refcheck(p);
63053 do {
63054 - old = atomic_read(&p->ip_id_count);
63055 + old = atomic_read_unchecked(&p->ip_id_count);
63056 new = old + more;
63057 if (!new)
63058 new = 1;
63059 - } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
63060 + } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
63061 return new;
63062 }
63063
63064 diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
63065 index 10422ef..662570f 100644
63066 --- a/include/net/ip_fib.h
63067 +++ b/include/net/ip_fib.h
63068 @@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
63069
63070 #define FIB_RES_SADDR(net, res) \
63071 ((FIB_RES_NH(res).nh_saddr_genid == \
63072 - atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
63073 + atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
63074 FIB_RES_NH(res).nh_saddr : \
63075 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
63076 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
63077 diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
63078 index ebe517f..1bd286b 100644
63079 --- a/include/net/ip_vs.h
63080 +++ b/include/net/ip_vs.h
63081 @@ -509,7 +509,7 @@ struct ip_vs_conn {
63082 struct ip_vs_conn *control; /* Master control connection */
63083 atomic_t n_control; /* Number of controlled ones */
63084 struct ip_vs_dest *dest; /* real server */
63085 - atomic_t in_pkts; /* incoming packet counter */
63086 + atomic_unchecked_t in_pkts; /* incoming packet counter */
63087
63088 /* packet transmitter for different forwarding methods. If it
63089 mangles the packet, it must return NF_DROP or better NF_STOLEN,
63090 @@ -647,7 +647,7 @@ struct ip_vs_dest {
63091 __be16 port; /* port number of the server */
63092 union nf_inet_addr addr; /* IP address of the server */
63093 volatile unsigned flags; /* dest status flags */
63094 - atomic_t conn_flags; /* flags to copy to conn */
63095 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
63096 atomic_t weight; /* server weight */
63097
63098 atomic_t refcnt; /* reference counter */
63099 diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
63100 index 69b610a..fe3962c 100644
63101 --- a/include/net/irda/ircomm_core.h
63102 +++ b/include/net/irda/ircomm_core.h
63103 @@ -51,7 +51,7 @@ typedef struct {
63104 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
63105 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
63106 struct ircomm_info *);
63107 -} call_t;
63108 +} __no_const call_t;
63109
63110 struct ircomm_cb {
63111 irda_queue_t queue;
63112 diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
63113 index 59ba38bc..d515662 100644
63114 --- a/include/net/irda/ircomm_tty.h
63115 +++ b/include/net/irda/ircomm_tty.h
63116 @@ -35,6 +35,7 @@
63117 #include <linux/termios.h>
63118 #include <linux/timer.h>
63119 #include <linux/tty.h> /* struct tty_struct */
63120 +#include <asm/local.h>
63121
63122 #include <net/irda/irias_object.h>
63123 #include <net/irda/ircomm_core.h>
63124 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
63125 unsigned short close_delay;
63126 unsigned short closing_wait; /* time to wait before closing */
63127
63128 - int open_count;
63129 - int blocked_open; /* # of blocked opens */
63130 + local_t open_count;
63131 + local_t blocked_open; /* # of blocked opens */
63132
63133 /* Protect concurent access to :
63134 * o self->open_count
63135 diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
63136 index 0954ec9..7413562 100644
63137 --- a/include/net/iucv/af_iucv.h
63138 +++ b/include/net/iucv/af_iucv.h
63139 @@ -138,7 +138,7 @@ struct iucv_sock {
63140 struct iucv_sock_list {
63141 struct hlist_head head;
63142 rwlock_t lock;
63143 - atomic_t autobind_name;
63144 + atomic_unchecked_t autobind_name;
63145 };
63146
63147 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
63148 diff --git a/include/net/neighbour.h b/include/net/neighbour.h
63149 index 34c996f..bb3b4d4 100644
63150 --- a/include/net/neighbour.h
63151 +++ b/include/net/neighbour.h
63152 @@ -123,7 +123,7 @@ struct neigh_ops {
63153 void (*error_report)(struct neighbour *, struct sk_buff *);
63154 int (*output)(struct neighbour *, struct sk_buff *);
63155 int (*connected_output)(struct neighbour *, struct sk_buff *);
63156 -};
63157 +} __do_const;
63158
63159 struct pneigh_entry {
63160 struct pneigh_entry *next;
63161 diff --git a/include/net/netlink.h b/include/net/netlink.h
63162 index cb1f350..3279d2c 100644
63163 --- a/include/net/netlink.h
63164 +++ b/include/net/netlink.h
63165 @@ -569,7 +569,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
63166 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
63167 {
63168 if (mark)
63169 - skb_trim(skb, (unsigned char *) mark - skb->data);
63170 + skb_trim(skb, (const unsigned char *) mark - skb->data);
63171 }
63172
63173 /**
63174 diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
63175 index bbd023a..97c6d0d 100644
63176 --- a/include/net/netns/ipv4.h
63177 +++ b/include/net/netns/ipv4.h
63178 @@ -57,8 +57,8 @@ struct netns_ipv4 {
63179 unsigned int sysctl_ping_group_range[2];
63180 long sysctl_tcp_mem[3];
63181
63182 - atomic_t rt_genid;
63183 - atomic_t dev_addr_genid;
63184 + atomic_unchecked_t rt_genid;
63185 + atomic_unchecked_t dev_addr_genid;
63186
63187 #ifdef CONFIG_IP_MROUTE
63188 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
63189 diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
63190 index d368561..96aaa17 100644
63191 --- a/include/net/sctp/sctp.h
63192 +++ b/include/net/sctp/sctp.h
63193 @@ -318,9 +318,9 @@ do { \
63194
63195 #else /* SCTP_DEBUG */
63196
63197 -#define SCTP_DEBUG_PRINTK(whatever...)
63198 -#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
63199 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
63200 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
63201 +#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
63202 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
63203 #define SCTP_ENABLE_DEBUG
63204 #define SCTP_DISABLE_DEBUG
63205 #define SCTP_ASSERT(expr, str, func)
63206 diff --git a/include/net/sock.h b/include/net/sock.h
63207 index 91c1c8b..15ae923 100644
63208 --- a/include/net/sock.h
63209 +++ b/include/net/sock.h
63210 @@ -299,7 +299,7 @@ struct sock {
63211 #ifdef CONFIG_RPS
63212 __u32 sk_rxhash;
63213 #endif
63214 - atomic_t sk_drops;
63215 + atomic_unchecked_t sk_drops;
63216 int sk_rcvbuf;
63217
63218 struct sk_filter __rcu *sk_filter;
63219 @@ -1660,7 +1660,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
63220 }
63221
63222 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
63223 - char __user *from, char *to,
63224 + char __user *from, unsigned char *to,
63225 int copy, int offset)
63226 {
63227 if (skb->ip_summed == CHECKSUM_NONE) {
63228 diff --git a/include/net/tcp.h b/include/net/tcp.h
63229 index 2d80c29..aa07caf 100644
63230 --- a/include/net/tcp.h
63231 +++ b/include/net/tcp.h
63232 @@ -1426,7 +1426,7 @@ struct tcp_seq_afinfo {
63233 char *name;
63234 sa_family_t family;
63235 const struct file_operations *seq_fops;
63236 - struct seq_operations seq_ops;
63237 + seq_operations_no_const seq_ops;
63238 };
63239
63240 struct tcp_iter_state {
63241 diff --git a/include/net/udp.h b/include/net/udp.h
63242 index e39592f..fef9680 100644
63243 --- a/include/net/udp.h
63244 +++ b/include/net/udp.h
63245 @@ -243,7 +243,7 @@ struct udp_seq_afinfo {
63246 sa_family_t family;
63247 struct udp_table *udp_table;
63248 const struct file_operations *seq_fops;
63249 - struct seq_operations seq_ops;
63250 + seq_operations_no_const seq_ops;
63251 };
63252
63253 struct udp_iter_state {
63254 diff --git a/include/net/xfrm.h b/include/net/xfrm.h
63255 index 89174e2..1f82598 100644
63256 --- a/include/net/xfrm.h
63257 +++ b/include/net/xfrm.h
63258 @@ -505,7 +505,7 @@ struct xfrm_policy {
63259 struct timer_list timer;
63260
63261 struct flow_cache_object flo;
63262 - atomic_t genid;
63263 + atomic_unchecked_t genid;
63264 u32 priority;
63265 u32 index;
63266 struct xfrm_mark mark;
63267 diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
63268 index 1a046b1..ee0bef0 100644
63269 --- a/include/rdma/iw_cm.h
63270 +++ b/include/rdma/iw_cm.h
63271 @@ -122,7 +122,7 @@ struct iw_cm_verbs {
63272 int backlog);
63273
63274 int (*destroy_listen)(struct iw_cm_id *cm_id);
63275 -};
63276 +} __no_const;
63277
63278 /**
63279 * iw_create_cm_id - Create an IW CM identifier.
63280 diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
63281 index 6a3922f..0b73022 100644
63282 --- a/include/scsi/libfc.h
63283 +++ b/include/scsi/libfc.h
63284 @@ -748,6 +748,7 @@ struct libfc_function_template {
63285 */
63286 void (*disc_stop_final) (struct fc_lport *);
63287 };
63288 +typedef struct libfc_function_template __no_const libfc_function_template_no_const;
63289
63290 /**
63291 * struct fc_disc - Discovery context
63292 @@ -851,7 +852,7 @@ struct fc_lport {
63293 struct fc_vport *vport;
63294
63295 /* Operational Information */
63296 - struct libfc_function_template tt;
63297 + libfc_function_template_no_const tt;
63298 u8 link_up;
63299 u8 qfull;
63300 enum fc_lport_state state;
63301 diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
63302 index 77273f2..dd4031f 100644
63303 --- a/include/scsi/scsi_device.h
63304 +++ b/include/scsi/scsi_device.h
63305 @@ -161,9 +161,9 @@ struct scsi_device {
63306 unsigned int max_device_blocked; /* what device_blocked counts down from */
63307 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
63308
63309 - atomic_t iorequest_cnt;
63310 - atomic_t iodone_cnt;
63311 - atomic_t ioerr_cnt;
63312 + atomic_unchecked_t iorequest_cnt;
63313 + atomic_unchecked_t iodone_cnt;
63314 + atomic_unchecked_t ioerr_cnt;
63315
63316 struct device sdev_gendev,
63317 sdev_dev;
63318 diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
63319 index 2a65167..91e01f8 100644
63320 --- a/include/scsi/scsi_transport_fc.h
63321 +++ b/include/scsi/scsi_transport_fc.h
63322 @@ -711,7 +711,7 @@ struct fc_function_template {
63323 unsigned long show_host_system_hostname:1;
63324
63325 unsigned long disable_target_scan:1;
63326 -};
63327 +} __do_const;
63328
63329
63330 /**
63331 diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
63332 index 030b87c..98a6954 100644
63333 --- a/include/sound/ak4xxx-adda.h
63334 +++ b/include/sound/ak4xxx-adda.h
63335 @@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
63336 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
63337 unsigned char val);
63338 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
63339 -};
63340 +} __no_const;
63341
63342 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
63343
63344 diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
63345 index 8c05e47..2b5df97 100644
63346 --- a/include/sound/hwdep.h
63347 +++ b/include/sound/hwdep.h
63348 @@ -49,7 +49,7 @@ struct snd_hwdep_ops {
63349 struct snd_hwdep_dsp_status *status);
63350 int (*dsp_load)(struct snd_hwdep *hw,
63351 struct snd_hwdep_dsp_image *image);
63352 -};
63353 +} __no_const;
63354
63355 struct snd_hwdep {
63356 struct snd_card *card;
63357 diff --git a/include/sound/info.h b/include/sound/info.h
63358 index 9ca1a49..aba1728 100644
63359 --- a/include/sound/info.h
63360 +++ b/include/sound/info.h
63361 @@ -44,7 +44,7 @@ struct snd_info_entry_text {
63362 struct snd_info_buffer *buffer);
63363 void (*write)(struct snd_info_entry *entry,
63364 struct snd_info_buffer *buffer);
63365 -};
63366 +} __no_const;
63367
63368 struct snd_info_entry_ops {
63369 int (*open)(struct snd_info_entry *entry,
63370 diff --git a/include/sound/pcm.h b/include/sound/pcm.h
63371 index 0cf91b2..b70cae4 100644
63372 --- a/include/sound/pcm.h
63373 +++ b/include/sound/pcm.h
63374 @@ -81,6 +81,7 @@ struct snd_pcm_ops {
63375 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
63376 int (*ack)(struct snd_pcm_substream *substream);
63377 };
63378 +typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
63379
63380 /*
63381 *
63382 diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
63383 index af1b49e..a5d55a5 100644
63384 --- a/include/sound/sb16_csp.h
63385 +++ b/include/sound/sb16_csp.h
63386 @@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
63387 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
63388 int (*csp_stop) (struct snd_sb_csp * p);
63389 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
63390 -};
63391 +} __no_const;
63392
63393 /*
63394 * CSP private data
63395 diff --git a/include/sound/soc.h b/include/sound/soc.h
63396 index 0992dff..bb366fe 100644
63397 --- a/include/sound/soc.h
63398 +++ b/include/sound/soc.h
63399 @@ -682,7 +682,7 @@ struct snd_soc_platform_driver {
63400 /* platform IO - used for platform DAPM */
63401 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
63402 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
63403 -};
63404 +} __do_const;
63405
63406 struct snd_soc_platform {
63407 const char *name;
63408 @@ -852,7 +852,7 @@ struct snd_soc_pcm_runtime {
63409 struct snd_soc_dai_link *dai_link;
63410 struct mutex pcm_mutex;
63411 enum snd_soc_pcm_subclass pcm_subclass;
63412 - struct snd_pcm_ops ops;
63413 + snd_pcm_ops_no_const ops;
63414
63415 unsigned int complete:1;
63416 unsigned int dev_registered:1;
63417 diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
63418 index 444cd6b..3327cc5 100644
63419 --- a/include/sound/ymfpci.h
63420 +++ b/include/sound/ymfpci.h
63421 @@ -358,7 +358,7 @@ struct snd_ymfpci {
63422 spinlock_t reg_lock;
63423 spinlock_t voice_lock;
63424 wait_queue_head_t interrupt_sleep;
63425 - atomic_t interrupt_sleep_count;
63426 + atomic_unchecked_t interrupt_sleep_count;
63427 struct snd_info_entry *proc_entry;
63428 const struct firmware *dsp_microcode;
63429 const struct firmware *controller_microcode;
63430 diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
63431 index dc4e345..6bf6080 100644
63432 --- a/include/target/target_core_base.h
63433 +++ b/include/target/target_core_base.h
63434 @@ -443,7 +443,7 @@ struct t10_reservation_ops {
63435 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
63436 int (*t10_pr_register)(struct se_cmd *);
63437 int (*t10_pr_clear)(struct se_cmd *);
63438 -};
63439 +} __no_const;
63440
63441 struct t10_reservation {
63442 /* Reservation effects all target ports */
63443 @@ -561,8 +561,8 @@ struct se_cmd {
63444 atomic_t t_se_count;
63445 atomic_t t_task_cdbs_left;
63446 atomic_t t_task_cdbs_ex_left;
63447 - atomic_t t_task_cdbs_sent;
63448 - atomic_t t_transport_aborted;
63449 + atomic_unchecked_t t_task_cdbs_sent;
63450 + atomic_unchecked_t t_transport_aborted;
63451 atomic_t t_transport_active;
63452 atomic_t t_transport_complete;
63453 atomic_t t_transport_queue_active;
63454 @@ -799,7 +799,7 @@ struct se_device {
63455 spinlock_t stats_lock;
63456 /* Active commands on this virtual SE device */
63457 atomic_t simple_cmds;
63458 - atomic_t dev_ordered_id;
63459 + atomic_unchecked_t dev_ordered_id;
63460 atomic_t execute_tasks;
63461 atomic_t dev_ordered_sync;
63462 atomic_t dev_qf_count;
63463 diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
63464 index 1c09820..7f5ec79 100644
63465 --- a/include/trace/events/irq.h
63466 +++ b/include/trace/events/irq.h
63467 @@ -36,7 +36,7 @@ struct softirq_action;
63468 */
63469 TRACE_EVENT(irq_handler_entry,
63470
63471 - TP_PROTO(int irq, struct irqaction *action),
63472 + TP_PROTO(int irq, const struct irqaction *action),
63473
63474 TP_ARGS(irq, action),
63475
63476 @@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
63477 */
63478 TRACE_EVENT(irq_handler_exit,
63479
63480 - TP_PROTO(int irq, struct irqaction *action, int ret),
63481 + TP_PROTO(int irq, const struct irqaction *action, int ret),
63482
63483 TP_ARGS(irq, action, ret),
63484
63485 diff --git a/include/video/udlfb.h b/include/video/udlfb.h
63486 index c41f308..6918de3 100644
63487 --- a/include/video/udlfb.h
63488 +++ b/include/video/udlfb.h
63489 @@ -52,10 +52,10 @@ struct dlfb_data {
63490 u32 pseudo_palette[256];
63491 int blank_mode; /*one of FB_BLANK_ */
63492 /* blit-only rendering path metrics, exposed through sysfs */
63493 - atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
63494 - atomic_t bytes_identical; /* saved effort with backbuffer comparison */
63495 - atomic_t bytes_sent; /* to usb, after compression including overhead */
63496 - atomic_t cpu_kcycles_used; /* transpired during pixel processing */
63497 + atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
63498 + atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
63499 + atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
63500 + atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
63501 };
63502
63503 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
63504 diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
63505 index 0993a22..32ba2fe 100644
63506 --- a/include/video/uvesafb.h
63507 +++ b/include/video/uvesafb.h
63508 @@ -177,6 +177,7 @@ struct uvesafb_par {
63509 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
63510 u8 pmi_setpal; /* PMI for palette changes */
63511 u16 *pmi_base; /* protected mode interface location */
63512 + u8 *pmi_code; /* protected mode code location */
63513 void *pmi_start;
63514 void *pmi_pal;
63515 u8 *vbe_state_orig; /*
63516 diff --git a/init/Kconfig b/init/Kconfig
63517 index 3f42cd6..613f41d 100644
63518 --- a/init/Kconfig
63519 +++ b/init/Kconfig
63520 @@ -799,6 +799,7 @@ endif # CGROUPS
63521
63522 config CHECKPOINT_RESTORE
63523 bool "Checkpoint/restore support" if EXPERT
63524 + depends on !GRKERNSEC
63525 default n
63526 help
63527 Enables additional kernel features in a sake of checkpoint/restore.
63528 @@ -1249,7 +1250,7 @@ config SLUB_DEBUG
63529
63530 config COMPAT_BRK
63531 bool "Disable heap randomization"
63532 - default y
63533 + default n
63534 help
63535 Randomizing heap placement makes heap exploits harder, but it
63536 also breaks ancient binaries (including anything libc5 based).
63537 diff --git a/init/do_mounts.c b/init/do_mounts.c
63538 index 2974c8b..0b863ae 100644
63539 --- a/init/do_mounts.c
63540 +++ b/init/do_mounts.c
63541 @@ -326,11 +326,11 @@ static void __init get_fs_names(char *page)
63542 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
63543 {
63544 struct super_block *s;
63545 - int err = sys_mount(name, "/root", fs, flags, data);
63546 + int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
63547 if (err)
63548 return err;
63549
63550 - sys_chdir((const char __user __force *)"/root");
63551 + sys_chdir((const char __force_user *)"/root");
63552 s = current->fs->pwd.dentry->d_sb;
63553 ROOT_DEV = s->s_dev;
63554 printk(KERN_INFO
63555 @@ -450,18 +450,18 @@ void __init change_floppy(char *fmt, ...)
63556 va_start(args, fmt);
63557 vsprintf(buf, fmt, args);
63558 va_end(args);
63559 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
63560 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
63561 if (fd >= 0) {
63562 sys_ioctl(fd, FDEJECT, 0);
63563 sys_close(fd);
63564 }
63565 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
63566 - fd = sys_open("/dev/console", O_RDWR, 0);
63567 + fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
63568 if (fd >= 0) {
63569 sys_ioctl(fd, TCGETS, (long)&termios);
63570 termios.c_lflag &= ~ICANON;
63571 sys_ioctl(fd, TCSETSF, (long)&termios);
63572 - sys_read(fd, &c, 1);
63573 + sys_read(fd, (char __user *)&c, 1);
63574 termios.c_lflag |= ICANON;
63575 sys_ioctl(fd, TCSETSF, (long)&termios);
63576 sys_close(fd);
63577 @@ -555,6 +555,6 @@ void __init prepare_namespace(void)
63578 mount_root();
63579 out:
63580 devtmpfs_mount("dev");
63581 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
63582 - sys_chroot((const char __user __force *)".");
63583 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
63584 + sys_chroot((const char __force_user *)".");
63585 }
63586 diff --git a/init/do_mounts.h b/init/do_mounts.h
63587 index f5b978a..69dbfe8 100644
63588 --- a/init/do_mounts.h
63589 +++ b/init/do_mounts.h
63590 @@ -15,15 +15,15 @@ extern int root_mountflags;
63591
63592 static inline int create_dev(char *name, dev_t dev)
63593 {
63594 - sys_unlink(name);
63595 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
63596 + sys_unlink((char __force_user *)name);
63597 + return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
63598 }
63599
63600 #if BITS_PER_LONG == 32
63601 static inline u32 bstat(char *name)
63602 {
63603 struct stat64 stat;
63604 - if (sys_stat64(name, &stat) != 0)
63605 + if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
63606 return 0;
63607 if (!S_ISBLK(stat.st_mode))
63608 return 0;
63609 @@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
63610 static inline u32 bstat(char *name)
63611 {
63612 struct stat stat;
63613 - if (sys_newstat(name, &stat) != 0)
63614 + if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
63615 return 0;
63616 if (!S_ISBLK(stat.st_mode))
63617 return 0;
63618 diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
63619 index 3098a38..253064e 100644
63620 --- a/init/do_mounts_initrd.c
63621 +++ b/init/do_mounts_initrd.c
63622 @@ -44,13 +44,13 @@ static void __init handle_initrd(void)
63623 create_dev("/dev/root.old", Root_RAM0);
63624 /* mount initrd on rootfs' /root */
63625 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
63626 - sys_mkdir("/old", 0700);
63627 - root_fd = sys_open("/", 0, 0);
63628 - old_fd = sys_open("/old", 0, 0);
63629 + sys_mkdir((const char __force_user *)"/old", 0700);
63630 + root_fd = sys_open((const char __force_user *)"/", 0, 0);
63631 + old_fd = sys_open((const char __force_user *)"/old", 0, 0);
63632 /* move initrd over / and chdir/chroot in initrd root */
63633 - sys_chdir("/root");
63634 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
63635 - sys_chroot(".");
63636 + sys_chdir((const char __force_user *)"/root");
63637 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
63638 + sys_chroot((const char __force_user *)".");
63639
63640 /*
63641 * In case that a resume from disk is carried out by linuxrc or one of
63642 @@ -67,15 +67,15 @@ static void __init handle_initrd(void)
63643
63644 /* move initrd to rootfs' /old */
63645 sys_fchdir(old_fd);
63646 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
63647 + sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
63648 /* switch root and cwd back to / of rootfs */
63649 sys_fchdir(root_fd);
63650 - sys_chroot(".");
63651 + sys_chroot((const char __force_user *)".");
63652 sys_close(old_fd);
63653 sys_close(root_fd);
63654
63655 if (new_decode_dev(real_root_dev) == Root_RAM0) {
63656 - sys_chdir("/old");
63657 + sys_chdir((const char __force_user *)"/old");
63658 return;
63659 }
63660
63661 @@ -83,17 +83,17 @@ static void __init handle_initrd(void)
63662 mount_root();
63663
63664 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
63665 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
63666 + error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
63667 if (!error)
63668 printk("okay\n");
63669 else {
63670 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
63671 + int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
63672 if (error == -ENOENT)
63673 printk("/initrd does not exist. Ignored.\n");
63674 else
63675 printk("failed\n");
63676 printk(KERN_NOTICE "Unmounting old root\n");
63677 - sys_umount("/old", MNT_DETACH);
63678 + sys_umount((char __force_user *)"/old", MNT_DETACH);
63679 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
63680 if (fd < 0) {
63681 error = fd;
63682 @@ -116,11 +116,11 @@ int __init initrd_load(void)
63683 * mounted in the normal path.
63684 */
63685 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
63686 - sys_unlink("/initrd.image");
63687 + sys_unlink((const char __force_user *)"/initrd.image");
63688 handle_initrd();
63689 return 1;
63690 }
63691 }
63692 - sys_unlink("/initrd.image");
63693 + sys_unlink((const char __force_user *)"/initrd.image");
63694 return 0;
63695 }
63696 diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
63697 index 32c4799..c27ee74 100644
63698 --- a/init/do_mounts_md.c
63699 +++ b/init/do_mounts_md.c
63700 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
63701 partitioned ? "_d" : "", minor,
63702 md_setup_args[ent].device_names);
63703
63704 - fd = sys_open(name, 0, 0);
63705 + fd = sys_open((char __force_user *)name, 0, 0);
63706 if (fd < 0) {
63707 printk(KERN_ERR "md: open failed - cannot start "
63708 "array %s\n", name);
63709 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
63710 * array without it
63711 */
63712 sys_close(fd);
63713 - fd = sys_open(name, 0, 0);
63714 + fd = sys_open((char __force_user *)name, 0, 0);
63715 sys_ioctl(fd, BLKRRPART, 0);
63716 }
63717 sys_close(fd);
63718 @@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
63719
63720 wait_for_device_probe();
63721
63722 - fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
63723 + fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
63724 if (fd >= 0) {
63725 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
63726 sys_close(fd);
63727 diff --git a/init/initramfs.c b/init/initramfs.c
63728 index 8216c30..25e8e32 100644
63729 --- a/init/initramfs.c
63730 +++ b/init/initramfs.c
63731 @@ -74,7 +74,7 @@ static void __init free_hash(void)
63732 }
63733 }
63734
63735 -static long __init do_utime(char __user *filename, time_t mtime)
63736 +static long __init do_utime(__force char __user *filename, time_t mtime)
63737 {
63738 struct timespec t[2];
63739
63740 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
63741 struct dir_entry *de, *tmp;
63742 list_for_each_entry_safe(de, tmp, &dir_list, list) {
63743 list_del(&de->list);
63744 - do_utime(de->name, de->mtime);
63745 + do_utime((char __force_user *)de->name, de->mtime);
63746 kfree(de->name);
63747 kfree(de);
63748 }
63749 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
63750 if (nlink >= 2) {
63751 char *old = find_link(major, minor, ino, mode, collected);
63752 if (old)
63753 - return (sys_link(old, collected) < 0) ? -1 : 1;
63754 + return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
63755 }
63756 return 0;
63757 }
63758 @@ -280,11 +280,11 @@ static void __init clean_path(char *path, umode_t mode)
63759 {
63760 struct stat st;
63761
63762 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
63763 + if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
63764 if (S_ISDIR(st.st_mode))
63765 - sys_rmdir(path);
63766 + sys_rmdir((char __force_user *)path);
63767 else
63768 - sys_unlink(path);
63769 + sys_unlink((char __force_user *)path);
63770 }
63771 }
63772
63773 @@ -305,7 +305,7 @@ static int __init do_name(void)
63774 int openflags = O_WRONLY|O_CREAT;
63775 if (ml != 1)
63776 openflags |= O_TRUNC;
63777 - wfd = sys_open(collected, openflags, mode);
63778 + wfd = sys_open((char __force_user *)collected, openflags, mode);
63779
63780 if (wfd >= 0) {
63781 sys_fchown(wfd, uid, gid);
63782 @@ -317,17 +317,17 @@ static int __init do_name(void)
63783 }
63784 }
63785 } else if (S_ISDIR(mode)) {
63786 - sys_mkdir(collected, mode);
63787 - sys_chown(collected, uid, gid);
63788 - sys_chmod(collected, mode);
63789 + sys_mkdir((char __force_user *)collected, mode);
63790 + sys_chown((char __force_user *)collected, uid, gid);
63791 + sys_chmod((char __force_user *)collected, mode);
63792 dir_add(collected, mtime);
63793 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
63794 S_ISFIFO(mode) || S_ISSOCK(mode)) {
63795 if (maybe_link() == 0) {
63796 - sys_mknod(collected, mode, rdev);
63797 - sys_chown(collected, uid, gid);
63798 - sys_chmod(collected, mode);
63799 - do_utime(collected, mtime);
63800 + sys_mknod((char __force_user *)collected, mode, rdev);
63801 + sys_chown((char __force_user *)collected, uid, gid);
63802 + sys_chmod((char __force_user *)collected, mode);
63803 + do_utime((char __force_user *)collected, mtime);
63804 }
63805 }
63806 return 0;
63807 @@ -336,15 +336,15 @@ static int __init do_name(void)
63808 static int __init do_copy(void)
63809 {
63810 if (count >= body_len) {
63811 - sys_write(wfd, victim, body_len);
63812 + sys_write(wfd, (char __force_user *)victim, body_len);
63813 sys_close(wfd);
63814 - do_utime(vcollected, mtime);
63815 + do_utime((char __force_user *)vcollected, mtime);
63816 kfree(vcollected);
63817 eat(body_len);
63818 state = SkipIt;
63819 return 0;
63820 } else {
63821 - sys_write(wfd, victim, count);
63822 + sys_write(wfd, (char __force_user *)victim, count);
63823 body_len -= count;
63824 eat(count);
63825 return 1;
63826 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
63827 {
63828 collected[N_ALIGN(name_len) + body_len] = '\0';
63829 clean_path(collected, 0);
63830 - sys_symlink(collected + N_ALIGN(name_len), collected);
63831 - sys_lchown(collected, uid, gid);
63832 - do_utime(collected, mtime);
63833 + sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
63834 + sys_lchown((char __force_user *)collected, uid, gid);
63835 + do_utime((char __force_user *)collected, mtime);
63836 state = SkipIt;
63837 next_state = Reset;
63838 return 0;
63839 diff --git a/init/main.c b/init/main.c
63840 index ff49a6d..5fa0429 100644
63841 --- a/init/main.c
63842 +++ b/init/main.c
63843 @@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void) { }
63844 extern void tc_init(void);
63845 #endif
63846
63847 +extern void grsecurity_init(void);
63848 +
63849 /*
63850 * Debug helper: via this flag we know that we are in 'early bootup code'
63851 * where only the boot processor is running with IRQ disabled. This means
63852 @@ -149,6 +151,49 @@ static int __init set_reset_devices(char *str)
63853
63854 __setup("reset_devices", set_reset_devices);
63855
63856 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
63857 +extern char pax_enter_kernel_user[];
63858 +extern char pax_exit_kernel_user[];
63859 +extern pgdval_t clone_pgd_mask;
63860 +#endif
63861 +
63862 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
63863 +static int __init setup_pax_nouderef(char *str)
63864 +{
63865 +#ifdef CONFIG_X86_32
63866 + unsigned int cpu;
63867 + struct desc_struct *gdt;
63868 +
63869 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
63870 + gdt = get_cpu_gdt_table(cpu);
63871 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
63872 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
63873 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
63874 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
63875 + }
63876 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
63877 +#else
63878 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
63879 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
63880 + clone_pgd_mask = ~(pgdval_t)0UL;
63881 +#endif
63882 +
63883 + return 0;
63884 +}
63885 +early_param("pax_nouderef", setup_pax_nouderef);
63886 +#endif
63887 +
63888 +#ifdef CONFIG_PAX_SOFTMODE
63889 +int pax_softmode;
63890 +
63891 +static int __init setup_pax_softmode(char *str)
63892 +{
63893 + get_option(&str, &pax_softmode);
63894 + return 1;
63895 +}
63896 +__setup("pax_softmode=", setup_pax_softmode);
63897 +#endif
63898 +
63899 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
63900 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
63901 static const char *panic_later, *panic_param;
63902 @@ -675,6 +720,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
63903 {
63904 int count = preempt_count();
63905 int ret;
63906 + const char *msg1 = "", *msg2 = "";
63907
63908 if (initcall_debug)
63909 ret = do_one_initcall_debug(fn);
63910 @@ -687,15 +733,15 @@ int __init_or_module do_one_initcall(initcall_t fn)
63911 sprintf(msgbuf, "error code %d ", ret);
63912
63913 if (preempt_count() != count) {
63914 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
63915 + msg1 = " preemption imbalance";
63916 preempt_count() = count;
63917 }
63918 if (irqs_disabled()) {
63919 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
63920 + msg2 = " disabled interrupts";
63921 local_irq_enable();
63922 }
63923 - if (msgbuf[0]) {
63924 - printk("initcall %pF returned with %s\n", fn, msgbuf);
63925 + if (msgbuf[0] || *msg1 || *msg2) {
63926 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
63927 }
63928
63929 return ret;
63930 @@ -814,7 +860,7 @@ static int __init kernel_init(void * unused)
63931 do_basic_setup();
63932
63933 /* Open the /dev/console on the rootfs, this should never fail */
63934 - if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
63935 + if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
63936 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
63937
63938 (void) sys_dup(0);
63939 @@ -827,11 +873,13 @@ static int __init kernel_init(void * unused)
63940 if (!ramdisk_execute_command)
63941 ramdisk_execute_command = "/init";
63942
63943 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
63944 + if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
63945 ramdisk_execute_command = NULL;
63946 prepare_namespace();
63947 }
63948
63949 + grsecurity_init();
63950 +
63951 /*
63952 * Ok, we have completed the initial bootup, and
63953 * we're essentially up and running. Get rid of the
63954 diff --git a/ipc/mqueue.c b/ipc/mqueue.c
63955 index 86ee272..773d937 100644
63956 --- a/ipc/mqueue.c
63957 +++ b/ipc/mqueue.c
63958 @@ -156,6 +156,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
63959 mq_bytes = (mq_msg_tblsz +
63960 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
63961
63962 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
63963 spin_lock(&mq_lock);
63964 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
63965 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
63966 diff --git a/ipc/msg.c b/ipc/msg.c
63967 index 7385de2..a8180e08 100644
63968 --- a/ipc/msg.c
63969 +++ b/ipc/msg.c
63970 @@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
63971 return security_msg_queue_associate(msq, msgflg);
63972 }
63973
63974 +static struct ipc_ops msg_ops = {
63975 + .getnew = newque,
63976 + .associate = msg_security,
63977 + .more_checks = NULL
63978 +};
63979 +
63980 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
63981 {
63982 struct ipc_namespace *ns;
63983 - struct ipc_ops msg_ops;
63984 struct ipc_params msg_params;
63985
63986 ns = current->nsproxy->ipc_ns;
63987
63988 - msg_ops.getnew = newque;
63989 - msg_ops.associate = msg_security;
63990 - msg_ops.more_checks = NULL;
63991 -
63992 msg_params.key = key;
63993 msg_params.flg = msgflg;
63994
63995 diff --git a/ipc/sem.c b/ipc/sem.c
63996 index 5215a81..cfc0cac 100644
63997 --- a/ipc/sem.c
63998 +++ b/ipc/sem.c
63999 @@ -364,10 +364,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
64000 return 0;
64001 }
64002
64003 +static struct ipc_ops sem_ops = {
64004 + .getnew = newary,
64005 + .associate = sem_security,
64006 + .more_checks = sem_more_checks
64007 +};
64008 +
64009 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
64010 {
64011 struct ipc_namespace *ns;
64012 - struct ipc_ops sem_ops;
64013 struct ipc_params sem_params;
64014
64015 ns = current->nsproxy->ipc_ns;
64016 @@ -375,10 +380,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
64017 if (nsems < 0 || nsems > ns->sc_semmsl)
64018 return -EINVAL;
64019
64020 - sem_ops.getnew = newary;
64021 - sem_ops.associate = sem_security;
64022 - sem_ops.more_checks = sem_more_checks;
64023 -
64024 sem_params.key = key;
64025 sem_params.flg = semflg;
64026 sem_params.u.nsems = nsems;
64027 diff --git a/ipc/shm.c b/ipc/shm.c
64028 index b76be5b..859e750 100644
64029 --- a/ipc/shm.c
64030 +++ b/ipc/shm.c
64031 @@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
64032 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
64033 #endif
64034
64035 +#ifdef CONFIG_GRKERNSEC
64036 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
64037 + const time_t shm_createtime, const uid_t cuid,
64038 + const int shmid);
64039 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
64040 + const time_t shm_createtime);
64041 +#endif
64042 +
64043 void shm_init_ns(struct ipc_namespace *ns)
64044 {
64045 ns->shm_ctlmax = SHMMAX;
64046 @@ -508,6 +516,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
64047 shp->shm_lprid = 0;
64048 shp->shm_atim = shp->shm_dtim = 0;
64049 shp->shm_ctim = get_seconds();
64050 +#ifdef CONFIG_GRKERNSEC
64051 + {
64052 + struct timespec timeval;
64053 + do_posix_clock_monotonic_gettime(&timeval);
64054 +
64055 + shp->shm_createtime = timeval.tv_sec;
64056 + }
64057 +#endif
64058 shp->shm_segsz = size;
64059 shp->shm_nattch = 0;
64060 shp->shm_file = file;
64061 @@ -559,18 +575,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
64062 return 0;
64063 }
64064
64065 +static struct ipc_ops shm_ops = {
64066 + .getnew = newseg,
64067 + .associate = shm_security,
64068 + .more_checks = shm_more_checks
64069 +};
64070 +
64071 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
64072 {
64073 struct ipc_namespace *ns;
64074 - struct ipc_ops shm_ops;
64075 struct ipc_params shm_params;
64076
64077 ns = current->nsproxy->ipc_ns;
64078
64079 - shm_ops.getnew = newseg;
64080 - shm_ops.associate = shm_security;
64081 - shm_ops.more_checks = shm_more_checks;
64082 -
64083 shm_params.key = key;
64084 shm_params.flg = shmflg;
64085 shm_params.u.size = size;
64086 @@ -988,6 +1005,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
64087 f_mode = FMODE_READ | FMODE_WRITE;
64088 }
64089 if (shmflg & SHM_EXEC) {
64090 +
64091 +#ifdef CONFIG_PAX_MPROTECT
64092 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
64093 + goto out;
64094 +#endif
64095 +
64096 prot |= PROT_EXEC;
64097 acc_mode |= S_IXUGO;
64098 }
64099 @@ -1011,9 +1034,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
64100 if (err)
64101 goto out_unlock;
64102
64103 +#ifdef CONFIG_GRKERNSEC
64104 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
64105 + shp->shm_perm.cuid, shmid) ||
64106 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
64107 + err = -EACCES;
64108 + goto out_unlock;
64109 + }
64110 +#endif
64111 +
64112 path = shp->shm_file->f_path;
64113 path_get(&path);
64114 shp->shm_nattch++;
64115 +#ifdef CONFIG_GRKERNSEC
64116 + shp->shm_lapid = current->pid;
64117 +#endif
64118 size = i_size_read(path.dentry->d_inode);
64119 shm_unlock(shp);
64120
64121 diff --git a/kernel/acct.c b/kernel/acct.c
64122 index 02e6167..54824f7 100644
64123 --- a/kernel/acct.c
64124 +++ b/kernel/acct.c
64125 @@ -550,7 +550,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
64126 */
64127 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
64128 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
64129 - file->f_op->write(file, (char *)&ac,
64130 + file->f_op->write(file, (char __force_user *)&ac,
64131 sizeof(acct_t), &file->f_pos);
64132 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
64133 set_fs(fs);
64134 diff --git a/kernel/audit.c b/kernel/audit.c
64135 index bb0eb5b..cf2a03a 100644
64136 --- a/kernel/audit.c
64137 +++ b/kernel/audit.c
64138 @@ -115,7 +115,7 @@ u32 audit_sig_sid = 0;
64139 3) suppressed due to audit_rate_limit
64140 4) suppressed due to audit_backlog_limit
64141 */
64142 -static atomic_t audit_lost = ATOMIC_INIT(0);
64143 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
64144
64145 /* The netlink socket. */
64146 static struct sock *audit_sock;
64147 @@ -237,7 +237,7 @@ void audit_log_lost(const char *message)
64148 unsigned long now;
64149 int print;
64150
64151 - atomic_inc(&audit_lost);
64152 + atomic_inc_unchecked(&audit_lost);
64153
64154 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
64155
64156 @@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
64157 printk(KERN_WARNING
64158 "audit: audit_lost=%d audit_rate_limit=%d "
64159 "audit_backlog_limit=%d\n",
64160 - atomic_read(&audit_lost),
64161 + atomic_read_unchecked(&audit_lost),
64162 audit_rate_limit,
64163 audit_backlog_limit);
64164 audit_panic(message);
64165 @@ -689,7 +689,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
64166 status_set.pid = audit_pid;
64167 status_set.rate_limit = audit_rate_limit;
64168 status_set.backlog_limit = audit_backlog_limit;
64169 - status_set.lost = atomic_read(&audit_lost);
64170 + status_set.lost = atomic_read_unchecked(&audit_lost);
64171 status_set.backlog = skb_queue_len(&audit_skb_queue);
64172 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
64173 &status_set, sizeof(status_set));
64174 diff --git a/kernel/auditsc.c b/kernel/auditsc.c
64175 index af1de0f..06dfe57 100644
64176 --- a/kernel/auditsc.c
64177 +++ b/kernel/auditsc.c
64178 @@ -2288,7 +2288,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
64179 }
64180
64181 /* global counter which is incremented every time something logs in */
64182 -static atomic_t session_id = ATOMIC_INIT(0);
64183 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
64184
64185 /**
64186 * audit_set_loginuid - set current task's audit_context loginuid
64187 @@ -2312,7 +2312,7 @@ int audit_set_loginuid(uid_t loginuid)
64188 return -EPERM;
64189 #endif /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
64190
64191 - sessionid = atomic_inc_return(&session_id);
64192 + sessionid = atomic_inc_return_unchecked(&session_id);
64193 if (context && context->in_syscall) {
64194 struct audit_buffer *ab;
64195
64196 diff --git a/kernel/capability.c b/kernel/capability.c
64197 index 3f1adb6..c564db0 100644
64198 --- a/kernel/capability.c
64199 +++ b/kernel/capability.c
64200 @@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
64201 * before modification is attempted and the application
64202 * fails.
64203 */
64204 + if (tocopy > ARRAY_SIZE(kdata))
64205 + return -EFAULT;
64206 +
64207 if (copy_to_user(dataptr, kdata, tocopy
64208 * sizeof(struct __user_cap_data_struct))) {
64209 return -EFAULT;
64210 @@ -303,10 +306,11 @@ bool has_ns_capability(struct task_struct *t,
64211 int ret;
64212
64213 rcu_read_lock();
64214 - ret = security_capable(__task_cred(t), ns, cap);
64215 + ret = security_capable(__task_cred(t), ns, cap) == 0 &&
64216 + gr_task_is_capable(t, __task_cred(t), cap);
64217 rcu_read_unlock();
64218
64219 - return (ret == 0);
64220 + return ret;
64221 }
64222
64223 /**
64224 @@ -343,10 +347,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
64225 int ret;
64226
64227 rcu_read_lock();
64228 - ret = security_capable_noaudit(__task_cred(t), ns, cap);
64229 + ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
64230 rcu_read_unlock();
64231
64232 - return (ret == 0);
64233 + return ret;
64234 }
64235
64236 /**
64237 @@ -384,7 +388,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
64238 BUG();
64239 }
64240
64241 - if (security_capable(current_cred(), ns, cap) == 0) {
64242 + if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
64243 current->flags |= PF_SUPERPRIV;
64244 return true;
64245 }
64246 @@ -392,6 +396,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
64247 }
64248 EXPORT_SYMBOL(ns_capable);
64249
64250 +bool ns_capable_nolog(struct user_namespace *ns, int cap)
64251 +{
64252 + if (unlikely(!cap_valid(cap))) {
64253 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
64254 + BUG();
64255 + }
64256 +
64257 + if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
64258 + current->flags |= PF_SUPERPRIV;
64259 + return true;
64260 + }
64261 + return false;
64262 +}
64263 +EXPORT_SYMBOL(ns_capable_nolog);
64264 +
64265 /**
64266 * capable - Determine if the current task has a superior capability in effect
64267 * @cap: The capability to be tested for
64268 @@ -408,6 +427,12 @@ bool capable(int cap)
64269 }
64270 EXPORT_SYMBOL(capable);
64271
64272 +bool capable_nolog(int cap)
64273 +{
64274 + return ns_capable_nolog(&init_user_ns, cap);
64275 +}
64276 +EXPORT_SYMBOL(capable_nolog);
64277 +
64278 /**
64279 * nsown_capable - Check superior capability to one's own user_ns
64280 * @cap: The capability in question
64281 diff --git a/kernel/compat.c b/kernel/compat.c
64282 index f346ced..aa2b1f4 100644
64283 --- a/kernel/compat.c
64284 +++ b/kernel/compat.c
64285 @@ -13,6 +13,7 @@
64286
64287 #include <linux/linkage.h>
64288 #include <linux/compat.h>
64289 +#include <linux/module.h>
64290 #include <linux/errno.h>
64291 #include <linux/time.h>
64292 #include <linux/signal.h>
64293 @@ -168,7 +169,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
64294 mm_segment_t oldfs;
64295 long ret;
64296
64297 - restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
64298 + restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
64299 oldfs = get_fs();
64300 set_fs(KERNEL_DS);
64301 ret = hrtimer_nanosleep_restart(restart);
64302 @@ -200,7 +201,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
64303 oldfs = get_fs();
64304 set_fs(KERNEL_DS);
64305 ret = hrtimer_nanosleep(&tu,
64306 - rmtp ? (struct timespec __user *)&rmt : NULL,
64307 + rmtp ? (struct timespec __force_user *)&rmt : NULL,
64308 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
64309 set_fs(oldfs);
64310
64311 @@ -309,7 +310,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
64312 mm_segment_t old_fs = get_fs();
64313
64314 set_fs(KERNEL_DS);
64315 - ret = sys_sigpending((old_sigset_t __user *) &s);
64316 + ret = sys_sigpending((old_sigset_t __force_user *) &s);
64317 set_fs(old_fs);
64318 if (ret == 0)
64319 ret = put_user(s, set);
64320 @@ -332,8 +333,8 @@ asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
64321 old_fs = get_fs();
64322 set_fs(KERNEL_DS);
64323 ret = sys_sigprocmask(how,
64324 - set ? (old_sigset_t __user *) &s : NULL,
64325 - oset ? (old_sigset_t __user *) &s : NULL);
64326 + set ? (old_sigset_t __force_user *) &s : NULL,
64327 + oset ? (old_sigset_t __force_user *) &s : NULL);
64328 set_fs(old_fs);
64329 if (ret == 0)
64330 if (oset)
64331 @@ -370,7 +371,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
64332 mm_segment_t old_fs = get_fs();
64333
64334 set_fs(KERNEL_DS);
64335 - ret = sys_old_getrlimit(resource, &r);
64336 + ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
64337 set_fs(old_fs);
64338
64339 if (!ret) {
64340 @@ -442,7 +443,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
64341 mm_segment_t old_fs = get_fs();
64342
64343 set_fs(KERNEL_DS);
64344 - ret = sys_getrusage(who, (struct rusage __user *) &r);
64345 + ret = sys_getrusage(who, (struct rusage __force_user *) &r);
64346 set_fs(old_fs);
64347
64348 if (ret)
64349 @@ -469,8 +470,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
64350 set_fs (KERNEL_DS);
64351 ret = sys_wait4(pid,
64352 (stat_addr ?
64353 - (unsigned int __user *) &status : NULL),
64354 - options, (struct rusage __user *) &r);
64355 + (unsigned int __force_user *) &status : NULL),
64356 + options, (struct rusage __force_user *) &r);
64357 set_fs (old_fs);
64358
64359 if (ret > 0) {
64360 @@ -495,8 +496,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
64361 memset(&info, 0, sizeof(info));
64362
64363 set_fs(KERNEL_DS);
64364 - ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
64365 - uru ? (struct rusage __user *)&ru : NULL);
64366 + ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
64367 + uru ? (struct rusage __force_user *)&ru : NULL);
64368 set_fs(old_fs);
64369
64370 if ((ret < 0) || (info.si_signo == 0))
64371 @@ -626,8 +627,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
64372 oldfs = get_fs();
64373 set_fs(KERNEL_DS);
64374 err = sys_timer_settime(timer_id, flags,
64375 - (struct itimerspec __user *) &newts,
64376 - (struct itimerspec __user *) &oldts);
64377 + (struct itimerspec __force_user *) &newts,
64378 + (struct itimerspec __force_user *) &oldts);
64379 set_fs(oldfs);
64380 if (!err && old && put_compat_itimerspec(old, &oldts))
64381 return -EFAULT;
64382 @@ -644,7 +645,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
64383 oldfs = get_fs();
64384 set_fs(KERNEL_DS);
64385 err = sys_timer_gettime(timer_id,
64386 - (struct itimerspec __user *) &ts);
64387 + (struct itimerspec __force_user *) &ts);
64388 set_fs(oldfs);
64389 if (!err && put_compat_itimerspec(setting, &ts))
64390 return -EFAULT;
64391 @@ -663,7 +664,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
64392 oldfs = get_fs();
64393 set_fs(KERNEL_DS);
64394 err = sys_clock_settime(which_clock,
64395 - (struct timespec __user *) &ts);
64396 + (struct timespec __force_user *) &ts);
64397 set_fs(oldfs);
64398 return err;
64399 }
64400 @@ -678,7 +679,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
64401 oldfs = get_fs();
64402 set_fs(KERNEL_DS);
64403 err = sys_clock_gettime(which_clock,
64404 - (struct timespec __user *) &ts);
64405 + (struct timespec __force_user *) &ts);
64406 set_fs(oldfs);
64407 if (!err && put_compat_timespec(&ts, tp))
64408 return -EFAULT;
64409 @@ -698,7 +699,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
64410
64411 oldfs = get_fs();
64412 set_fs(KERNEL_DS);
64413 - ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
64414 + ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
64415 set_fs(oldfs);
64416
64417 err = compat_put_timex(utp, &txc);
64418 @@ -718,7 +719,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
64419 oldfs = get_fs();
64420 set_fs(KERNEL_DS);
64421 err = sys_clock_getres(which_clock,
64422 - (struct timespec __user *) &ts);
64423 + (struct timespec __force_user *) &ts);
64424 set_fs(oldfs);
64425 if (!err && tp && put_compat_timespec(&ts, tp))
64426 return -EFAULT;
64427 @@ -730,9 +731,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
64428 long err;
64429 mm_segment_t oldfs;
64430 struct timespec tu;
64431 - struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
64432 + struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
64433
64434 - restart->nanosleep.rmtp = (struct timespec __user *) &tu;
64435 + restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
64436 oldfs = get_fs();
64437 set_fs(KERNEL_DS);
64438 err = clock_nanosleep_restart(restart);
64439 @@ -764,8 +765,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
64440 oldfs = get_fs();
64441 set_fs(KERNEL_DS);
64442 err = sys_clock_nanosleep(which_clock, flags,
64443 - (struct timespec __user *) &in,
64444 - (struct timespec __user *) &out);
64445 + (struct timespec __force_user *) &in,
64446 + (struct timespec __force_user *) &out);
64447 set_fs(oldfs);
64448
64449 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
64450 diff --git a/kernel/configs.c b/kernel/configs.c
64451 index 42e8fa0..9e7406b 100644
64452 --- a/kernel/configs.c
64453 +++ b/kernel/configs.c
64454 @@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
64455 struct proc_dir_entry *entry;
64456
64457 /* create the current config file */
64458 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
64459 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
64460 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
64461 + &ikconfig_file_ops);
64462 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
64463 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
64464 + &ikconfig_file_ops);
64465 +#endif
64466 +#else
64467 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
64468 &ikconfig_file_ops);
64469 +#endif
64470 +
64471 if (!entry)
64472 return -ENOMEM;
64473
64474 diff --git a/kernel/cred.c b/kernel/cred.c
64475 index 5791612..a3c04dc 100644
64476 --- a/kernel/cred.c
64477 +++ b/kernel/cred.c
64478 @@ -204,6 +204,15 @@ void exit_creds(struct task_struct *tsk)
64479 validate_creds(cred);
64480 put_cred(cred);
64481 }
64482 +
64483 +#ifdef CONFIG_GRKERNSEC_SETXID
64484 + cred = (struct cred *) tsk->delayed_cred;
64485 + if (cred) {
64486 + tsk->delayed_cred = NULL;
64487 + validate_creds(cred);
64488 + put_cred(cred);
64489 + }
64490 +#endif
64491 }
64492
64493 /**
64494 @@ -470,7 +479,7 @@ error_put:
64495 * Always returns 0 thus allowing this function to be tail-called at the end
64496 * of, say, sys_setgid().
64497 */
64498 -int commit_creds(struct cred *new)
64499 +static int __commit_creds(struct cred *new)
64500 {
64501 struct task_struct *task = current;
64502 const struct cred *old = task->real_cred;
64503 @@ -489,6 +498,8 @@ int commit_creds(struct cred *new)
64504
64505 get_cred(new); /* we will require a ref for the subj creds too */
64506
64507 + gr_set_role_label(task, new->uid, new->gid);
64508 +
64509 /* dumpability changes */
64510 if (old->euid != new->euid ||
64511 old->egid != new->egid ||
64512 @@ -538,6 +549,92 @@ int commit_creds(struct cred *new)
64513 put_cred(old);
64514 return 0;
64515 }
64516 +#ifdef CONFIG_GRKERNSEC_SETXID
64517 +extern int set_user(struct cred *new);
64518 +
64519 +void gr_delayed_cred_worker(void)
64520 +{
64521 + const struct cred *new = current->delayed_cred;
64522 + struct cred *ncred;
64523 +
64524 + current->delayed_cred = NULL;
64525 +
64526 + if (current_uid() && new != NULL) {
64527 + // from doing get_cred on it when queueing this
64528 + put_cred(new);
64529 + return;
64530 + } else if (new == NULL)
64531 + return;
64532 +
64533 + ncred = prepare_creds();
64534 + if (!ncred)
64535 + goto die;
64536 + // uids
64537 + ncred->uid = new->uid;
64538 + ncred->euid = new->euid;
64539 + ncred->suid = new->suid;
64540 + ncred->fsuid = new->fsuid;
64541 + // gids
64542 + ncred->gid = new->gid;
64543 + ncred->egid = new->egid;
64544 + ncred->sgid = new->sgid;
64545 + ncred->fsgid = new->fsgid;
64546 + // groups
64547 + if (set_groups(ncred, new->group_info) < 0) {
64548 + abort_creds(ncred);
64549 + goto die;
64550 + }
64551 + // caps
64552 + ncred->securebits = new->securebits;
64553 + ncred->cap_inheritable = new->cap_inheritable;
64554 + ncred->cap_permitted = new->cap_permitted;
64555 + ncred->cap_effective = new->cap_effective;
64556 + ncred->cap_bset = new->cap_bset;
64557 +
64558 + if (set_user(ncred)) {
64559 + abort_creds(ncred);
64560 + goto die;
64561 + }
64562 +
64563 + // from doing get_cred on it when queueing this
64564 + put_cred(new);
64565 +
64566 + __commit_creds(ncred);
64567 + return;
64568 +die:
64569 + // from doing get_cred on it when queueing this
64570 + put_cred(new);
64571 + do_group_exit(SIGKILL);
64572 +}
64573 +#endif
64574 +
64575 +int commit_creds(struct cred *new)
64576 +{
64577 +#ifdef CONFIG_GRKERNSEC_SETXID
64578 + struct task_struct *t;
64579 +
64580 + /* we won't get called with tasklist_lock held for writing
64581 + and interrupts disabled as the cred struct in that case is
64582 + init_cred
64583 + */
64584 + if (grsec_enable_setxid && !current_is_single_threaded() &&
64585 + !current_uid() && new->uid) {
64586 + rcu_read_lock();
64587 + read_lock(&tasklist_lock);
64588 + for (t = next_thread(current); t != current;
64589 + t = next_thread(t)) {
64590 + if (t->delayed_cred == NULL) {
64591 + t->delayed_cred = get_cred(new);
64592 + set_tsk_need_resched(t);
64593 + }
64594 + }
64595 + read_unlock(&tasklist_lock);
64596 + rcu_read_unlock();
64597 + }
64598 +#endif
64599 + return __commit_creds(new);
64600 +}
64601 +
64602 EXPORT_SYMBOL(commit_creds);
64603
64604 /**
64605 diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
64606 index 0d7c087..01b8cef 100644
64607 --- a/kernel/debug/debug_core.c
64608 +++ b/kernel/debug/debug_core.c
64609 @@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
64610 */
64611 static atomic_t masters_in_kgdb;
64612 static atomic_t slaves_in_kgdb;
64613 -static atomic_t kgdb_break_tasklet_var;
64614 +static atomic_unchecked_t kgdb_break_tasklet_var;
64615 atomic_t kgdb_setting_breakpoint;
64616
64617 struct task_struct *kgdb_usethread;
64618 @@ -129,7 +129,7 @@ int kgdb_single_step;
64619 static pid_t kgdb_sstep_pid;
64620
64621 /* to keep track of the CPU which is doing the single stepping*/
64622 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
64623 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
64624
64625 /*
64626 * If you are debugging a problem where roundup (the collection of
64627 @@ -542,7 +542,7 @@ return_normal:
64628 * kernel will only try for the value of sstep_tries before
64629 * giving up and continuing on.
64630 */
64631 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
64632 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
64633 (kgdb_info[cpu].task &&
64634 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
64635 atomic_set(&kgdb_active, -1);
64636 @@ -636,8 +636,8 @@ cpu_master_loop:
64637 }
64638
64639 kgdb_restore:
64640 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
64641 - int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
64642 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
64643 + int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
64644 if (kgdb_info[sstep_cpu].task)
64645 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
64646 else
64647 @@ -834,18 +834,18 @@ static void kgdb_unregister_callbacks(void)
64648 static void kgdb_tasklet_bpt(unsigned long ing)
64649 {
64650 kgdb_breakpoint();
64651 - atomic_set(&kgdb_break_tasklet_var, 0);
64652 + atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
64653 }
64654
64655 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
64656
64657 void kgdb_schedule_breakpoint(void)
64658 {
64659 - if (atomic_read(&kgdb_break_tasklet_var) ||
64660 + if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
64661 atomic_read(&kgdb_active) != -1 ||
64662 atomic_read(&kgdb_setting_breakpoint))
64663 return;
64664 - atomic_inc(&kgdb_break_tasklet_var);
64665 + atomic_inc_unchecked(&kgdb_break_tasklet_var);
64666 tasklet_schedule(&kgdb_tasklet_breakpoint);
64667 }
64668 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
64669 diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
64670 index e2ae734..08a4c5c 100644
64671 --- a/kernel/debug/kdb/kdb_main.c
64672 +++ b/kernel/debug/kdb/kdb_main.c
64673 @@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const char **argv)
64674 list_for_each_entry(mod, kdb_modules, list) {
64675
64676 kdb_printf("%-20s%8u 0x%p ", mod->name,
64677 - mod->core_size, (void *)mod);
64678 + mod->core_size_rx + mod->core_size_rw, (void *)mod);
64679 #ifdef CONFIG_MODULE_UNLOAD
64680 kdb_printf("%4ld ", module_refcount(mod));
64681 #endif
64682 @@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const char **argv)
64683 kdb_printf(" (Loading)");
64684 else
64685 kdb_printf(" (Live)");
64686 - kdb_printf(" 0x%p", mod->module_core);
64687 + kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
64688
64689 #ifdef CONFIG_MODULE_UNLOAD
64690 {
64691 diff --git a/kernel/events/core.c b/kernel/events/core.c
64692 index 1b5c081..c375f83 100644
64693 --- a/kernel/events/core.c
64694 +++ b/kernel/events/core.c
64695 @@ -173,7 +173,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
64696 return 0;
64697 }
64698
64699 -static atomic64_t perf_event_id;
64700 +static atomic64_unchecked_t perf_event_id;
64701
64702 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
64703 enum event_type_t event_type);
64704 @@ -2581,7 +2581,7 @@ static void __perf_event_read(void *info)
64705
64706 static inline u64 perf_event_count(struct perf_event *event)
64707 {
64708 - return local64_read(&event->count) + atomic64_read(&event->child_count);
64709 + return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
64710 }
64711
64712 static u64 perf_event_read(struct perf_event *event)
64713 @@ -2897,9 +2897,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
64714 mutex_lock(&event->child_mutex);
64715 total += perf_event_read(event);
64716 *enabled += event->total_time_enabled +
64717 - atomic64_read(&event->child_total_time_enabled);
64718 + atomic64_read_unchecked(&event->child_total_time_enabled);
64719 *running += event->total_time_running +
64720 - atomic64_read(&event->child_total_time_running);
64721 + atomic64_read_unchecked(&event->child_total_time_running);
64722
64723 list_for_each_entry(child, &event->child_list, child_list) {
64724 total += perf_event_read(child);
64725 @@ -3306,10 +3306,10 @@ void perf_event_update_userpage(struct perf_event *event)
64726 userpg->offset -= local64_read(&event->hw.prev_count);
64727
64728 userpg->time_enabled = enabled +
64729 - atomic64_read(&event->child_total_time_enabled);
64730 + atomic64_read_unchecked(&event->child_total_time_enabled);
64731
64732 userpg->time_running = running +
64733 - atomic64_read(&event->child_total_time_running);
64734 + atomic64_read_unchecked(&event->child_total_time_running);
64735
64736 barrier();
64737 ++userpg->lock;
64738 @@ -3738,11 +3738,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
64739 values[n++] = perf_event_count(event);
64740 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
64741 values[n++] = enabled +
64742 - atomic64_read(&event->child_total_time_enabled);
64743 + atomic64_read_unchecked(&event->child_total_time_enabled);
64744 }
64745 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
64746 values[n++] = running +
64747 - atomic64_read(&event->child_total_time_running);
64748 + atomic64_read_unchecked(&event->child_total_time_running);
64749 }
64750 if (read_format & PERF_FORMAT_ID)
64751 values[n++] = primary_event_id(event);
64752 @@ -4393,12 +4393,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
64753 * need to add enough zero bytes after the string to handle
64754 * the 64bit alignment we do later.
64755 */
64756 - buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
64757 + buf = kzalloc(PATH_MAX, GFP_KERNEL);
64758 if (!buf) {
64759 name = strncpy(tmp, "//enomem", sizeof(tmp));
64760 goto got_name;
64761 }
64762 - name = d_path(&file->f_path, buf, PATH_MAX);
64763 + name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
64764 if (IS_ERR(name)) {
64765 name = strncpy(tmp, "//toolong", sizeof(tmp));
64766 goto got_name;
64767 @@ -5765,7 +5765,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
64768 event->parent = parent_event;
64769
64770 event->ns = get_pid_ns(current->nsproxy->pid_ns);
64771 - event->id = atomic64_inc_return(&perf_event_id);
64772 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
64773
64774 event->state = PERF_EVENT_STATE_INACTIVE;
64775
64776 @@ -6287,10 +6287,10 @@ static void sync_child_event(struct perf_event *child_event,
64777 /*
64778 * Add back the child's count to the parent's count:
64779 */
64780 - atomic64_add(child_val, &parent_event->child_count);
64781 - atomic64_add(child_event->total_time_enabled,
64782 + atomic64_add_unchecked(child_val, &parent_event->child_count);
64783 + atomic64_add_unchecked(child_event->total_time_enabled,
64784 &parent_event->child_total_time_enabled);
64785 - atomic64_add(child_event->total_time_running,
64786 + atomic64_add_unchecked(child_event->total_time_running,
64787 &parent_event->child_total_time_running);
64788
64789 /*
64790 diff --git a/kernel/exit.c b/kernel/exit.c
64791 index 4b4042f..5bdd8d5 100644
64792 --- a/kernel/exit.c
64793 +++ b/kernel/exit.c
64794 @@ -58,6 +58,10 @@
64795 #include <asm/pgtable.h>
64796 #include <asm/mmu_context.h>
64797
64798 +#ifdef CONFIG_GRKERNSEC
64799 +extern rwlock_t grsec_exec_file_lock;
64800 +#endif
64801 +
64802 static void exit_mm(struct task_struct * tsk);
64803
64804 static void __unhash_process(struct task_struct *p, bool group_dead)
64805 @@ -169,6 +173,10 @@ void release_task(struct task_struct * p)
64806 struct task_struct *leader;
64807 int zap_leader;
64808 repeat:
64809 +#ifdef CONFIG_NET
64810 + gr_del_task_from_ip_table(p);
64811 +#endif
64812 +
64813 /* don't need to get the RCU readlock here - the process is dead and
64814 * can't be modifying its own credentials. But shut RCU-lockdep up */
64815 rcu_read_lock();
64816 @@ -381,7 +389,7 @@ int allow_signal(int sig)
64817 * know it'll be handled, so that they don't get converted to
64818 * SIGKILL or just silently dropped.
64819 */
64820 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
64821 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
64822 recalc_sigpending();
64823 spin_unlock_irq(&current->sighand->siglock);
64824 return 0;
64825 @@ -417,6 +425,17 @@ void daemonize(const char *name, ...)
64826 vsnprintf(current->comm, sizeof(current->comm), name, args);
64827 va_end(args);
64828
64829 +#ifdef CONFIG_GRKERNSEC
64830 + write_lock(&grsec_exec_file_lock);
64831 + if (current->exec_file) {
64832 + fput(current->exec_file);
64833 + current->exec_file = NULL;
64834 + }
64835 + write_unlock(&grsec_exec_file_lock);
64836 +#endif
64837 +
64838 + gr_set_kernel_label(current);
64839 +
64840 /*
64841 * If we were started as result of loading a module, close all of the
64842 * user space pages. We don't need them, and if we didn't close them
64843 @@ -892,6 +911,8 @@ void do_exit(long code)
64844 struct task_struct *tsk = current;
64845 int group_dead;
64846
64847 + set_fs(USER_DS);
64848 +
64849 profile_task_exit(tsk);
64850
64851 WARN_ON(blk_needs_flush_plug(tsk));
64852 @@ -908,7 +929,6 @@ void do_exit(long code)
64853 * mm_release()->clear_child_tid() from writing to a user-controlled
64854 * kernel address.
64855 */
64856 - set_fs(USER_DS);
64857
64858 ptrace_event(PTRACE_EVENT_EXIT, code);
64859
64860 @@ -969,6 +989,9 @@ void do_exit(long code)
64861 tsk->exit_code = code;
64862 taskstats_exit(tsk, group_dead);
64863
64864 + gr_acl_handle_psacct(tsk, code);
64865 + gr_acl_handle_exit();
64866 +
64867 exit_mm(tsk);
64868
64869 if (group_dead)
64870 @@ -1085,7 +1108,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
64871 * Take down every thread in the group. This is called by fatal signals
64872 * as well as by sys_exit_group (below).
64873 */
64874 -void
64875 +__noreturn void
64876 do_group_exit(int exit_code)
64877 {
64878 struct signal_struct *sig = current->signal;
64879 diff --git a/kernel/fork.c b/kernel/fork.c
64880 index 26a7a67..a1053f9 100644
64881 --- a/kernel/fork.c
64882 +++ b/kernel/fork.c
64883 @@ -284,7 +284,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
64884 *stackend = STACK_END_MAGIC; /* for overflow detection */
64885
64886 #ifdef CONFIG_CC_STACKPROTECTOR
64887 - tsk->stack_canary = get_random_int();
64888 + tsk->stack_canary = pax_get_random_long();
64889 #endif
64890
64891 /*
64892 @@ -308,13 +308,77 @@ out:
64893 }
64894
64895 #ifdef CONFIG_MMU
64896 +static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
64897 +{
64898 + struct vm_area_struct *tmp;
64899 + unsigned long charge;
64900 + struct mempolicy *pol;
64901 + struct file *file;
64902 +
64903 + charge = 0;
64904 + if (mpnt->vm_flags & VM_ACCOUNT) {
64905 + unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
64906 + if (security_vm_enough_memory(len))
64907 + goto fail_nomem;
64908 + charge = len;
64909 + }
64910 + tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
64911 + if (!tmp)
64912 + goto fail_nomem;
64913 + *tmp = *mpnt;
64914 + tmp->vm_mm = mm;
64915 + INIT_LIST_HEAD(&tmp->anon_vma_chain);
64916 + pol = mpol_dup(vma_policy(mpnt));
64917 + if (IS_ERR(pol))
64918 + goto fail_nomem_policy;
64919 + vma_set_policy(tmp, pol);
64920 + if (anon_vma_fork(tmp, mpnt))
64921 + goto fail_nomem_anon_vma_fork;
64922 + tmp->vm_flags &= ~VM_LOCKED;
64923 + tmp->vm_next = tmp->vm_prev = NULL;
64924 + tmp->vm_mirror = NULL;
64925 + file = tmp->vm_file;
64926 + if (file) {
64927 + struct inode *inode = file->f_path.dentry->d_inode;
64928 + struct address_space *mapping = file->f_mapping;
64929 +
64930 + get_file(file);
64931 + if (tmp->vm_flags & VM_DENYWRITE)
64932 + atomic_dec(&inode->i_writecount);
64933 + mutex_lock(&mapping->i_mmap_mutex);
64934 + if (tmp->vm_flags & VM_SHARED)
64935 + mapping->i_mmap_writable++;
64936 + flush_dcache_mmap_lock(mapping);
64937 + /* insert tmp into the share list, just after mpnt */
64938 + vma_prio_tree_add(tmp, mpnt);
64939 + flush_dcache_mmap_unlock(mapping);
64940 + mutex_unlock(&mapping->i_mmap_mutex);
64941 + }
64942 +
64943 + /*
64944 + * Clear hugetlb-related page reserves for children. This only
64945 + * affects MAP_PRIVATE mappings. Faults generated by the child
64946 + * are not guaranteed to succeed, even if read-only
64947 + */
64948 + if (is_vm_hugetlb_page(tmp))
64949 + reset_vma_resv_huge_pages(tmp);
64950 +
64951 + return tmp;
64952 +
64953 +fail_nomem_anon_vma_fork:
64954 + mpol_put(pol);
64955 +fail_nomem_policy:
64956 + kmem_cache_free(vm_area_cachep, tmp);
64957 +fail_nomem:
64958 + vm_unacct_memory(charge);
64959 + return NULL;
64960 +}
64961 +
64962 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
64963 {
64964 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
64965 struct rb_node **rb_link, *rb_parent;
64966 int retval;
64967 - unsigned long charge;
64968 - struct mempolicy *pol;
64969
64970 down_write(&oldmm->mmap_sem);
64971 flush_cache_dup_mm(oldmm);
64972 @@ -326,8 +390,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
64973 mm->locked_vm = 0;
64974 mm->mmap = NULL;
64975 mm->mmap_cache = NULL;
64976 - mm->free_area_cache = oldmm->mmap_base;
64977 - mm->cached_hole_size = ~0UL;
64978 + mm->free_area_cache = oldmm->free_area_cache;
64979 + mm->cached_hole_size = oldmm->cached_hole_size;
64980 mm->map_count = 0;
64981 cpumask_clear(mm_cpumask(mm));
64982 mm->mm_rb = RB_ROOT;
64983 @@ -343,8 +407,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
64984
64985 prev = NULL;
64986 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
64987 - struct file *file;
64988 -
64989 if (mpnt->vm_flags & VM_DONTCOPY) {
64990 long pages = vma_pages(mpnt);
64991 mm->total_vm -= pages;
64992 @@ -352,53 +414,11 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
64993 -pages);
64994 continue;
64995 }
64996 - charge = 0;
64997 - if (mpnt->vm_flags & VM_ACCOUNT) {
64998 - unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
64999 - if (security_vm_enough_memory(len))
65000 - goto fail_nomem;
65001 - charge = len;
65002 + tmp = dup_vma(mm, mpnt);
65003 + if (!tmp) {
65004 + retval = -ENOMEM;
65005 + goto out;
65006 }
65007 - tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
65008 - if (!tmp)
65009 - goto fail_nomem;
65010 - *tmp = *mpnt;
65011 - INIT_LIST_HEAD(&tmp->anon_vma_chain);
65012 - pol = mpol_dup(vma_policy(mpnt));
65013 - retval = PTR_ERR(pol);
65014 - if (IS_ERR(pol))
65015 - goto fail_nomem_policy;
65016 - vma_set_policy(tmp, pol);
65017 - tmp->vm_mm = mm;
65018 - if (anon_vma_fork(tmp, mpnt))
65019 - goto fail_nomem_anon_vma_fork;
65020 - tmp->vm_flags &= ~VM_LOCKED;
65021 - tmp->vm_next = tmp->vm_prev = NULL;
65022 - file = tmp->vm_file;
65023 - if (file) {
65024 - struct inode *inode = file->f_path.dentry->d_inode;
65025 - struct address_space *mapping = file->f_mapping;
65026 -
65027 - get_file(file);
65028 - if (tmp->vm_flags & VM_DENYWRITE)
65029 - atomic_dec(&inode->i_writecount);
65030 - mutex_lock(&mapping->i_mmap_mutex);
65031 - if (tmp->vm_flags & VM_SHARED)
65032 - mapping->i_mmap_writable++;
65033 - flush_dcache_mmap_lock(mapping);
65034 - /* insert tmp into the share list, just after mpnt */
65035 - vma_prio_tree_add(tmp, mpnt);
65036 - flush_dcache_mmap_unlock(mapping);
65037 - mutex_unlock(&mapping->i_mmap_mutex);
65038 - }
65039 -
65040 - /*
65041 - * Clear hugetlb-related page reserves for children. This only
65042 - * affects MAP_PRIVATE mappings. Faults generated by the child
65043 - * are not guaranteed to succeed, even if read-only
65044 - */
65045 - if (is_vm_hugetlb_page(tmp))
65046 - reset_vma_resv_huge_pages(tmp);
65047
65048 /*
65049 * Link in the new vma and copy the page table entries.
65050 @@ -421,6 +441,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65051 if (retval)
65052 goto out;
65053 }
65054 +
65055 +#ifdef CONFIG_PAX_SEGMEXEC
65056 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
65057 + struct vm_area_struct *mpnt_m;
65058 +
65059 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
65060 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
65061 +
65062 + if (!mpnt->vm_mirror)
65063 + continue;
65064 +
65065 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
65066 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
65067 + mpnt->vm_mirror = mpnt_m;
65068 + } else {
65069 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
65070 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
65071 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
65072 + mpnt->vm_mirror->vm_mirror = mpnt;
65073 + }
65074 + }
65075 + BUG_ON(mpnt_m);
65076 + }
65077 +#endif
65078 +
65079 /* a new mm has just been created */
65080 arch_dup_mmap(oldmm, mm);
65081 retval = 0;
65082 @@ -429,14 +474,6 @@ out:
65083 flush_tlb_mm(oldmm);
65084 up_write(&oldmm->mmap_sem);
65085 return retval;
65086 -fail_nomem_anon_vma_fork:
65087 - mpol_put(pol);
65088 -fail_nomem_policy:
65089 - kmem_cache_free(vm_area_cachep, tmp);
65090 -fail_nomem:
65091 - retval = -ENOMEM;
65092 - vm_unacct_memory(charge);
65093 - goto out;
65094 }
65095
65096 static inline int mm_alloc_pgd(struct mm_struct *mm)
65097 @@ -658,8 +695,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
65098 return ERR_PTR(err);
65099
65100 mm = get_task_mm(task);
65101 - if (mm && mm != current->mm &&
65102 - !ptrace_may_access(task, mode)) {
65103 + if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
65104 + (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
65105 mmput(mm);
65106 mm = ERR_PTR(-EACCES);
65107 }
65108 @@ -881,13 +918,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
65109 spin_unlock(&fs->lock);
65110 return -EAGAIN;
65111 }
65112 - fs->users++;
65113 + atomic_inc(&fs->users);
65114 spin_unlock(&fs->lock);
65115 return 0;
65116 }
65117 tsk->fs = copy_fs_struct(fs);
65118 if (!tsk->fs)
65119 return -ENOMEM;
65120 + gr_set_chroot_entries(tsk, &tsk->fs->root);
65121 return 0;
65122 }
65123
65124 @@ -1151,6 +1189,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
65125 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
65126 #endif
65127 retval = -EAGAIN;
65128 +
65129 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
65130 +
65131 if (atomic_read(&p->real_cred->user->processes) >=
65132 task_rlimit(p, RLIMIT_NPROC)) {
65133 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
65134 @@ -1306,6 +1347,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
65135 if (clone_flags & CLONE_THREAD)
65136 p->tgid = current->tgid;
65137
65138 + gr_copy_label(p);
65139 +
65140 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
65141 /*
65142 * Clear TID on mm_release()?
65143 @@ -1472,6 +1515,8 @@ bad_fork_cleanup_count:
65144 bad_fork_free:
65145 free_task(p);
65146 fork_out:
65147 + gr_log_forkfail(retval);
65148 +
65149 return ERR_PTR(retval);
65150 }
65151
65152 @@ -1572,6 +1617,8 @@ long do_fork(unsigned long clone_flags,
65153 if (clone_flags & CLONE_PARENT_SETTID)
65154 put_user(nr, parent_tidptr);
65155
65156 + gr_handle_brute_check();
65157 +
65158 if (clone_flags & CLONE_VFORK) {
65159 p->vfork_done = &vfork;
65160 init_completion(&vfork);
65161 @@ -1670,7 +1717,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
65162 return 0;
65163
65164 /* don't need lock here; in the worst case we'll do useless copy */
65165 - if (fs->users == 1)
65166 + if (atomic_read(&fs->users) == 1)
65167 return 0;
65168
65169 *new_fsp = copy_fs_struct(fs);
65170 @@ -1759,7 +1806,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
65171 fs = current->fs;
65172 spin_lock(&fs->lock);
65173 current->fs = new_fs;
65174 - if (--fs->users)
65175 + gr_set_chroot_entries(current, &current->fs->root);
65176 + if (atomic_dec_return(&fs->users))
65177 new_fs = NULL;
65178 else
65179 new_fs = fs;
65180 diff --git a/kernel/futex.c b/kernel/futex.c
65181 index 1614be2..37abc7e 100644
65182 --- a/kernel/futex.c
65183 +++ b/kernel/futex.c
65184 @@ -54,6 +54,7 @@
65185 #include <linux/mount.h>
65186 #include <linux/pagemap.h>
65187 #include <linux/syscalls.h>
65188 +#include <linux/ptrace.h>
65189 #include <linux/signal.h>
65190 #include <linux/export.h>
65191 #include <linux/magic.h>
65192 @@ -238,6 +239,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
65193 struct page *page, *page_head;
65194 int err, ro = 0;
65195
65196 +#ifdef CONFIG_PAX_SEGMEXEC
65197 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
65198 + return -EFAULT;
65199 +#endif
65200 +
65201 /*
65202 * The futex address must be "naturally" aligned.
65203 */
65204 @@ -2459,6 +2465,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
65205 if (!p)
65206 goto err_unlock;
65207 ret = -EPERM;
65208 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65209 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
65210 + goto err_unlock;
65211 +#endif
65212 pcred = __task_cred(p);
65213 /* If victim is in different user_ns, then uids are not
65214 comparable, so we must have CAP_SYS_PTRACE */
65215 @@ -2724,6 +2734,7 @@ static int __init futex_init(void)
65216 {
65217 u32 curval;
65218 int i;
65219 + mm_segment_t oldfs;
65220
65221 /*
65222 * This will fail and we want it. Some arch implementations do
65223 @@ -2735,8 +2746,11 @@ static int __init futex_init(void)
65224 * implementation, the non-functional ones will return
65225 * -ENOSYS.
65226 */
65227 + oldfs = get_fs();
65228 + set_fs(USER_DS);
65229 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
65230 futex_cmpxchg_enabled = 1;
65231 + set_fs(oldfs);
65232
65233 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
65234 plist_head_init(&futex_queues[i].chain);
65235 diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
65236 index 5f9e689..582d46d 100644
65237 --- a/kernel/futex_compat.c
65238 +++ b/kernel/futex_compat.c
65239 @@ -10,6 +10,7 @@
65240 #include <linux/compat.h>
65241 #include <linux/nsproxy.h>
65242 #include <linux/futex.h>
65243 +#include <linux/ptrace.h>
65244
65245 #include <asm/uaccess.h>
65246
65247 @@ -136,7 +137,8 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
65248 {
65249 struct compat_robust_list_head __user *head;
65250 unsigned long ret;
65251 - const struct cred *cred = current_cred(), *pcred;
65252 + const struct cred *cred = current_cred();
65253 + const struct cred *pcred;
65254
65255 if (!futex_cmpxchg_enabled)
65256 return -ENOSYS;
65257 @@ -152,6 +154,10 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
65258 if (!p)
65259 goto err_unlock;
65260 ret = -EPERM;
65261 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65262 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
65263 + goto err_unlock;
65264 +#endif
65265 pcred = __task_cred(p);
65266 /* If victim is in different user_ns, then uids are not
65267 comparable, so we must have CAP_SYS_PTRACE */
65268 diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
65269 index 9b22d03..6295b62 100644
65270 --- a/kernel/gcov/base.c
65271 +++ b/kernel/gcov/base.c
65272 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
65273 }
65274
65275 #ifdef CONFIG_MODULES
65276 -static inline int within(void *addr, void *start, unsigned long size)
65277 -{
65278 - return ((addr >= start) && (addr < start + size));
65279 -}
65280 -
65281 /* Update list and generate events when modules are unloaded. */
65282 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
65283 void *data)
65284 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
65285 prev = NULL;
65286 /* Remove entries located in module from linked list. */
65287 for (info = gcov_info_head; info; info = info->next) {
65288 - if (within(info, mod->module_core, mod->core_size)) {
65289 + if (within_module_core_rw((unsigned long)info, mod)) {
65290 if (prev)
65291 prev->next = info->next;
65292 else
65293 diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
65294 index ae34bf5..4e2f3d0 100644
65295 --- a/kernel/hrtimer.c
65296 +++ b/kernel/hrtimer.c
65297 @@ -1393,7 +1393,7 @@ void hrtimer_peek_ahead_timers(void)
65298 local_irq_restore(flags);
65299 }
65300
65301 -static void run_hrtimer_softirq(struct softirq_action *h)
65302 +static void run_hrtimer_softirq(void)
65303 {
65304 hrtimer_peek_ahead_timers();
65305 }
65306 diff --git a/kernel/jump_label.c b/kernel/jump_label.c
65307 index 01d3b70..9e4d098 100644
65308 --- a/kernel/jump_label.c
65309 +++ b/kernel/jump_label.c
65310 @@ -55,7 +55,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
65311
65312 size = (((unsigned long)stop - (unsigned long)start)
65313 / sizeof(struct jump_entry));
65314 + pax_open_kernel();
65315 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
65316 + pax_close_kernel();
65317 }
65318
65319 static void jump_label_update(struct jump_label_key *key, int enable);
65320 @@ -340,10 +342,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
65321 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
65322 struct jump_entry *iter;
65323
65324 + pax_open_kernel();
65325 for (iter = iter_start; iter < iter_stop; iter++) {
65326 if (within_module_init(iter->code, mod))
65327 iter->code = 0;
65328 }
65329 + pax_close_kernel();
65330 }
65331
65332 static int
65333 diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
65334 index 079f1d3..a407562 100644
65335 --- a/kernel/kallsyms.c
65336 +++ b/kernel/kallsyms.c
65337 @@ -11,6 +11,9 @@
65338 * Changed the compression method from stem compression to "table lookup"
65339 * compression (see scripts/kallsyms.c for a more complete description)
65340 */
65341 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65342 +#define __INCLUDED_BY_HIDESYM 1
65343 +#endif
65344 #include <linux/kallsyms.h>
65345 #include <linux/module.h>
65346 #include <linux/init.h>
65347 @@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
65348
65349 static inline int is_kernel_inittext(unsigned long addr)
65350 {
65351 + if (system_state != SYSTEM_BOOTING)
65352 + return 0;
65353 +
65354 if (addr >= (unsigned long)_sinittext
65355 && addr <= (unsigned long)_einittext)
65356 return 1;
65357 return 0;
65358 }
65359
65360 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
65361 +#ifdef CONFIG_MODULES
65362 +static inline int is_module_text(unsigned long addr)
65363 +{
65364 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
65365 + return 1;
65366 +
65367 + addr = ktla_ktva(addr);
65368 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
65369 +}
65370 +#else
65371 +static inline int is_module_text(unsigned long addr)
65372 +{
65373 + return 0;
65374 +}
65375 +#endif
65376 +#endif
65377 +
65378 static inline int is_kernel_text(unsigned long addr)
65379 {
65380 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
65381 @@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
65382
65383 static inline int is_kernel(unsigned long addr)
65384 {
65385 +
65386 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
65387 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
65388 + return 1;
65389 +
65390 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
65391 +#else
65392 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
65393 +#endif
65394 +
65395 return 1;
65396 return in_gate_area_no_mm(addr);
65397 }
65398
65399 static int is_ksym_addr(unsigned long addr)
65400 {
65401 +
65402 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
65403 + if (is_module_text(addr))
65404 + return 0;
65405 +#endif
65406 +
65407 if (all_var)
65408 return is_kernel(addr);
65409
65410 @@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
65411
65412 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
65413 {
65414 - iter->name[0] = '\0';
65415 iter->nameoff = get_symbol_offset(new_pos);
65416 iter->pos = new_pos;
65417 }
65418 @@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, void *p)
65419 {
65420 struct kallsym_iter *iter = m->private;
65421
65422 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65423 + if (current_uid())
65424 + return 0;
65425 +#endif
65426 +
65427 /* Some debugging symbols have no name. Ignore them. */
65428 if (!iter->name[0])
65429 return 0;
65430 @@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
65431 struct kallsym_iter *iter;
65432 int ret;
65433
65434 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
65435 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
65436 if (!iter)
65437 return -ENOMEM;
65438 reset_iter(iter, 0);
65439 diff --git a/kernel/kexec.c b/kernel/kexec.c
65440 index 7b08867..3bac516 100644
65441 --- a/kernel/kexec.c
65442 +++ b/kernel/kexec.c
65443 @@ -1047,7 +1047,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
65444 unsigned long flags)
65445 {
65446 struct compat_kexec_segment in;
65447 - struct kexec_segment out, __user *ksegments;
65448 + struct kexec_segment out;
65449 + struct kexec_segment __user *ksegments;
65450 unsigned long i, result;
65451
65452 /* Don't allow clients that don't understand the native
65453 diff --git a/kernel/kmod.c b/kernel/kmod.c
65454 index a0a8854..642b106 100644
65455 --- a/kernel/kmod.c
65456 +++ b/kernel/kmod.c
65457 @@ -75,13 +75,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
65458 * If module auto-loading support is disabled then this function
65459 * becomes a no-operation.
65460 */
65461 -int __request_module(bool wait, const char *fmt, ...)
65462 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
65463 {
65464 - va_list args;
65465 char module_name[MODULE_NAME_LEN];
65466 unsigned int max_modprobes;
65467 int ret;
65468 - char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
65469 + char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
65470 static char *envp[] = { "HOME=/",
65471 "TERM=linux",
65472 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
65473 @@ -90,9 +89,7 @@ int __request_module(bool wait, const char *fmt, ...)
65474 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
65475 static int kmod_loop_msg;
65476
65477 - va_start(args, fmt);
65478 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
65479 - va_end(args);
65480 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
65481 if (ret >= MODULE_NAME_LEN)
65482 return -ENAMETOOLONG;
65483
65484 @@ -100,6 +97,20 @@ int __request_module(bool wait, const char *fmt, ...)
65485 if (ret)
65486 return ret;
65487
65488 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
65489 + if (!current_uid()) {
65490 + /* hack to workaround consolekit/udisks stupidity */
65491 + read_lock(&tasklist_lock);
65492 + if (!strcmp(current->comm, "mount") &&
65493 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
65494 + read_unlock(&tasklist_lock);
65495 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
65496 + return -EPERM;
65497 + }
65498 + read_unlock(&tasklist_lock);
65499 + }
65500 +#endif
65501 +
65502 /* If modprobe needs a service that is in a module, we get a recursive
65503 * loop. Limit the number of running kmod threads to max_threads/2 or
65504 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
65505 @@ -135,6 +146,47 @@ int __request_module(bool wait, const char *fmt, ...)
65506 atomic_dec(&kmod_concurrent);
65507 return ret;
65508 }
65509 +
65510 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
65511 +{
65512 + va_list args;
65513 + int ret;
65514 +
65515 + va_start(args, fmt);
65516 + ret = ____request_module(wait, module_param, fmt, args);
65517 + va_end(args);
65518 +
65519 + return ret;
65520 +}
65521 +
65522 +int __request_module(bool wait, const char *fmt, ...)
65523 +{
65524 + va_list args;
65525 + int ret;
65526 +
65527 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
65528 + if (current_uid()) {
65529 + char module_param[MODULE_NAME_LEN];
65530 +
65531 + memset(module_param, 0, sizeof(module_param));
65532 +
65533 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
65534 +
65535 + va_start(args, fmt);
65536 + ret = ____request_module(wait, module_param, fmt, args);
65537 + va_end(args);
65538 +
65539 + return ret;
65540 + }
65541 +#endif
65542 +
65543 + va_start(args, fmt);
65544 + ret = ____request_module(wait, NULL, fmt, args);
65545 + va_end(args);
65546 +
65547 + return ret;
65548 +}
65549 +
65550 EXPORT_SYMBOL(__request_module);
65551 #endif /* CONFIG_MODULES */
65552
65553 @@ -224,7 +276,7 @@ static int wait_for_helper(void *data)
65554 *
65555 * Thus the __user pointer cast is valid here.
65556 */
65557 - sys_wait4(pid, (int __user *)&ret, 0, NULL);
65558 + sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
65559
65560 /*
65561 * If ret is 0, either ____call_usermodehelper failed and the
65562 diff --git a/kernel/kprobes.c b/kernel/kprobes.c
65563 index c62b854..cb67968 100644
65564 --- a/kernel/kprobes.c
65565 +++ b/kernel/kprobes.c
65566 @@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
65567 * kernel image and loaded module images reside. This is required
65568 * so x86_64 can correctly handle the %rip-relative fixups.
65569 */
65570 - kip->insns = module_alloc(PAGE_SIZE);
65571 + kip->insns = module_alloc_exec(PAGE_SIZE);
65572 if (!kip->insns) {
65573 kfree(kip);
65574 return NULL;
65575 @@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
65576 */
65577 if (!list_is_singular(&kip->list)) {
65578 list_del(&kip->list);
65579 - module_free(NULL, kip->insns);
65580 + module_free_exec(NULL, kip->insns);
65581 kfree(kip);
65582 }
65583 return 1;
65584 @@ -1955,7 +1955,7 @@ static int __init init_kprobes(void)
65585 {
65586 int i, err = 0;
65587 unsigned long offset = 0, size = 0;
65588 - char *modname, namebuf[128];
65589 + char *modname, namebuf[KSYM_NAME_LEN];
65590 const char *symbol_name;
65591 void *addr;
65592 struct kprobe_blackpoint *kb;
65593 @@ -2081,7 +2081,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
65594 const char *sym = NULL;
65595 unsigned int i = *(loff_t *) v;
65596 unsigned long offset = 0;
65597 - char *modname, namebuf[128];
65598 + char *modname, namebuf[KSYM_NAME_LEN];
65599
65600 head = &kprobe_table[i];
65601 preempt_disable();
65602 diff --git a/kernel/lockdep.c b/kernel/lockdep.c
65603 index 8889f7d..95319b7 100644
65604 --- a/kernel/lockdep.c
65605 +++ b/kernel/lockdep.c
65606 @@ -590,6 +590,10 @@ static int static_obj(void *obj)
65607 end = (unsigned long) &_end,
65608 addr = (unsigned long) obj;
65609
65610 +#ifdef CONFIG_PAX_KERNEXEC
65611 + start = ktla_ktva(start);
65612 +#endif
65613 +
65614 /*
65615 * static variable?
65616 */
65617 @@ -730,6 +734,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
65618 if (!static_obj(lock->key)) {
65619 debug_locks_off();
65620 printk("INFO: trying to register non-static key.\n");
65621 + printk("lock:%pS key:%pS.\n", lock, lock->key);
65622 printk("the code is fine but needs lockdep annotation.\n");
65623 printk("turning off the locking correctness validator.\n");
65624 dump_stack();
65625 @@ -3042,7 +3047,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
65626 if (!class)
65627 return 0;
65628 }
65629 - atomic_inc((atomic_t *)&class->ops);
65630 + atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
65631 if (very_verbose(class)) {
65632 printk("\nacquire class [%p] %s", class->key, class->name);
65633 if (class->name_version > 1)
65634 diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
65635 index 91c32a0..b2c71c5 100644
65636 --- a/kernel/lockdep_proc.c
65637 +++ b/kernel/lockdep_proc.c
65638 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
65639
65640 static void print_name(struct seq_file *m, struct lock_class *class)
65641 {
65642 - char str[128];
65643 + char str[KSYM_NAME_LEN];
65644 const char *name = class->name;
65645
65646 if (!name) {
65647 diff --git a/kernel/module.c b/kernel/module.c
65648 index 2c93276..476fe81 100644
65649 --- a/kernel/module.c
65650 +++ b/kernel/module.c
65651 @@ -58,6 +58,7 @@
65652 #include <linux/jump_label.h>
65653 #include <linux/pfn.h>
65654 #include <linux/bsearch.h>
65655 +#include <linux/grsecurity.h>
65656
65657 #define CREATE_TRACE_POINTS
65658 #include <trace/events/module.h>
65659 @@ -113,7 +114,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
65660
65661 /* Bounds of module allocation, for speeding __module_address.
65662 * Protected by module_mutex. */
65663 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
65664 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
65665 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
65666
65667 int register_module_notifier(struct notifier_block * nb)
65668 {
65669 @@ -277,7 +279,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
65670 return true;
65671
65672 list_for_each_entry_rcu(mod, &modules, list) {
65673 - struct symsearch arr[] = {
65674 + struct symsearch modarr[] = {
65675 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
65676 NOT_GPL_ONLY, false },
65677 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
65678 @@ -299,7 +301,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
65679 #endif
65680 };
65681
65682 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
65683 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
65684 return true;
65685 }
65686 return false;
65687 @@ -431,7 +433,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
65688 static int percpu_modalloc(struct module *mod,
65689 unsigned long size, unsigned long align)
65690 {
65691 - if (align > PAGE_SIZE) {
65692 + if (align-1 >= PAGE_SIZE) {
65693 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
65694 mod->name, align, PAGE_SIZE);
65695 align = PAGE_SIZE;
65696 @@ -1001,7 +1003,7 @@ struct module_attribute module_uevent =
65697 static ssize_t show_coresize(struct module_attribute *mattr,
65698 struct module_kobject *mk, char *buffer)
65699 {
65700 - return sprintf(buffer, "%u\n", mk->mod->core_size);
65701 + return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
65702 }
65703
65704 static struct module_attribute modinfo_coresize =
65705 @@ -1010,7 +1012,7 @@ static struct module_attribute modinfo_coresize =
65706 static ssize_t show_initsize(struct module_attribute *mattr,
65707 struct module_kobject *mk, char *buffer)
65708 {
65709 - return sprintf(buffer, "%u\n", mk->mod->init_size);
65710 + return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
65711 }
65712
65713 static struct module_attribute modinfo_initsize =
65714 @@ -1224,7 +1226,7 @@ resolve_symbol_wait(struct module *mod,
65715 */
65716 #ifdef CONFIG_SYSFS
65717
65718 -#ifdef CONFIG_KALLSYMS
65719 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
65720 static inline bool sect_empty(const Elf_Shdr *sect)
65721 {
65722 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
65723 @@ -1690,21 +1692,21 @@ static void set_section_ro_nx(void *base,
65724
65725 static void unset_module_core_ro_nx(struct module *mod)
65726 {
65727 - set_page_attributes(mod->module_core + mod->core_text_size,
65728 - mod->module_core + mod->core_size,
65729 + set_page_attributes(mod->module_core_rw,
65730 + mod->module_core_rw + mod->core_size_rw,
65731 set_memory_x);
65732 - set_page_attributes(mod->module_core,
65733 - mod->module_core + mod->core_ro_size,
65734 + set_page_attributes(mod->module_core_rx,
65735 + mod->module_core_rx + mod->core_size_rx,
65736 set_memory_rw);
65737 }
65738
65739 static void unset_module_init_ro_nx(struct module *mod)
65740 {
65741 - set_page_attributes(mod->module_init + mod->init_text_size,
65742 - mod->module_init + mod->init_size,
65743 + set_page_attributes(mod->module_init_rw,
65744 + mod->module_init_rw + mod->init_size_rw,
65745 set_memory_x);
65746 - set_page_attributes(mod->module_init,
65747 - mod->module_init + mod->init_ro_size,
65748 + set_page_attributes(mod->module_init_rx,
65749 + mod->module_init_rx + mod->init_size_rx,
65750 set_memory_rw);
65751 }
65752
65753 @@ -1715,14 +1717,14 @@ void set_all_modules_text_rw(void)
65754
65755 mutex_lock(&module_mutex);
65756 list_for_each_entry_rcu(mod, &modules, list) {
65757 - if ((mod->module_core) && (mod->core_text_size)) {
65758 - set_page_attributes(mod->module_core,
65759 - mod->module_core + mod->core_text_size,
65760 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
65761 + set_page_attributes(mod->module_core_rx,
65762 + mod->module_core_rx + mod->core_size_rx,
65763 set_memory_rw);
65764 }
65765 - if ((mod->module_init) && (mod->init_text_size)) {
65766 - set_page_attributes(mod->module_init,
65767 - mod->module_init + mod->init_text_size,
65768 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
65769 + set_page_attributes(mod->module_init_rx,
65770 + mod->module_init_rx + mod->init_size_rx,
65771 set_memory_rw);
65772 }
65773 }
65774 @@ -1736,14 +1738,14 @@ void set_all_modules_text_ro(void)
65775
65776 mutex_lock(&module_mutex);
65777 list_for_each_entry_rcu(mod, &modules, list) {
65778 - if ((mod->module_core) && (mod->core_text_size)) {
65779 - set_page_attributes(mod->module_core,
65780 - mod->module_core + mod->core_text_size,
65781 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
65782 + set_page_attributes(mod->module_core_rx,
65783 + mod->module_core_rx + mod->core_size_rx,
65784 set_memory_ro);
65785 }
65786 - if ((mod->module_init) && (mod->init_text_size)) {
65787 - set_page_attributes(mod->module_init,
65788 - mod->module_init + mod->init_text_size,
65789 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
65790 + set_page_attributes(mod->module_init_rx,
65791 + mod->module_init_rx + mod->init_size_rx,
65792 set_memory_ro);
65793 }
65794 }
65795 @@ -1789,16 +1791,19 @@ static void free_module(struct module *mod)
65796
65797 /* This may be NULL, but that's OK */
65798 unset_module_init_ro_nx(mod);
65799 - module_free(mod, mod->module_init);
65800 + module_free(mod, mod->module_init_rw);
65801 + module_free_exec(mod, mod->module_init_rx);
65802 kfree(mod->args);
65803 percpu_modfree(mod);
65804
65805 /* Free lock-classes: */
65806 - lockdep_free_key_range(mod->module_core, mod->core_size);
65807 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
65808 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
65809
65810 /* Finally, free the core (containing the module structure) */
65811 unset_module_core_ro_nx(mod);
65812 - module_free(mod, mod->module_core);
65813 + module_free_exec(mod, mod->module_core_rx);
65814 + module_free(mod, mod->module_core_rw);
65815
65816 #ifdef CONFIG_MPU
65817 update_protections(current->mm);
65818 @@ -1867,10 +1872,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
65819 unsigned int i;
65820 int ret = 0;
65821 const struct kernel_symbol *ksym;
65822 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
65823 + int is_fs_load = 0;
65824 + int register_filesystem_found = 0;
65825 + char *p;
65826 +
65827 + p = strstr(mod->args, "grsec_modharden_fs");
65828 + if (p) {
65829 + char *endptr = p + strlen("grsec_modharden_fs");
65830 + /* copy \0 as well */
65831 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
65832 + is_fs_load = 1;
65833 + }
65834 +#endif
65835
65836 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
65837 const char *name = info->strtab + sym[i].st_name;
65838
65839 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
65840 + /* it's a real shame this will never get ripped and copied
65841 + upstream! ;(
65842 + */
65843 + if (is_fs_load && !strcmp(name, "register_filesystem"))
65844 + register_filesystem_found = 1;
65845 +#endif
65846 +
65847 switch (sym[i].st_shndx) {
65848 case SHN_COMMON:
65849 /* We compiled with -fno-common. These are not
65850 @@ -1891,7 +1917,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
65851 ksym = resolve_symbol_wait(mod, info, name);
65852 /* Ok if resolved. */
65853 if (ksym && !IS_ERR(ksym)) {
65854 + pax_open_kernel();
65855 sym[i].st_value = ksym->value;
65856 + pax_close_kernel();
65857 break;
65858 }
65859
65860 @@ -1910,11 +1938,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
65861 secbase = (unsigned long)mod_percpu(mod);
65862 else
65863 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
65864 + pax_open_kernel();
65865 sym[i].st_value += secbase;
65866 + pax_close_kernel();
65867 break;
65868 }
65869 }
65870
65871 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
65872 + if (is_fs_load && !register_filesystem_found) {
65873 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
65874 + ret = -EPERM;
65875 + }
65876 +#endif
65877 +
65878 return ret;
65879 }
65880
65881 @@ -2018,22 +2055,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
65882 || s->sh_entsize != ~0UL
65883 || strstarts(sname, ".init"))
65884 continue;
65885 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
65886 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
65887 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
65888 + else
65889 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
65890 pr_debug("\t%s\n", sname);
65891 }
65892 - switch (m) {
65893 - case 0: /* executable */
65894 - mod->core_size = debug_align(mod->core_size);
65895 - mod->core_text_size = mod->core_size;
65896 - break;
65897 - case 1: /* RO: text and ro-data */
65898 - mod->core_size = debug_align(mod->core_size);
65899 - mod->core_ro_size = mod->core_size;
65900 - break;
65901 - case 3: /* whole core */
65902 - mod->core_size = debug_align(mod->core_size);
65903 - break;
65904 - }
65905 }
65906
65907 pr_debug("Init section allocation order:\n");
65908 @@ -2047,23 +2074,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
65909 || s->sh_entsize != ~0UL
65910 || !strstarts(sname, ".init"))
65911 continue;
65912 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
65913 - | INIT_OFFSET_MASK);
65914 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
65915 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
65916 + else
65917 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
65918 + s->sh_entsize |= INIT_OFFSET_MASK;
65919 pr_debug("\t%s\n", sname);
65920 }
65921 - switch (m) {
65922 - case 0: /* executable */
65923 - mod->init_size = debug_align(mod->init_size);
65924 - mod->init_text_size = mod->init_size;
65925 - break;
65926 - case 1: /* RO: text and ro-data */
65927 - mod->init_size = debug_align(mod->init_size);
65928 - mod->init_ro_size = mod->init_size;
65929 - break;
65930 - case 3: /* whole init */
65931 - mod->init_size = debug_align(mod->init_size);
65932 - break;
65933 - }
65934 }
65935 }
65936
65937 @@ -2235,7 +2252,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
65938
65939 /* Put symbol section at end of init part of module. */
65940 symsect->sh_flags |= SHF_ALLOC;
65941 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
65942 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
65943 info->index.sym) | INIT_OFFSET_MASK;
65944 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
65945
65946 @@ -2250,13 +2267,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
65947 }
65948
65949 /* Append room for core symbols at end of core part. */
65950 - info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
65951 - info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
65952 - mod->core_size += strtab_size;
65953 + info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
65954 + info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
65955 + mod->core_size_rx += strtab_size;
65956
65957 /* Put string table section at end of init part of module. */
65958 strsect->sh_flags |= SHF_ALLOC;
65959 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
65960 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
65961 info->index.str) | INIT_OFFSET_MASK;
65962 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
65963 }
65964 @@ -2274,12 +2291,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
65965 /* Make sure we get permanent strtab: don't use info->strtab. */
65966 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
65967
65968 + pax_open_kernel();
65969 +
65970 /* Set types up while we still have access to sections. */
65971 for (i = 0; i < mod->num_symtab; i++)
65972 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
65973
65974 - mod->core_symtab = dst = mod->module_core + info->symoffs;
65975 - mod->core_strtab = s = mod->module_core + info->stroffs;
65976 + mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
65977 + mod->core_strtab = s = mod->module_core_rx + info->stroffs;
65978 src = mod->symtab;
65979 *dst = *src;
65980 *s++ = 0;
65981 @@ -2292,6 +2311,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
65982 s += strlcpy(s, &mod->strtab[src->st_name], KSYM_NAME_LEN) + 1;
65983 }
65984 mod->core_num_syms = ndst;
65985 +
65986 + pax_close_kernel();
65987 }
65988 #else
65989 static inline void layout_symtab(struct module *mod, struct load_info *info)
65990 @@ -2325,17 +2346,33 @@ void * __weak module_alloc(unsigned long size)
65991 return size == 0 ? NULL : vmalloc_exec(size);
65992 }
65993
65994 -static void *module_alloc_update_bounds(unsigned long size)
65995 +static void *module_alloc_update_bounds_rw(unsigned long size)
65996 {
65997 void *ret = module_alloc(size);
65998
65999 if (ret) {
66000 mutex_lock(&module_mutex);
66001 /* Update module bounds. */
66002 - if ((unsigned long)ret < module_addr_min)
66003 - module_addr_min = (unsigned long)ret;
66004 - if ((unsigned long)ret + size > module_addr_max)
66005 - module_addr_max = (unsigned long)ret + size;
66006 + if ((unsigned long)ret < module_addr_min_rw)
66007 + module_addr_min_rw = (unsigned long)ret;
66008 + if ((unsigned long)ret + size > module_addr_max_rw)
66009 + module_addr_max_rw = (unsigned long)ret + size;
66010 + mutex_unlock(&module_mutex);
66011 + }
66012 + return ret;
66013 +}
66014 +
66015 +static void *module_alloc_update_bounds_rx(unsigned long size)
66016 +{
66017 + void *ret = module_alloc_exec(size);
66018 +
66019 + if (ret) {
66020 + mutex_lock(&module_mutex);
66021 + /* Update module bounds. */
66022 + if ((unsigned long)ret < module_addr_min_rx)
66023 + module_addr_min_rx = (unsigned long)ret;
66024 + if ((unsigned long)ret + size > module_addr_max_rx)
66025 + module_addr_max_rx = (unsigned long)ret + size;
66026 mutex_unlock(&module_mutex);
66027 }
66028 return ret;
66029 @@ -2513,8 +2550,14 @@ static struct module *setup_load_info(struct load_info *info)
66030 static int check_modinfo(struct module *mod, struct load_info *info)
66031 {
66032 const char *modmagic = get_modinfo(info, "vermagic");
66033 + const char *license = get_modinfo(info, "license");
66034 int err;
66035
66036 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
66037 + if (!license || !license_is_gpl_compatible(license))
66038 + return -ENOEXEC;
66039 +#endif
66040 +
66041 /* This is allowed: modprobe --force will invalidate it. */
66042 if (!modmagic) {
66043 err = try_to_force_load(mod, "bad vermagic");
66044 @@ -2537,7 +2580,7 @@ static int check_modinfo(struct module *mod, struct load_info *info)
66045 }
66046
66047 /* Set up license info based on the info section */
66048 - set_license(mod, get_modinfo(info, "license"));
66049 + set_license(mod, license);
66050
66051 return 0;
66052 }
66053 @@ -2631,7 +2674,7 @@ static int move_module(struct module *mod, struct load_info *info)
66054 void *ptr;
66055
66056 /* Do the allocs. */
66057 - ptr = module_alloc_update_bounds(mod->core_size);
66058 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
66059 /*
66060 * The pointer to this block is stored in the module structure
66061 * which is inside the block. Just mark it as not being a
66062 @@ -2641,23 +2684,50 @@ static int move_module(struct module *mod, struct load_info *info)
66063 if (!ptr)
66064 return -ENOMEM;
66065
66066 - memset(ptr, 0, mod->core_size);
66067 - mod->module_core = ptr;
66068 + memset(ptr, 0, mod->core_size_rw);
66069 + mod->module_core_rw = ptr;
66070
66071 - ptr = module_alloc_update_bounds(mod->init_size);
66072 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
66073 /*
66074 * The pointer to this block is stored in the module structure
66075 * which is inside the block. This block doesn't need to be
66076 * scanned as it contains data and code that will be freed
66077 * after the module is initialized.
66078 */
66079 - kmemleak_ignore(ptr);
66080 - if (!ptr && mod->init_size) {
66081 - module_free(mod, mod->module_core);
66082 + kmemleak_not_leak(ptr);
66083 + if (!ptr && mod->init_size_rw) {
66084 + module_free(mod, mod->module_core_rw);
66085 return -ENOMEM;
66086 }
66087 - memset(ptr, 0, mod->init_size);
66088 - mod->module_init = ptr;
66089 + memset(ptr, 0, mod->init_size_rw);
66090 + mod->module_init_rw = ptr;
66091 +
66092 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
66093 + kmemleak_not_leak(ptr);
66094 + if (!ptr) {
66095 + module_free(mod, mod->module_init_rw);
66096 + module_free(mod, mod->module_core_rw);
66097 + return -ENOMEM;
66098 + }
66099 +
66100 + pax_open_kernel();
66101 + memset(ptr, 0, mod->core_size_rx);
66102 + pax_close_kernel();
66103 + mod->module_core_rx = ptr;
66104 +
66105 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
66106 + kmemleak_not_leak(ptr);
66107 + if (!ptr && mod->init_size_rx) {
66108 + module_free_exec(mod, mod->module_core_rx);
66109 + module_free(mod, mod->module_init_rw);
66110 + module_free(mod, mod->module_core_rw);
66111 + return -ENOMEM;
66112 + }
66113 +
66114 + pax_open_kernel();
66115 + memset(ptr, 0, mod->init_size_rx);
66116 + pax_close_kernel();
66117 + mod->module_init_rx = ptr;
66118
66119 /* Transfer each section which specifies SHF_ALLOC */
66120 pr_debug("final section addresses:\n");
66121 @@ -2668,16 +2738,45 @@ static int move_module(struct module *mod, struct load_info *info)
66122 if (!(shdr->sh_flags & SHF_ALLOC))
66123 continue;
66124
66125 - if (shdr->sh_entsize & INIT_OFFSET_MASK)
66126 - dest = mod->module_init
66127 - + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
66128 - else
66129 - dest = mod->module_core + shdr->sh_entsize;
66130 + if (shdr->sh_entsize & INIT_OFFSET_MASK) {
66131 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
66132 + dest = mod->module_init_rw
66133 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
66134 + else
66135 + dest = mod->module_init_rx
66136 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
66137 + } else {
66138 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
66139 + dest = mod->module_core_rw + shdr->sh_entsize;
66140 + else
66141 + dest = mod->module_core_rx + shdr->sh_entsize;
66142 + }
66143 +
66144 + if (shdr->sh_type != SHT_NOBITS) {
66145 +
66146 +#ifdef CONFIG_PAX_KERNEXEC
66147 +#ifdef CONFIG_X86_64
66148 + if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
66149 + set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
66150 +#endif
66151 + if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
66152 + pax_open_kernel();
66153 + memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
66154 + pax_close_kernel();
66155 + } else
66156 +#endif
66157
66158 - if (shdr->sh_type != SHT_NOBITS)
66159 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
66160 + }
66161 /* Update sh_addr to point to copy in image. */
66162 - shdr->sh_addr = (unsigned long)dest;
66163 +
66164 +#ifdef CONFIG_PAX_KERNEXEC
66165 + if (shdr->sh_flags & SHF_EXECINSTR)
66166 + shdr->sh_addr = ktva_ktla((unsigned long)dest);
66167 + else
66168 +#endif
66169 +
66170 + shdr->sh_addr = (unsigned long)dest;
66171 pr_debug("\t0x%lx %s\n",
66172 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
66173 }
66174 @@ -2728,12 +2827,12 @@ static void flush_module_icache(const struct module *mod)
66175 * Do it before processing of module parameters, so the module
66176 * can provide parameter accessor functions of its own.
66177 */
66178 - if (mod->module_init)
66179 - flush_icache_range((unsigned long)mod->module_init,
66180 - (unsigned long)mod->module_init
66181 - + mod->init_size);
66182 - flush_icache_range((unsigned long)mod->module_core,
66183 - (unsigned long)mod->module_core + mod->core_size);
66184 + if (mod->module_init_rx)
66185 + flush_icache_range((unsigned long)mod->module_init_rx,
66186 + (unsigned long)mod->module_init_rx
66187 + + mod->init_size_rx);
66188 + flush_icache_range((unsigned long)mod->module_core_rx,
66189 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
66190
66191 set_fs(old_fs);
66192 }
66193 @@ -2803,8 +2902,10 @@ out:
66194 static void module_deallocate(struct module *mod, struct load_info *info)
66195 {
66196 percpu_modfree(mod);
66197 - module_free(mod, mod->module_init);
66198 - module_free(mod, mod->module_core);
66199 + module_free_exec(mod, mod->module_init_rx);
66200 + module_free_exec(mod, mod->module_core_rx);
66201 + module_free(mod, mod->module_init_rw);
66202 + module_free(mod, mod->module_core_rw);
66203 }
66204
66205 int __weak module_finalize(const Elf_Ehdr *hdr,
66206 @@ -2868,9 +2969,38 @@ static struct module *load_module(void __user *umod,
66207 if (err)
66208 goto free_unload;
66209
66210 + /* Now copy in args */
66211 + mod->args = strndup_user(uargs, ~0UL >> 1);
66212 + if (IS_ERR(mod->args)) {
66213 + err = PTR_ERR(mod->args);
66214 + goto free_unload;
66215 + }
66216 +
66217 /* Set up MODINFO_ATTR fields */
66218 setup_modinfo(mod, &info);
66219
66220 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
66221 + {
66222 + char *p, *p2;
66223 +
66224 + if (strstr(mod->args, "grsec_modharden_netdev")) {
66225 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
66226 + err = -EPERM;
66227 + goto free_modinfo;
66228 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
66229 + p += strlen("grsec_modharden_normal");
66230 + p2 = strstr(p, "_");
66231 + if (p2) {
66232 + *p2 = '\0';
66233 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
66234 + *p2 = '_';
66235 + }
66236 + err = -EPERM;
66237 + goto free_modinfo;
66238 + }
66239 + }
66240 +#endif
66241 +
66242 /* Fix up syms, so that st_value is a pointer to location. */
66243 err = simplify_symbols(mod, &info);
66244 if (err < 0)
66245 @@ -2886,13 +3016,6 @@ static struct module *load_module(void __user *umod,
66246
66247 flush_module_icache(mod);
66248
66249 - /* Now copy in args */
66250 - mod->args = strndup_user(uargs, ~0UL >> 1);
66251 - if (IS_ERR(mod->args)) {
66252 - err = PTR_ERR(mod->args);
66253 - goto free_arch_cleanup;
66254 - }
66255 -
66256 /* Mark state as coming so strong_try_module_get() ignores us. */
66257 mod->state = MODULE_STATE_COMING;
66258
66259 @@ -2949,11 +3072,10 @@ static struct module *load_module(void __user *umod,
66260 unlock:
66261 mutex_unlock(&module_mutex);
66262 synchronize_sched();
66263 - kfree(mod->args);
66264 - free_arch_cleanup:
66265 module_arch_cleanup(mod);
66266 free_modinfo:
66267 free_modinfo(mod);
66268 + kfree(mod->args);
66269 free_unload:
66270 module_unload_free(mod);
66271 free_module:
66272 @@ -2994,16 +3116,16 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
66273 MODULE_STATE_COMING, mod);
66274
66275 /* Set RO and NX regions for core */
66276 - set_section_ro_nx(mod->module_core,
66277 - mod->core_text_size,
66278 - mod->core_ro_size,
66279 - mod->core_size);
66280 + set_section_ro_nx(mod->module_core_rx,
66281 + mod->core_size_rx,
66282 + mod->core_size_rx,
66283 + mod->core_size_rx);
66284
66285 /* Set RO and NX regions for init */
66286 - set_section_ro_nx(mod->module_init,
66287 - mod->init_text_size,
66288 - mod->init_ro_size,
66289 - mod->init_size);
66290 + set_section_ro_nx(mod->module_init_rx,
66291 + mod->init_size_rx,
66292 + mod->init_size_rx,
66293 + mod->init_size_rx);
66294
66295 do_mod_ctors(mod);
66296 /* Start the module */
66297 @@ -3049,11 +3171,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
66298 mod->strtab = mod->core_strtab;
66299 #endif
66300 unset_module_init_ro_nx(mod);
66301 - module_free(mod, mod->module_init);
66302 - mod->module_init = NULL;
66303 - mod->init_size = 0;
66304 - mod->init_ro_size = 0;
66305 - mod->init_text_size = 0;
66306 + module_free(mod, mod->module_init_rw);
66307 + module_free_exec(mod, mod->module_init_rx);
66308 + mod->module_init_rw = NULL;
66309 + mod->module_init_rx = NULL;
66310 + mod->init_size_rw = 0;
66311 + mod->init_size_rx = 0;
66312 mutex_unlock(&module_mutex);
66313
66314 return 0;
66315 @@ -3084,10 +3207,16 @@ static const char *get_ksymbol(struct module *mod,
66316 unsigned long nextval;
66317
66318 /* At worse, next value is at end of module */
66319 - if (within_module_init(addr, mod))
66320 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
66321 + if (within_module_init_rx(addr, mod))
66322 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
66323 + else if (within_module_init_rw(addr, mod))
66324 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
66325 + else if (within_module_core_rx(addr, mod))
66326 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
66327 + else if (within_module_core_rw(addr, mod))
66328 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
66329 else
66330 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
66331 + return NULL;
66332
66333 /* Scan for closest preceding symbol, and next symbol. (ELF
66334 starts real symbols at 1). */
66335 @@ -3322,7 +3451,7 @@ static int m_show(struct seq_file *m, void *p)
66336 char buf[8];
66337
66338 seq_printf(m, "%s %u",
66339 - mod->name, mod->init_size + mod->core_size);
66340 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
66341 print_unload_info(m, mod);
66342
66343 /* Informative for users. */
66344 @@ -3331,7 +3460,7 @@ static int m_show(struct seq_file *m, void *p)
66345 mod->state == MODULE_STATE_COMING ? "Loading":
66346 "Live");
66347 /* Used by oprofile and other similar tools. */
66348 - seq_printf(m, " 0x%pK", mod->module_core);
66349 + seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
66350
66351 /* Taints info */
66352 if (mod->taints)
66353 @@ -3367,7 +3496,17 @@ static const struct file_operations proc_modules_operations = {
66354
66355 static int __init proc_modules_init(void)
66356 {
66357 +#ifndef CONFIG_GRKERNSEC_HIDESYM
66358 +#ifdef CONFIG_GRKERNSEC_PROC_USER
66359 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
66360 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66361 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
66362 +#else
66363 proc_create("modules", 0, NULL, &proc_modules_operations);
66364 +#endif
66365 +#else
66366 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
66367 +#endif
66368 return 0;
66369 }
66370 module_init(proc_modules_init);
66371 @@ -3426,12 +3565,12 @@ struct module *__module_address(unsigned long addr)
66372 {
66373 struct module *mod;
66374
66375 - if (addr < module_addr_min || addr > module_addr_max)
66376 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
66377 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
66378 return NULL;
66379
66380 list_for_each_entry_rcu(mod, &modules, list)
66381 - if (within_module_core(addr, mod)
66382 - || within_module_init(addr, mod))
66383 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
66384 return mod;
66385 return NULL;
66386 }
66387 @@ -3465,11 +3604,20 @@ bool is_module_text_address(unsigned long addr)
66388 */
66389 struct module *__module_text_address(unsigned long addr)
66390 {
66391 - struct module *mod = __module_address(addr);
66392 + struct module *mod;
66393 +
66394 +#ifdef CONFIG_X86_32
66395 + addr = ktla_ktva(addr);
66396 +#endif
66397 +
66398 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
66399 + return NULL;
66400 +
66401 + mod = __module_address(addr);
66402 +
66403 if (mod) {
66404 /* Make sure it's within the text section. */
66405 - if (!within(addr, mod->module_init, mod->init_text_size)
66406 - && !within(addr, mod->module_core, mod->core_text_size))
66407 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
66408 mod = NULL;
66409 }
66410 return mod;
66411 diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
66412 index 7e3443f..b2a1e6b 100644
66413 --- a/kernel/mutex-debug.c
66414 +++ b/kernel/mutex-debug.c
66415 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
66416 }
66417
66418 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
66419 - struct thread_info *ti)
66420 + struct task_struct *task)
66421 {
66422 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
66423
66424 /* Mark the current thread as blocked on the lock: */
66425 - ti->task->blocked_on = waiter;
66426 + task->blocked_on = waiter;
66427 }
66428
66429 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
66430 - struct thread_info *ti)
66431 + struct task_struct *task)
66432 {
66433 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
66434 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
66435 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
66436 - ti->task->blocked_on = NULL;
66437 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
66438 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
66439 + task->blocked_on = NULL;
66440
66441 list_del_init(&waiter->list);
66442 waiter->task = NULL;
66443 diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
66444 index 0799fd3..d06ae3b 100644
66445 --- a/kernel/mutex-debug.h
66446 +++ b/kernel/mutex-debug.h
66447 @@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
66448 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
66449 extern void debug_mutex_add_waiter(struct mutex *lock,
66450 struct mutex_waiter *waiter,
66451 - struct thread_info *ti);
66452 + struct task_struct *task);
66453 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
66454 - struct thread_info *ti);
66455 + struct task_struct *task);
66456 extern void debug_mutex_unlock(struct mutex *lock);
66457 extern void debug_mutex_init(struct mutex *lock, const char *name,
66458 struct lock_class_key *key);
66459 diff --git a/kernel/mutex.c b/kernel/mutex.c
66460 index 89096dd..f91ebc5 100644
66461 --- a/kernel/mutex.c
66462 +++ b/kernel/mutex.c
66463 @@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
66464 spin_lock_mutex(&lock->wait_lock, flags);
66465
66466 debug_mutex_lock_common(lock, &waiter);
66467 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
66468 + debug_mutex_add_waiter(lock, &waiter, task);
66469
66470 /* add waiting tasks to the end of the waitqueue (FIFO): */
66471 list_add_tail(&waiter.list, &lock->wait_list);
66472 @@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
66473 * TASK_UNINTERRUPTIBLE case.)
66474 */
66475 if (unlikely(signal_pending_state(state, task))) {
66476 - mutex_remove_waiter(lock, &waiter,
66477 - task_thread_info(task));
66478 + mutex_remove_waiter(lock, &waiter, task);
66479 mutex_release(&lock->dep_map, 1, ip);
66480 spin_unlock_mutex(&lock->wait_lock, flags);
66481
66482 @@ -249,7 +248,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
66483 done:
66484 lock_acquired(&lock->dep_map, ip);
66485 /* got the lock - rejoice! */
66486 - mutex_remove_waiter(lock, &waiter, current_thread_info());
66487 + mutex_remove_waiter(lock, &waiter, task);
66488 mutex_set_owner(lock);
66489
66490 /* set it to 0 if there are no waiters left: */
66491 diff --git a/kernel/padata.c b/kernel/padata.c
66492 index b452599..5d68f4e 100644
66493 --- a/kernel/padata.c
66494 +++ b/kernel/padata.c
66495 @@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_instance *pinst,
66496 padata->pd = pd;
66497 padata->cb_cpu = cb_cpu;
66498
66499 - if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
66500 - atomic_set(&pd->seq_nr, -1);
66501 + if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
66502 + atomic_set_unchecked(&pd->seq_nr, -1);
66503
66504 - padata->seq_nr = atomic_inc_return(&pd->seq_nr);
66505 + padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
66506
66507 target_cpu = padata_cpu_hash(padata);
66508 queue = per_cpu_ptr(pd->pqueue, target_cpu);
66509 @@ -444,7 +444,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
66510 padata_init_pqueues(pd);
66511 padata_init_squeues(pd);
66512 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
66513 - atomic_set(&pd->seq_nr, -1);
66514 + atomic_set_unchecked(&pd->seq_nr, -1);
66515 atomic_set(&pd->reorder_objects, 0);
66516 atomic_set(&pd->refcnt, 0);
66517 pd->pinst = pinst;
66518 diff --git a/kernel/panic.c b/kernel/panic.c
66519 index 80aed44..f291d37 100644
66520 --- a/kernel/panic.c
66521 +++ b/kernel/panic.c
66522 @@ -402,7 +402,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
66523 const char *board;
66524
66525 printk(KERN_WARNING "------------[ cut here ]------------\n");
66526 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
66527 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
66528 board = dmi_get_system_info(DMI_PRODUCT_NAME);
66529 if (board)
66530 printk(KERN_WARNING "Hardware name: %s\n", board);
66531 @@ -457,7 +457,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
66532 */
66533 void __stack_chk_fail(void)
66534 {
66535 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
66536 + dump_stack();
66537 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
66538 __builtin_return_address(0));
66539 }
66540 EXPORT_SYMBOL(__stack_chk_fail);
66541 diff --git a/kernel/pid.c b/kernel/pid.c
66542 index 9f08dfa..6765c40 100644
66543 --- a/kernel/pid.c
66544 +++ b/kernel/pid.c
66545 @@ -33,6 +33,7 @@
66546 #include <linux/rculist.h>
66547 #include <linux/bootmem.h>
66548 #include <linux/hash.h>
66549 +#include <linux/security.h>
66550 #include <linux/pid_namespace.h>
66551 #include <linux/init_task.h>
66552 #include <linux/syscalls.h>
66553 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
66554
66555 int pid_max = PID_MAX_DEFAULT;
66556
66557 -#define RESERVED_PIDS 300
66558 +#define RESERVED_PIDS 500
66559
66560 int pid_max_min = RESERVED_PIDS + 1;
66561 int pid_max_max = PID_MAX_LIMIT;
66562 @@ -420,10 +421,18 @@ EXPORT_SYMBOL(pid_task);
66563 */
66564 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
66565 {
66566 + struct task_struct *task;
66567 +
66568 rcu_lockdep_assert(rcu_read_lock_held(),
66569 "find_task_by_pid_ns() needs rcu_read_lock()"
66570 " protection");
66571 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
66572 +
66573 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
66574 +
66575 + if (gr_pid_is_chrooted(task))
66576 + return NULL;
66577 +
66578 + return task;
66579 }
66580
66581 struct task_struct *find_task_by_vpid(pid_t vnr)
66582 @@ -431,6 +440,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
66583 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
66584 }
66585
66586 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
66587 +{
66588 + rcu_lockdep_assert(rcu_read_lock_held(),
66589 + "find_task_by_pid_ns() needs rcu_read_lock()"
66590 + " protection");
66591 + return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
66592 +}
66593 +
66594 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
66595 {
66596 struct pid *pid;
66597 diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
66598 index 125cb67..a4d1c30 100644
66599 --- a/kernel/posix-cpu-timers.c
66600 +++ b/kernel/posix-cpu-timers.c
66601 @@ -6,6 +6,7 @@
66602 #include <linux/posix-timers.h>
66603 #include <linux/errno.h>
66604 #include <linux/math64.h>
66605 +#include <linux/security.h>
66606 #include <asm/uaccess.h>
66607 #include <linux/kernel_stat.h>
66608 #include <trace/events/timer.h>
66609 @@ -1578,14 +1579,14 @@ struct k_clock clock_posix_cpu = {
66610
66611 static __init int init_posix_cpu_timers(void)
66612 {
66613 - struct k_clock process = {
66614 + static struct k_clock process = {
66615 .clock_getres = process_cpu_clock_getres,
66616 .clock_get = process_cpu_clock_get,
66617 .timer_create = process_cpu_timer_create,
66618 .nsleep = process_cpu_nsleep,
66619 .nsleep_restart = process_cpu_nsleep_restart,
66620 };
66621 - struct k_clock thread = {
66622 + static struct k_clock thread = {
66623 .clock_getres = thread_cpu_clock_getres,
66624 .clock_get = thread_cpu_clock_get,
66625 .timer_create = thread_cpu_timer_create,
66626 diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
66627 index 69185ae..cc2847a 100644
66628 --- a/kernel/posix-timers.c
66629 +++ b/kernel/posix-timers.c
66630 @@ -43,6 +43,7 @@
66631 #include <linux/idr.h>
66632 #include <linux/posix-clock.h>
66633 #include <linux/posix-timers.h>
66634 +#include <linux/grsecurity.h>
66635 #include <linux/syscalls.h>
66636 #include <linux/wait.h>
66637 #include <linux/workqueue.h>
66638 @@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
66639 * which we beg off on and pass to do_sys_settimeofday().
66640 */
66641
66642 -static struct k_clock posix_clocks[MAX_CLOCKS];
66643 +static struct k_clock *posix_clocks[MAX_CLOCKS];
66644
66645 /*
66646 * These ones are defined below.
66647 @@ -227,7 +228,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
66648 */
66649 static __init int init_posix_timers(void)
66650 {
66651 - struct k_clock clock_realtime = {
66652 + static struct k_clock clock_realtime = {
66653 .clock_getres = hrtimer_get_res,
66654 .clock_get = posix_clock_realtime_get,
66655 .clock_set = posix_clock_realtime_set,
66656 @@ -239,7 +240,7 @@ static __init int init_posix_timers(void)
66657 .timer_get = common_timer_get,
66658 .timer_del = common_timer_del,
66659 };
66660 - struct k_clock clock_monotonic = {
66661 + static struct k_clock clock_monotonic = {
66662 .clock_getres = hrtimer_get_res,
66663 .clock_get = posix_ktime_get_ts,
66664 .nsleep = common_nsleep,
66665 @@ -249,19 +250,19 @@ static __init int init_posix_timers(void)
66666 .timer_get = common_timer_get,
66667 .timer_del = common_timer_del,
66668 };
66669 - struct k_clock clock_monotonic_raw = {
66670 + static struct k_clock clock_monotonic_raw = {
66671 .clock_getres = hrtimer_get_res,
66672 .clock_get = posix_get_monotonic_raw,
66673 };
66674 - struct k_clock clock_realtime_coarse = {
66675 + static struct k_clock clock_realtime_coarse = {
66676 .clock_getres = posix_get_coarse_res,
66677 .clock_get = posix_get_realtime_coarse,
66678 };
66679 - struct k_clock clock_monotonic_coarse = {
66680 + static struct k_clock clock_monotonic_coarse = {
66681 .clock_getres = posix_get_coarse_res,
66682 .clock_get = posix_get_monotonic_coarse,
66683 };
66684 - struct k_clock clock_boottime = {
66685 + static struct k_clock clock_boottime = {
66686 .clock_getres = hrtimer_get_res,
66687 .clock_get = posix_get_boottime,
66688 .nsleep = common_nsleep,
66689 @@ -473,7 +474,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
66690 return;
66691 }
66692
66693 - posix_clocks[clock_id] = *new_clock;
66694 + posix_clocks[clock_id] = new_clock;
66695 }
66696 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
66697
66698 @@ -519,9 +520,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
66699 return (id & CLOCKFD_MASK) == CLOCKFD ?
66700 &clock_posix_dynamic : &clock_posix_cpu;
66701
66702 - if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
66703 + if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
66704 return NULL;
66705 - return &posix_clocks[id];
66706 + return posix_clocks[id];
66707 }
66708
66709 static int common_timer_create(struct k_itimer *new_timer)
66710 @@ -959,6 +960,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
66711 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
66712 return -EFAULT;
66713
66714 + /* only the CLOCK_REALTIME clock can be set, all other clocks
66715 + have their clock_set fptr set to a nosettime dummy function
66716 + CLOCK_REALTIME has a NULL clock_set fptr which causes it to
66717 + call common_clock_set, which calls do_sys_settimeofday, which
66718 + we hook
66719 + */
66720 +
66721 return kc->clock_set(which_clock, &new_tp);
66722 }
66723
66724 diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
66725 index d523593..68197a4 100644
66726 --- a/kernel/power/poweroff.c
66727 +++ b/kernel/power/poweroff.c
66728 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
66729 .enable_mask = SYSRQ_ENABLE_BOOT,
66730 };
66731
66732 -static int pm_sysrq_init(void)
66733 +static int __init pm_sysrq_init(void)
66734 {
66735 register_sysrq_key('o', &sysrq_poweroff_op);
66736 return 0;
66737 diff --git a/kernel/power/process.c b/kernel/power/process.c
66738 index 7e42645..3d43df1 100644
66739 --- a/kernel/power/process.c
66740 +++ b/kernel/power/process.c
66741 @@ -32,6 +32,7 @@ static int try_to_freeze_tasks(bool user_only)
66742 u64 elapsed_csecs64;
66743 unsigned int elapsed_csecs;
66744 bool wakeup = false;
66745 + bool timedout = false;
66746
66747 do_gettimeofday(&start);
66748
66749 @@ -42,6 +43,8 @@ static int try_to_freeze_tasks(bool user_only)
66750
66751 while (true) {
66752 todo = 0;
66753 + if (time_after(jiffies, end_time))
66754 + timedout = true;
66755 read_lock(&tasklist_lock);
66756 do_each_thread(g, p) {
66757 if (p == current || !freeze_task(p))
66758 @@ -59,9 +62,13 @@ static int try_to_freeze_tasks(bool user_only)
66759 * try_to_stop() after schedule() in ptrace/signal
66760 * stop sees TIF_FREEZE.
66761 */
66762 - if (!task_is_stopped_or_traced(p) &&
66763 - !freezer_should_skip(p))
66764 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
66765 todo++;
66766 + if (timedout) {
66767 + printk(KERN_ERR "Task refusing to freeze:\n");
66768 + sched_show_task(p);
66769 + }
66770 + }
66771 } while_each_thread(g, p);
66772 read_unlock(&tasklist_lock);
66773
66774 @@ -70,7 +77,7 @@ static int try_to_freeze_tasks(bool user_only)
66775 todo += wq_busy;
66776 }
66777
66778 - if (!todo || time_after(jiffies, end_time))
66779 + if (!todo || timedout)
66780 break;
66781
66782 if (pm_wakeup_pending()) {
66783 diff --git a/kernel/printk.c b/kernel/printk.c
66784 index 32690a0..cd7c798 100644
66785 --- a/kernel/printk.c
66786 +++ b/kernel/printk.c
66787 @@ -313,6 +313,11 @@ static int check_syslog_permissions(int type, bool from_file)
66788 if (from_file && type != SYSLOG_ACTION_OPEN)
66789 return 0;
66790
66791 +#ifdef CONFIG_GRKERNSEC_DMESG
66792 + if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
66793 + return -EPERM;
66794 +#endif
66795 +
66796 if (syslog_action_restricted(type)) {
66797 if (capable(CAP_SYSLOG))
66798 return 0;
66799 diff --git a/kernel/profile.c b/kernel/profile.c
66800 index 76b8e77..a2930e8 100644
66801 --- a/kernel/profile.c
66802 +++ b/kernel/profile.c
66803 @@ -39,7 +39,7 @@ struct profile_hit {
66804 /* Oprofile timer tick hook */
66805 static int (*timer_hook)(struct pt_regs *) __read_mostly;
66806
66807 -static atomic_t *prof_buffer;
66808 +static atomic_unchecked_t *prof_buffer;
66809 static unsigned long prof_len, prof_shift;
66810
66811 int prof_on __read_mostly;
66812 @@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
66813 hits[i].pc = 0;
66814 continue;
66815 }
66816 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
66817 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
66818 hits[i].hits = hits[i].pc = 0;
66819 }
66820 }
66821 @@ -342,9 +342,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
66822 * Add the current hit(s) and flush the write-queue out
66823 * to the global buffer:
66824 */
66825 - atomic_add(nr_hits, &prof_buffer[pc]);
66826 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
66827 for (i = 0; i < NR_PROFILE_HIT; ++i) {
66828 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
66829 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
66830 hits[i].pc = hits[i].hits = 0;
66831 }
66832 out:
66833 @@ -419,7 +419,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
66834 {
66835 unsigned long pc;
66836 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
66837 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
66838 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
66839 }
66840 #endif /* !CONFIG_SMP */
66841
66842 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
66843 return -EFAULT;
66844 buf++; p++; count--; read++;
66845 }
66846 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
66847 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
66848 if (copy_to_user(buf, (void *)pnt, count))
66849 return -EFAULT;
66850 read += count;
66851 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
66852 }
66853 #endif
66854 profile_discard_flip_buffers();
66855 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
66856 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
66857 return count;
66858 }
66859
66860 diff --git a/kernel/ptrace.c b/kernel/ptrace.c
66861 index 00ab2ca..d237f61 100644
66862 --- a/kernel/ptrace.c
66863 +++ b/kernel/ptrace.c
66864 @@ -285,7 +285,7 @@ static int ptrace_attach(struct task_struct *task, long request,
66865 task->ptrace = PT_PTRACED;
66866 if (seize)
66867 task->ptrace |= PT_SEIZED;
66868 - if (ns_capable(task_user_ns(task), CAP_SYS_PTRACE))
66869 + if (ns_capable_nolog(task_user_ns(task), CAP_SYS_PTRACE))
66870 task->ptrace |= PT_PTRACE_CAP;
66871
66872 __ptrace_link(task, current);
66873 @@ -491,7 +491,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
66874 break;
66875 return -EIO;
66876 }
66877 - if (copy_to_user(dst, buf, retval))
66878 + if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
66879 return -EFAULT;
66880 copied += retval;
66881 src += retval;
66882 @@ -688,7 +688,7 @@ int ptrace_request(struct task_struct *child, long request,
66883 bool seized = child->ptrace & PT_SEIZED;
66884 int ret = -EIO;
66885 siginfo_t siginfo, *si;
66886 - void __user *datavp = (void __user *) data;
66887 + void __user *datavp = (__force void __user *) data;
66888 unsigned long __user *datalp = datavp;
66889 unsigned long flags;
66890
66891 @@ -890,14 +890,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
66892 goto out;
66893 }
66894
66895 + if (gr_handle_ptrace(child, request)) {
66896 + ret = -EPERM;
66897 + goto out_put_task_struct;
66898 + }
66899 +
66900 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
66901 ret = ptrace_attach(child, request, data);
66902 /*
66903 * Some architectures need to do book-keeping after
66904 * a ptrace attach.
66905 */
66906 - if (!ret)
66907 + if (!ret) {
66908 arch_ptrace_attach(child);
66909 + gr_audit_ptrace(child);
66910 + }
66911 goto out_put_task_struct;
66912 }
66913
66914 @@ -923,7 +930,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
66915 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
66916 if (copied != sizeof(tmp))
66917 return -EIO;
66918 - return put_user(tmp, (unsigned long __user *)data);
66919 + return put_user(tmp, (__force unsigned long __user *)data);
66920 }
66921
66922 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
66923 @@ -1033,14 +1040,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
66924 goto out;
66925 }
66926
66927 + if (gr_handle_ptrace(child, request)) {
66928 + ret = -EPERM;
66929 + goto out_put_task_struct;
66930 + }
66931 +
66932 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
66933 ret = ptrace_attach(child, request, data);
66934 /*
66935 * Some architectures need to do book-keeping after
66936 * a ptrace attach.
66937 */
66938 - if (!ret)
66939 + if (!ret) {
66940 arch_ptrace_attach(child);
66941 + gr_audit_ptrace(child);
66942 + }
66943 goto out_put_task_struct;
66944 }
66945
66946 diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
66947 index 977296d..c4744dc 100644
66948 --- a/kernel/rcutiny.c
66949 +++ b/kernel/rcutiny.c
66950 @@ -46,7 +46,7 @@
66951 struct rcu_ctrlblk;
66952 static void invoke_rcu_callbacks(void);
66953 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
66954 -static void rcu_process_callbacks(struct softirq_action *unused);
66955 +static void rcu_process_callbacks(void);
66956 static void __call_rcu(struct rcu_head *head,
66957 void (*func)(struct rcu_head *rcu),
66958 struct rcu_ctrlblk *rcp);
66959 @@ -297,7 +297,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
66960 rcu_is_callbacks_kthread()));
66961 }
66962
66963 -static void rcu_process_callbacks(struct softirq_action *unused)
66964 +static void rcu_process_callbacks(void)
66965 {
66966 __rcu_process_callbacks(&rcu_sched_ctrlblk);
66967 __rcu_process_callbacks(&rcu_bh_ctrlblk);
66968 diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
66969 index 9cb1ae4..aac7d3e 100644
66970 --- a/kernel/rcutiny_plugin.h
66971 +++ b/kernel/rcutiny_plugin.h
66972 @@ -920,7 +920,7 @@ static int rcu_kthread(void *arg)
66973 have_rcu_kthread_work = morework;
66974 local_irq_restore(flags);
66975 if (work)
66976 - rcu_process_callbacks(NULL);
66977 + rcu_process_callbacks();
66978 schedule_timeout_interruptible(1); /* Leave CPU for others. */
66979 }
66980
66981 diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
66982 index a58ac28..196a3d8 100644
66983 --- a/kernel/rcutorture.c
66984 +++ b/kernel/rcutorture.c
66985 @@ -148,12 +148,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
66986 { 0 };
66987 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
66988 { 0 };
66989 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
66990 -static atomic_t n_rcu_torture_alloc;
66991 -static atomic_t n_rcu_torture_alloc_fail;
66992 -static atomic_t n_rcu_torture_free;
66993 -static atomic_t n_rcu_torture_mberror;
66994 -static atomic_t n_rcu_torture_error;
66995 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
66996 +static atomic_unchecked_t n_rcu_torture_alloc;
66997 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
66998 +static atomic_unchecked_t n_rcu_torture_free;
66999 +static atomic_unchecked_t n_rcu_torture_mberror;
67000 +static atomic_unchecked_t n_rcu_torture_error;
67001 static long n_rcu_torture_boost_ktrerror;
67002 static long n_rcu_torture_boost_rterror;
67003 static long n_rcu_torture_boost_failure;
67004 @@ -243,11 +243,11 @@ rcu_torture_alloc(void)
67005
67006 spin_lock_bh(&rcu_torture_lock);
67007 if (list_empty(&rcu_torture_freelist)) {
67008 - atomic_inc(&n_rcu_torture_alloc_fail);
67009 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
67010 spin_unlock_bh(&rcu_torture_lock);
67011 return NULL;
67012 }
67013 - atomic_inc(&n_rcu_torture_alloc);
67014 + atomic_inc_unchecked(&n_rcu_torture_alloc);
67015 p = rcu_torture_freelist.next;
67016 list_del_init(p);
67017 spin_unlock_bh(&rcu_torture_lock);
67018 @@ -260,7 +260,7 @@ rcu_torture_alloc(void)
67019 static void
67020 rcu_torture_free(struct rcu_torture *p)
67021 {
67022 - atomic_inc(&n_rcu_torture_free);
67023 + atomic_inc_unchecked(&n_rcu_torture_free);
67024 spin_lock_bh(&rcu_torture_lock);
67025 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
67026 spin_unlock_bh(&rcu_torture_lock);
67027 @@ -380,7 +380,7 @@ rcu_torture_cb(struct rcu_head *p)
67028 i = rp->rtort_pipe_count;
67029 if (i > RCU_TORTURE_PIPE_LEN)
67030 i = RCU_TORTURE_PIPE_LEN;
67031 - atomic_inc(&rcu_torture_wcount[i]);
67032 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
67033 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
67034 rp->rtort_mbtest = 0;
67035 rcu_torture_free(rp);
67036 @@ -427,7 +427,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
67037 i = rp->rtort_pipe_count;
67038 if (i > RCU_TORTURE_PIPE_LEN)
67039 i = RCU_TORTURE_PIPE_LEN;
67040 - atomic_inc(&rcu_torture_wcount[i]);
67041 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
67042 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
67043 rp->rtort_mbtest = 0;
67044 list_del(&rp->rtort_free);
67045 @@ -916,7 +916,7 @@ rcu_torture_writer(void *arg)
67046 i = old_rp->rtort_pipe_count;
67047 if (i > RCU_TORTURE_PIPE_LEN)
67048 i = RCU_TORTURE_PIPE_LEN;
67049 - atomic_inc(&rcu_torture_wcount[i]);
67050 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
67051 old_rp->rtort_pipe_count++;
67052 cur_ops->deferred_free(old_rp);
67053 }
67054 @@ -997,7 +997,7 @@ static void rcu_torture_timer(unsigned long unused)
67055 return;
67056 }
67057 if (p->rtort_mbtest == 0)
67058 - atomic_inc(&n_rcu_torture_mberror);
67059 + atomic_inc_unchecked(&n_rcu_torture_mberror);
67060 spin_lock(&rand_lock);
67061 cur_ops->read_delay(&rand);
67062 n_rcu_torture_timers++;
67063 @@ -1061,7 +1061,7 @@ rcu_torture_reader(void *arg)
67064 continue;
67065 }
67066 if (p->rtort_mbtest == 0)
67067 - atomic_inc(&n_rcu_torture_mberror);
67068 + atomic_inc_unchecked(&n_rcu_torture_mberror);
67069 cur_ops->read_delay(&rand);
67070 preempt_disable();
67071 pipe_count = p->rtort_pipe_count;
67072 @@ -1123,10 +1123,10 @@ rcu_torture_printk(char *page)
67073 rcu_torture_current,
67074 rcu_torture_current_version,
67075 list_empty(&rcu_torture_freelist),
67076 - atomic_read(&n_rcu_torture_alloc),
67077 - atomic_read(&n_rcu_torture_alloc_fail),
67078 - atomic_read(&n_rcu_torture_free),
67079 - atomic_read(&n_rcu_torture_mberror),
67080 + atomic_read_unchecked(&n_rcu_torture_alloc),
67081 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
67082 + atomic_read_unchecked(&n_rcu_torture_free),
67083 + atomic_read_unchecked(&n_rcu_torture_mberror),
67084 n_rcu_torture_boost_ktrerror,
67085 n_rcu_torture_boost_rterror,
67086 n_rcu_torture_boost_failure,
67087 @@ -1136,7 +1136,7 @@ rcu_torture_printk(char *page)
67088 n_online_attempts,
67089 n_offline_successes,
67090 n_offline_attempts);
67091 - if (atomic_read(&n_rcu_torture_mberror) != 0 ||
67092 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
67093 n_rcu_torture_boost_ktrerror != 0 ||
67094 n_rcu_torture_boost_rterror != 0 ||
67095 n_rcu_torture_boost_failure != 0)
67096 @@ -1144,7 +1144,7 @@ rcu_torture_printk(char *page)
67097 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
67098 if (i > 1) {
67099 cnt += sprintf(&page[cnt], "!!! ");
67100 - atomic_inc(&n_rcu_torture_error);
67101 + atomic_inc_unchecked(&n_rcu_torture_error);
67102 WARN_ON_ONCE(1);
67103 }
67104 cnt += sprintf(&page[cnt], "Reader Pipe: ");
67105 @@ -1158,7 +1158,7 @@ rcu_torture_printk(char *page)
67106 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
67107 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
67108 cnt += sprintf(&page[cnt], " %d",
67109 - atomic_read(&rcu_torture_wcount[i]));
67110 + atomic_read_unchecked(&rcu_torture_wcount[i]));
67111 }
67112 cnt += sprintf(&page[cnt], "\n");
67113 if (cur_ops->stats)
67114 @@ -1600,7 +1600,7 @@ rcu_torture_cleanup(void)
67115
67116 if (cur_ops->cleanup)
67117 cur_ops->cleanup();
67118 - if (atomic_read(&n_rcu_torture_error))
67119 + if (atomic_read_unchecked(&n_rcu_torture_error))
67120 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
67121 else
67122 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
67123 @@ -1664,17 +1664,17 @@ rcu_torture_init(void)
67124
67125 rcu_torture_current = NULL;
67126 rcu_torture_current_version = 0;
67127 - atomic_set(&n_rcu_torture_alloc, 0);
67128 - atomic_set(&n_rcu_torture_alloc_fail, 0);
67129 - atomic_set(&n_rcu_torture_free, 0);
67130 - atomic_set(&n_rcu_torture_mberror, 0);
67131 - atomic_set(&n_rcu_torture_error, 0);
67132 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
67133 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
67134 + atomic_set_unchecked(&n_rcu_torture_free, 0);
67135 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
67136 + atomic_set_unchecked(&n_rcu_torture_error, 0);
67137 n_rcu_torture_boost_ktrerror = 0;
67138 n_rcu_torture_boost_rterror = 0;
67139 n_rcu_torture_boost_failure = 0;
67140 n_rcu_torture_boosts = 0;
67141 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
67142 - atomic_set(&rcu_torture_wcount[i], 0);
67143 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
67144 for_each_possible_cpu(cpu) {
67145 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
67146 per_cpu(rcu_torture_count, cpu)[i] = 0;
67147 diff --git a/kernel/rcutree.c b/kernel/rcutree.c
67148 index 6c4a672..70f3202 100644
67149 --- a/kernel/rcutree.c
67150 +++ b/kernel/rcutree.c
67151 @@ -363,9 +363,9 @@ static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
67152 rcu_prepare_for_idle(smp_processor_id());
67153 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
67154 smp_mb__before_atomic_inc(); /* See above. */
67155 - atomic_inc(&rdtp->dynticks);
67156 + atomic_inc_unchecked(&rdtp->dynticks);
67157 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
67158 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
67159 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
67160 }
67161
67162 /**
67163 @@ -438,10 +438,10 @@ void rcu_irq_exit(void)
67164 static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
67165 {
67166 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
67167 - atomic_inc(&rdtp->dynticks);
67168 + atomic_inc_unchecked(&rdtp->dynticks);
67169 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
67170 smp_mb__after_atomic_inc(); /* See above. */
67171 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
67172 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
67173 rcu_cleanup_after_idle(smp_processor_id());
67174 trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
67175 if (!is_idle_task(current)) {
67176 @@ -531,14 +531,14 @@ void rcu_nmi_enter(void)
67177 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
67178
67179 if (rdtp->dynticks_nmi_nesting == 0 &&
67180 - (atomic_read(&rdtp->dynticks) & 0x1))
67181 + (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
67182 return;
67183 rdtp->dynticks_nmi_nesting++;
67184 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
67185 - atomic_inc(&rdtp->dynticks);
67186 + atomic_inc_unchecked(&rdtp->dynticks);
67187 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
67188 smp_mb__after_atomic_inc(); /* See above. */
67189 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
67190 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
67191 }
67192
67193 /**
67194 @@ -557,9 +557,9 @@ void rcu_nmi_exit(void)
67195 return;
67196 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
67197 smp_mb__before_atomic_inc(); /* See above. */
67198 - atomic_inc(&rdtp->dynticks);
67199 + atomic_inc_unchecked(&rdtp->dynticks);
67200 smp_mb__after_atomic_inc(); /* Force delay to next write. */
67201 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
67202 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
67203 }
67204
67205 #ifdef CONFIG_PROVE_RCU
67206 @@ -575,7 +575,7 @@ int rcu_is_cpu_idle(void)
67207 int ret;
67208
67209 preempt_disable();
67210 - ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
67211 + ret = (atomic_read_unchecked(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
67212 preempt_enable();
67213 return ret;
67214 }
67215 @@ -604,7 +604,7 @@ int rcu_is_cpu_rrupt_from_idle(void)
67216 */
67217 static int dyntick_save_progress_counter(struct rcu_data *rdp)
67218 {
67219 - rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
67220 + rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
67221 return (rdp->dynticks_snap & 0x1) == 0;
67222 }
67223
67224 @@ -619,7 +619,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
67225 unsigned int curr;
67226 unsigned int snap;
67227
67228 - curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
67229 + curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
67230 snap = (unsigned int)rdp->dynticks_snap;
67231
67232 /*
67233 @@ -1667,7 +1667,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
67234 /*
67235 * Do RCU core processing for the current CPU.
67236 */
67237 -static void rcu_process_callbacks(struct softirq_action *unused)
67238 +static void rcu_process_callbacks(void)
67239 {
67240 trace_rcu_utilization("Start RCU core");
67241 __rcu_process_callbacks(&rcu_sched_state,
67242 @@ -2030,7 +2030,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
67243 rdp->qlen = 0;
67244 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
67245 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_NESTING);
67246 - WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
67247 + WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
67248 rdp->cpu = cpu;
67249 rdp->rsp = rsp;
67250 raw_spin_unlock_irqrestore(&rnp->lock, flags);
67251 @@ -2058,8 +2058,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
67252 rdp->n_force_qs_snap = rsp->n_force_qs;
67253 rdp->blimit = blimit;
67254 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_NESTING;
67255 - atomic_set(&rdp->dynticks->dynticks,
67256 - (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
67257 + atomic_set_unchecked(&rdp->dynticks->dynticks,
67258 + (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
67259 rcu_prepare_for_idle_init(cpu);
67260 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
67261
67262 diff --git a/kernel/rcutree.h b/kernel/rcutree.h
67263 index fddff92..2c08359 100644
67264 --- a/kernel/rcutree.h
67265 +++ b/kernel/rcutree.h
67266 @@ -87,7 +87,7 @@ struct rcu_dynticks {
67267 long long dynticks_nesting; /* Track irq/process nesting level. */
67268 /* Process level is worth LLONG_MAX/2. */
67269 int dynticks_nmi_nesting; /* Track NMI nesting level. */
67270 - atomic_t dynticks; /* Even value for idle, else odd. */
67271 + atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
67272 };
67273
67274 /* RCU's kthread states for tracing. */
67275 diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
67276 index 8bb35d7..6ea0a463 100644
67277 --- a/kernel/rcutree_plugin.h
67278 +++ b/kernel/rcutree_plugin.h
67279 @@ -850,7 +850,7 @@ void synchronize_rcu_expedited(void)
67280
67281 /* Clean up and exit. */
67282 smp_mb(); /* ensure expedited GP seen before counter increment. */
67283 - ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
67284 + ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
67285 unlock_mb_ret:
67286 mutex_unlock(&sync_rcu_preempt_exp_mutex);
67287 mb_ret:
67288 @@ -1833,8 +1833,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
67289
67290 #else /* #ifndef CONFIG_SMP */
67291
67292 -static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
67293 -static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
67294 +static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
67295 +static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
67296
67297 static int synchronize_sched_expedited_cpu_stop(void *data)
67298 {
67299 @@ -1889,7 +1889,7 @@ void synchronize_sched_expedited(void)
67300 int firstsnap, s, snap, trycount = 0;
67301
67302 /* Note that atomic_inc_return() implies full memory barrier. */
67303 - firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
67304 + firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
67305 get_online_cpus();
67306
67307 /*
67308 @@ -1910,7 +1910,7 @@ void synchronize_sched_expedited(void)
67309 }
67310
67311 /* Check to see if someone else did our work for us. */
67312 - s = atomic_read(&sync_sched_expedited_done);
67313 + s = atomic_read_unchecked(&sync_sched_expedited_done);
67314 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
67315 smp_mb(); /* ensure test happens before caller kfree */
67316 return;
67317 @@ -1925,7 +1925,7 @@ void synchronize_sched_expedited(void)
67318 * grace period works for us.
67319 */
67320 get_online_cpus();
67321 - snap = atomic_read(&sync_sched_expedited_started);
67322 + snap = atomic_read_unchecked(&sync_sched_expedited_started);
67323 smp_mb(); /* ensure read is before try_stop_cpus(). */
67324 }
67325
67326 @@ -1936,12 +1936,12 @@ void synchronize_sched_expedited(void)
67327 * than we did beat us to the punch.
67328 */
67329 do {
67330 - s = atomic_read(&sync_sched_expedited_done);
67331 + s = atomic_read_unchecked(&sync_sched_expedited_done);
67332 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
67333 smp_mb(); /* ensure test happens before caller kfree */
67334 break;
67335 }
67336 - } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
67337 + } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
67338
67339 put_online_cpus();
67340 }
67341 diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
67342 index 654cfe6..c0b28e2 100644
67343 --- a/kernel/rcutree_trace.c
67344 +++ b/kernel/rcutree_trace.c
67345 @@ -68,7 +68,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
67346 rdp->passed_quiesce, rdp->passed_quiesce_gpnum,
67347 rdp->qs_pending);
67348 seq_printf(m, " dt=%d/%llx/%d df=%lu",
67349 - atomic_read(&rdp->dynticks->dynticks),
67350 + atomic_read_unchecked(&rdp->dynticks->dynticks),
67351 rdp->dynticks->dynticks_nesting,
67352 rdp->dynticks->dynticks_nmi_nesting,
67353 rdp->dynticks_fqs);
67354 @@ -140,7 +140,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
67355 rdp->passed_quiesce, rdp->passed_quiesce_gpnum,
67356 rdp->qs_pending);
67357 seq_printf(m, ",%d,%llx,%d,%lu",
67358 - atomic_read(&rdp->dynticks->dynticks),
67359 + atomic_read_unchecked(&rdp->dynticks->dynticks),
67360 rdp->dynticks->dynticks_nesting,
67361 rdp->dynticks->dynticks_nmi_nesting,
67362 rdp->dynticks_fqs);
67363 diff --git a/kernel/resource.c b/kernel/resource.c
67364 index 7640b3a..5879283 100644
67365 --- a/kernel/resource.c
67366 +++ b/kernel/resource.c
67367 @@ -141,8 +141,18 @@ static const struct file_operations proc_iomem_operations = {
67368
67369 static int __init ioresources_init(void)
67370 {
67371 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
67372 +#ifdef CONFIG_GRKERNSEC_PROC_USER
67373 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
67374 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
67375 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67376 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
67377 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
67378 +#endif
67379 +#else
67380 proc_create("ioports", 0, NULL, &proc_ioports_operations);
67381 proc_create("iomem", 0, NULL, &proc_iomem_operations);
67382 +#endif
67383 return 0;
67384 }
67385 __initcall(ioresources_init);
67386 diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
67387 index 98ec494..4241d6d 100644
67388 --- a/kernel/rtmutex-tester.c
67389 +++ b/kernel/rtmutex-tester.c
67390 @@ -20,7 +20,7 @@
67391 #define MAX_RT_TEST_MUTEXES 8
67392
67393 static spinlock_t rttest_lock;
67394 -static atomic_t rttest_event;
67395 +static atomic_unchecked_t rttest_event;
67396
67397 struct test_thread_data {
67398 int opcode;
67399 @@ -61,7 +61,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
67400
67401 case RTTEST_LOCKCONT:
67402 td->mutexes[td->opdata] = 1;
67403 - td->event = atomic_add_return(1, &rttest_event);
67404 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67405 return 0;
67406
67407 case RTTEST_RESET:
67408 @@ -74,7 +74,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
67409 return 0;
67410
67411 case RTTEST_RESETEVENT:
67412 - atomic_set(&rttest_event, 0);
67413 + atomic_set_unchecked(&rttest_event, 0);
67414 return 0;
67415
67416 default:
67417 @@ -91,9 +91,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
67418 return ret;
67419
67420 td->mutexes[id] = 1;
67421 - td->event = atomic_add_return(1, &rttest_event);
67422 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67423 rt_mutex_lock(&mutexes[id]);
67424 - td->event = atomic_add_return(1, &rttest_event);
67425 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67426 td->mutexes[id] = 4;
67427 return 0;
67428
67429 @@ -104,9 +104,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
67430 return ret;
67431
67432 td->mutexes[id] = 1;
67433 - td->event = atomic_add_return(1, &rttest_event);
67434 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67435 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
67436 - td->event = atomic_add_return(1, &rttest_event);
67437 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67438 td->mutexes[id] = ret ? 0 : 4;
67439 return ret ? -EINTR : 0;
67440
67441 @@ -115,9 +115,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
67442 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
67443 return ret;
67444
67445 - td->event = atomic_add_return(1, &rttest_event);
67446 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67447 rt_mutex_unlock(&mutexes[id]);
67448 - td->event = atomic_add_return(1, &rttest_event);
67449 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67450 td->mutexes[id] = 0;
67451 return 0;
67452
67453 @@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
67454 break;
67455
67456 td->mutexes[dat] = 2;
67457 - td->event = atomic_add_return(1, &rttest_event);
67458 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67459 break;
67460
67461 default:
67462 @@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
67463 return;
67464
67465 td->mutexes[dat] = 3;
67466 - td->event = atomic_add_return(1, &rttest_event);
67467 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67468 break;
67469
67470 case RTTEST_LOCKNOWAIT:
67471 @@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
67472 return;
67473
67474 td->mutexes[dat] = 1;
67475 - td->event = atomic_add_return(1, &rttest_event);
67476 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67477 return;
67478
67479 default:
67480 diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
67481 index e8a1f83..363d17d 100644
67482 --- a/kernel/sched/auto_group.c
67483 +++ b/kernel/sched/auto_group.c
67484 @@ -11,7 +11,7 @@
67485
67486 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
67487 static struct autogroup autogroup_default;
67488 -static atomic_t autogroup_seq_nr;
67489 +static atomic_unchecked_t autogroup_seq_nr;
67490
67491 void __init autogroup_init(struct task_struct *init_task)
67492 {
67493 @@ -78,7 +78,7 @@ static inline struct autogroup *autogroup_create(void)
67494
67495 kref_init(&ag->kref);
67496 init_rwsem(&ag->lock);
67497 - ag->id = atomic_inc_return(&autogroup_seq_nr);
67498 + ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
67499 ag->tg = tg;
67500 #ifdef CONFIG_RT_GROUP_SCHED
67501 /*
67502 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
67503 index b342f57..00324a0 100644
67504 --- a/kernel/sched/core.c
67505 +++ b/kernel/sched/core.c
67506 @@ -3143,6 +3143,19 @@ pick_next_task(struct rq *rq)
67507 BUG(); /* the idle class will always have a runnable task */
67508 }
67509
67510 +#ifdef CONFIG_GRKERNSEC_SETXID
67511 +extern void gr_delayed_cred_worker(void);
67512 +static inline void gr_cred_schedule(void)
67513 +{
67514 + if (unlikely(current->delayed_cred))
67515 + gr_delayed_cred_worker();
67516 +}
67517 +#else
67518 +static inline void gr_cred_schedule(void)
67519 +{
67520 +}
67521 +#endif
67522 +
67523 /*
67524 * __schedule() is the main scheduler function.
67525 */
67526 @@ -3162,6 +3175,8 @@ need_resched:
67527
67528 schedule_debug(prev);
67529
67530 + gr_cred_schedule();
67531 +
67532 if (sched_feat(HRTICK))
67533 hrtick_clear(rq);
67534
67535 @@ -3852,6 +3867,8 @@ int can_nice(const struct task_struct *p, const int nice)
67536 /* convert nice value [19,-20] to rlimit style value [1,40] */
67537 int nice_rlim = 20 - nice;
67538
67539 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
67540 +
67541 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
67542 capable(CAP_SYS_NICE));
67543 }
67544 @@ -3885,7 +3902,8 @@ SYSCALL_DEFINE1(nice, int, increment)
67545 if (nice > 19)
67546 nice = 19;
67547
67548 - if (increment < 0 && !can_nice(current, nice))
67549 + if (increment < 0 && (!can_nice(current, nice) ||
67550 + gr_handle_chroot_nice()))
67551 return -EPERM;
67552
67553 retval = security_task_setnice(current, nice);
67554 @@ -4042,6 +4060,7 @@ recheck:
67555 unsigned long rlim_rtprio =
67556 task_rlimit(p, RLIMIT_RTPRIO);
67557
67558 + gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
67559 /* can't set/change the rt policy */
67560 if (policy != p->policy && !rlim_rtprio)
67561 return -EPERM;
67562 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
67563 index aca16b8..8e3acc4 100644
67564 --- a/kernel/sched/fair.c
67565 +++ b/kernel/sched/fair.c
67566 @@ -5147,7 +5147,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
67567 * run_rebalance_domains is triggered when needed from the scheduler tick.
67568 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
67569 */
67570 -static void run_rebalance_domains(struct softirq_action *h)
67571 +static void run_rebalance_domains(void)
67572 {
67573 int this_cpu = smp_processor_id();
67574 struct rq *this_rq = cpu_rq(this_cpu);
67575 diff --git a/kernel/signal.c b/kernel/signal.c
67576 index c73c428..7040057 100644
67577 --- a/kernel/signal.c
67578 +++ b/kernel/signal.c
67579 @@ -46,12 +46,12 @@ static struct kmem_cache *sigqueue_cachep;
67580
67581 int print_fatal_signals __read_mostly;
67582
67583 -static void __user *sig_handler(struct task_struct *t, int sig)
67584 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
67585 {
67586 return t->sighand->action[sig - 1].sa.sa_handler;
67587 }
67588
67589 -static int sig_handler_ignored(void __user *handler, int sig)
67590 +static int sig_handler_ignored(__sighandler_t handler, int sig)
67591 {
67592 /* Is it explicitly or implicitly ignored? */
67593 return handler == SIG_IGN ||
67594 @@ -61,7 +61,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
67595 static int sig_task_ignored(struct task_struct *t, int sig,
67596 int from_ancestor_ns)
67597 {
67598 - void __user *handler;
67599 + __sighandler_t handler;
67600
67601 handler = sig_handler(t, sig);
67602
67603 @@ -365,6 +365,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
67604 atomic_inc(&user->sigpending);
67605 rcu_read_unlock();
67606
67607 + if (!override_rlimit)
67608 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
67609 +
67610 if (override_rlimit ||
67611 atomic_read(&user->sigpending) <=
67612 task_rlimit(t, RLIMIT_SIGPENDING)) {
67613 @@ -489,7 +492,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
67614
67615 int unhandled_signal(struct task_struct *tsk, int sig)
67616 {
67617 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
67618 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
67619 if (is_global_init(tsk))
67620 return 1;
67621 if (handler != SIG_IGN && handler != SIG_DFL)
67622 @@ -816,6 +819,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
67623 }
67624 }
67625
67626 + /* allow glibc communication via tgkill to other threads in our
67627 + thread group */
67628 + if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
67629 + sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
67630 + && gr_handle_signal(t, sig))
67631 + return -EPERM;
67632 +
67633 return security_task_kill(t, info, sig, 0);
67634 }
67635
67636 @@ -1197,7 +1207,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
67637 return send_signal(sig, info, p, 1);
67638 }
67639
67640 -static int
67641 +int
67642 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
67643 {
67644 return send_signal(sig, info, t, 0);
67645 @@ -1234,6 +1244,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
67646 unsigned long int flags;
67647 int ret, blocked, ignored;
67648 struct k_sigaction *action;
67649 + int is_unhandled = 0;
67650
67651 spin_lock_irqsave(&t->sighand->siglock, flags);
67652 action = &t->sighand->action[sig-1];
67653 @@ -1248,9 +1259,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
67654 }
67655 if (action->sa.sa_handler == SIG_DFL)
67656 t->signal->flags &= ~SIGNAL_UNKILLABLE;
67657 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
67658 + is_unhandled = 1;
67659 ret = specific_send_sig_info(sig, info, t);
67660 spin_unlock_irqrestore(&t->sighand->siglock, flags);
67661
67662 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
67663 + normal operation */
67664 + if (is_unhandled) {
67665 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
67666 + gr_handle_crash(t, sig);
67667 + }
67668 +
67669 return ret;
67670 }
67671
67672 @@ -1317,8 +1337,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
67673 ret = check_kill_permission(sig, info, p);
67674 rcu_read_unlock();
67675
67676 - if (!ret && sig)
67677 + if (!ret && sig) {
67678 ret = do_send_sig_info(sig, info, p, true);
67679 + if (!ret)
67680 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
67681 + }
67682
67683 return ret;
67684 }
67685 @@ -2820,7 +2843,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
67686 int error = -ESRCH;
67687
67688 rcu_read_lock();
67689 - p = find_task_by_vpid(pid);
67690 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
67691 + /* allow glibc communication via tgkill to other threads in our
67692 + thread group */
67693 + if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
67694 + sig == (SIGRTMIN+1) && tgid == info->si_pid)
67695 + p = find_task_by_vpid_unrestricted(pid);
67696 + else
67697 +#endif
67698 + p = find_task_by_vpid(pid);
67699 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
67700 error = check_kill_permission(sig, info, p);
67701 /*
67702 diff --git a/kernel/smp.c b/kernel/smp.c
67703 index db197d6..17aef0b 100644
67704 --- a/kernel/smp.c
67705 +++ b/kernel/smp.c
67706 @@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t func, void *info, int wait)
67707 }
67708 EXPORT_SYMBOL(smp_call_function);
67709
67710 -void ipi_call_lock(void)
67711 +void ipi_call_lock(void) __acquires(call_function.lock)
67712 {
67713 raw_spin_lock(&call_function.lock);
67714 }
67715
67716 -void ipi_call_unlock(void)
67717 +void ipi_call_unlock(void) __releases(call_function.lock)
67718 {
67719 raw_spin_unlock(&call_function.lock);
67720 }
67721
67722 -void ipi_call_lock_irq(void)
67723 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
67724 {
67725 raw_spin_lock_irq(&call_function.lock);
67726 }
67727
67728 -void ipi_call_unlock_irq(void)
67729 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
67730 {
67731 raw_spin_unlock_irq(&call_function.lock);
67732 }
67733 diff --git a/kernel/softirq.c b/kernel/softirq.c
67734 index 4eb3a0f..6f1fa81 100644
67735 --- a/kernel/softirq.c
67736 +++ b/kernel/softirq.c
67737 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
67738
67739 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
67740
67741 -char *softirq_to_name[NR_SOFTIRQS] = {
67742 +const char * const softirq_to_name[NR_SOFTIRQS] = {
67743 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
67744 "TASKLET", "SCHED", "HRTIMER", "RCU"
67745 };
67746 @@ -235,7 +235,7 @@ restart:
67747 kstat_incr_softirqs_this_cpu(vec_nr);
67748
67749 trace_softirq_entry(vec_nr);
67750 - h->action(h);
67751 + h->action();
67752 trace_softirq_exit(vec_nr);
67753 if (unlikely(prev_count != preempt_count())) {
67754 printk(KERN_ERR "huh, entered softirq %u %s %p"
67755 @@ -385,9 +385,11 @@ void raise_softirq(unsigned int nr)
67756 local_irq_restore(flags);
67757 }
67758
67759 -void open_softirq(int nr, void (*action)(struct softirq_action *))
67760 +void open_softirq(int nr, void (*action)(void))
67761 {
67762 - softirq_vec[nr].action = action;
67763 + pax_open_kernel();
67764 + *(void **)&softirq_vec[nr].action = action;
67765 + pax_close_kernel();
67766 }
67767
67768 /*
67769 @@ -441,7 +443,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
67770
67771 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
67772
67773 -static void tasklet_action(struct softirq_action *a)
67774 +static void tasklet_action(void)
67775 {
67776 struct tasklet_struct *list;
67777
67778 @@ -476,7 +478,7 @@ static void tasklet_action(struct softirq_action *a)
67779 }
67780 }
67781
67782 -static void tasklet_hi_action(struct softirq_action *a)
67783 +static void tasklet_hi_action(void)
67784 {
67785 struct tasklet_struct *list;
67786
67787 diff --git a/kernel/sys.c b/kernel/sys.c
67788 index 888d227..f04b318 100644
67789 --- a/kernel/sys.c
67790 +++ b/kernel/sys.c
67791 @@ -158,6 +158,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
67792 error = -EACCES;
67793 goto out;
67794 }
67795 +
67796 + if (gr_handle_chroot_setpriority(p, niceval)) {
67797 + error = -EACCES;
67798 + goto out;
67799 + }
67800 +
67801 no_nice = security_task_setnice(p, niceval);
67802 if (no_nice) {
67803 error = no_nice;
67804 @@ -572,6 +578,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
67805 goto error;
67806 }
67807
67808 + if (gr_check_group_change(new->gid, new->egid, -1))
67809 + goto error;
67810 +
67811 if (rgid != (gid_t) -1 ||
67812 (egid != (gid_t) -1 && egid != old->gid))
67813 new->sgid = new->egid;
67814 @@ -601,6 +610,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
67815 old = current_cred();
67816
67817 retval = -EPERM;
67818 +
67819 + if (gr_check_group_change(gid, gid, gid))
67820 + goto error;
67821 +
67822 if (nsown_capable(CAP_SETGID))
67823 new->gid = new->egid = new->sgid = new->fsgid = gid;
67824 else if (gid == old->gid || gid == old->sgid)
67825 @@ -618,7 +631,7 @@ error:
67826 /*
67827 * change the user struct in a credentials set to match the new UID
67828 */
67829 -static int set_user(struct cred *new)
67830 +int set_user(struct cred *new)
67831 {
67832 struct user_struct *new_user;
67833
67834 @@ -688,6 +701,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
67835 goto error;
67836 }
67837
67838 + if (gr_check_user_change(new->uid, new->euid, -1))
67839 + goto error;
67840 +
67841 if (new->uid != old->uid) {
67842 retval = set_user(new);
67843 if (retval < 0)
67844 @@ -732,6 +748,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
67845 old = current_cred();
67846
67847 retval = -EPERM;
67848 +
67849 + if (gr_check_crash_uid(uid))
67850 + goto error;
67851 + if (gr_check_user_change(uid, uid, uid))
67852 + goto error;
67853 +
67854 if (nsown_capable(CAP_SETUID)) {
67855 new->suid = new->uid = uid;
67856 if (uid != old->uid) {
67857 @@ -786,6 +808,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
67858 goto error;
67859 }
67860
67861 + if (gr_check_user_change(ruid, euid, -1))
67862 + goto error;
67863 +
67864 if (ruid != (uid_t) -1) {
67865 new->uid = ruid;
67866 if (ruid != old->uid) {
67867 @@ -850,6 +875,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
67868 goto error;
67869 }
67870
67871 + if (gr_check_group_change(rgid, egid, -1))
67872 + goto error;
67873 +
67874 if (rgid != (gid_t) -1)
67875 new->gid = rgid;
67876 if (egid != (gid_t) -1)
67877 @@ -896,6 +924,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
67878 old = current_cred();
67879 old_fsuid = old->fsuid;
67880
67881 + if (gr_check_user_change(-1, -1, uid))
67882 + goto error;
67883 +
67884 if (uid == old->uid || uid == old->euid ||
67885 uid == old->suid || uid == old->fsuid ||
67886 nsown_capable(CAP_SETUID)) {
67887 @@ -906,6 +937,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
67888 }
67889 }
67890
67891 +error:
67892 abort_creds(new);
67893 return old_fsuid;
67894
67895 @@ -932,12 +964,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
67896 if (gid == old->gid || gid == old->egid ||
67897 gid == old->sgid || gid == old->fsgid ||
67898 nsown_capable(CAP_SETGID)) {
67899 + if (gr_check_group_change(-1, -1, gid))
67900 + goto error;
67901 +
67902 if (gid != old_fsgid) {
67903 new->fsgid = gid;
67904 goto change_okay;
67905 }
67906 }
67907
67908 +error:
67909 abort_creds(new);
67910 return old_fsgid;
67911
67912 @@ -1189,7 +1225,10 @@ static int override_release(char __user *release, int len)
67913 }
67914 v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
67915 snprintf(buf, len, "2.6.%u%s", v, rest);
67916 - ret = copy_to_user(release, buf, len);
67917 + if (len > sizeof(buf))
67918 + ret = -EFAULT;
67919 + else
67920 + ret = copy_to_user(release, buf, len);
67921 }
67922 return ret;
67923 }
67924 @@ -1243,19 +1282,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
67925 return -EFAULT;
67926
67927 down_read(&uts_sem);
67928 - error = __copy_to_user(&name->sysname, &utsname()->sysname,
67929 + error = __copy_to_user(name->sysname, &utsname()->sysname,
67930 __OLD_UTS_LEN);
67931 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
67932 - error |= __copy_to_user(&name->nodename, &utsname()->nodename,
67933 + error |= __copy_to_user(name->nodename, &utsname()->nodename,
67934 __OLD_UTS_LEN);
67935 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
67936 - error |= __copy_to_user(&name->release, &utsname()->release,
67937 + error |= __copy_to_user(name->release, &utsname()->release,
67938 __OLD_UTS_LEN);
67939 error |= __put_user(0, name->release + __OLD_UTS_LEN);
67940 - error |= __copy_to_user(&name->version, &utsname()->version,
67941 + error |= __copy_to_user(name->version, &utsname()->version,
67942 __OLD_UTS_LEN);
67943 error |= __put_user(0, name->version + __OLD_UTS_LEN);
67944 - error |= __copy_to_user(&name->machine, &utsname()->machine,
67945 + error |= __copy_to_user(name->machine, &utsname()->machine,
67946 __OLD_UTS_LEN);
67947 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
67948 up_read(&uts_sem);
67949 @@ -1838,7 +1877,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
67950 error = get_dumpable(me->mm);
67951 break;
67952 case PR_SET_DUMPABLE:
67953 - if (arg2 < 0 || arg2 > 1) {
67954 + if (arg2 > 1) {
67955 error = -EINVAL;
67956 break;
67957 }
67958 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
67959 index f487f25..9056a9e 100644
67960 --- a/kernel/sysctl.c
67961 +++ b/kernel/sysctl.c
67962 @@ -86,6 +86,13 @@
67963
67964
67965 #if defined(CONFIG_SYSCTL)
67966 +#include <linux/grsecurity.h>
67967 +#include <linux/grinternal.h>
67968 +
67969 +extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
67970 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
67971 + const int op);
67972 +extern int gr_handle_chroot_sysctl(const int op);
67973
67974 /* External variables not in a header file. */
67975 extern int sysctl_overcommit_memory;
67976 @@ -191,6 +198,7 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
67977 }
67978
67979 #endif
67980 +extern struct ctl_table grsecurity_table[];
67981
67982 static struct ctl_table root_table[];
67983 static struct ctl_table_root sysctl_table_root;
67984 @@ -220,6 +228,20 @@ extern struct ctl_table epoll_table[];
67985 int sysctl_legacy_va_layout;
67986 #endif
67987
67988 +#ifdef CONFIG_PAX_SOFTMODE
67989 +static ctl_table pax_table[] = {
67990 + {
67991 + .procname = "softmode",
67992 + .data = &pax_softmode,
67993 + .maxlen = sizeof(unsigned int),
67994 + .mode = 0600,
67995 + .proc_handler = &proc_dointvec,
67996 + },
67997 +
67998 + { }
67999 +};
68000 +#endif
68001 +
68002 /* The default sysctl tables: */
68003
68004 static struct ctl_table root_table[] = {
68005 @@ -266,6 +288,22 @@ static int max_extfrag_threshold = 1000;
68006 #endif
68007
68008 static struct ctl_table kern_table[] = {
68009 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
68010 + {
68011 + .procname = "grsecurity",
68012 + .mode = 0500,
68013 + .child = grsecurity_table,
68014 + },
68015 +#endif
68016 +
68017 +#ifdef CONFIG_PAX_SOFTMODE
68018 + {
68019 + .procname = "pax",
68020 + .mode = 0500,
68021 + .child = pax_table,
68022 + },
68023 +#endif
68024 +
68025 {
68026 .procname = "sched_child_runs_first",
68027 .data = &sysctl_sched_child_runs_first,
68028 @@ -550,7 +588,7 @@ static struct ctl_table kern_table[] = {
68029 .data = &modprobe_path,
68030 .maxlen = KMOD_PATH_LEN,
68031 .mode = 0644,
68032 - .proc_handler = proc_dostring,
68033 + .proc_handler = proc_dostring_modpriv,
68034 },
68035 {
68036 .procname = "modules_disabled",
68037 @@ -717,16 +755,20 @@ static struct ctl_table kern_table[] = {
68038 .extra1 = &zero,
68039 .extra2 = &one,
68040 },
68041 +#endif
68042 {
68043 .procname = "kptr_restrict",
68044 .data = &kptr_restrict,
68045 .maxlen = sizeof(int),
68046 .mode = 0644,
68047 .proc_handler = proc_dmesg_restrict,
68048 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68049 + .extra1 = &two,
68050 +#else
68051 .extra1 = &zero,
68052 +#endif
68053 .extra2 = &two,
68054 },
68055 -#endif
68056 {
68057 .procname = "ngroups_max",
68058 .data = &ngroups_max,
68059 @@ -1225,6 +1267,13 @@ static struct ctl_table vm_table[] = {
68060 .proc_handler = proc_dointvec_minmax,
68061 .extra1 = &zero,
68062 },
68063 + {
68064 + .procname = "heap_stack_gap",
68065 + .data = &sysctl_heap_stack_gap,
68066 + .maxlen = sizeof(sysctl_heap_stack_gap),
68067 + .mode = 0644,
68068 + .proc_handler = proc_doulongvec_minmax,
68069 + },
68070 #else
68071 {
68072 .procname = "nr_trim_pages",
68073 @@ -1729,6 +1778,17 @@ static int test_perm(int mode, int op)
68074 int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
68075 {
68076 int mode;
68077 + int error;
68078 +
68079 + if (table->parent != NULL && table->parent->procname != NULL &&
68080 + table->procname != NULL &&
68081 + gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
68082 + return -EACCES;
68083 + if (gr_handle_chroot_sysctl(op))
68084 + return -EACCES;
68085 + error = gr_handle_sysctl(table, op);
68086 + if (error)
68087 + return error;
68088
68089 if (root->permissions)
68090 mode = root->permissions(root, current->nsproxy, table);
68091 @@ -2133,6 +2193,16 @@ int proc_dostring(struct ctl_table *table, int write,
68092 buffer, lenp, ppos);
68093 }
68094
68095 +int proc_dostring_modpriv(struct ctl_table *table, int write,
68096 + void __user *buffer, size_t *lenp, loff_t *ppos)
68097 +{
68098 + if (write && !capable(CAP_SYS_MODULE))
68099 + return -EPERM;
68100 +
68101 + return _proc_do_string(table->data, table->maxlen, write,
68102 + buffer, lenp, ppos);
68103 +}
68104 +
68105 static size_t proc_skip_spaces(char **buf)
68106 {
68107 size_t ret;
68108 @@ -2238,6 +2308,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
68109 len = strlen(tmp);
68110 if (len > *size)
68111 len = *size;
68112 + if (len > sizeof(tmp))
68113 + len = sizeof(tmp);
68114 if (copy_to_user(*buf, tmp, len))
68115 return -EFAULT;
68116 *size -= len;
68117 @@ -2554,8 +2626,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
68118 *i = val;
68119 } else {
68120 val = convdiv * (*i) / convmul;
68121 - if (!first)
68122 + if (!first) {
68123 err = proc_put_char(&buffer, &left, '\t');
68124 + if (err)
68125 + break;
68126 + }
68127 err = proc_put_long(&buffer, &left, val, false);
68128 if (err)
68129 break;
68130 @@ -2950,6 +3025,12 @@ int proc_dostring(struct ctl_table *table, int write,
68131 return -ENOSYS;
68132 }
68133
68134 +int proc_dostring_modpriv(struct ctl_table *table, int write,
68135 + void __user *buffer, size_t *lenp, loff_t *ppos)
68136 +{
68137 + return -ENOSYS;
68138 +}
68139 +
68140 int proc_dointvec(struct ctl_table *table, int write,
68141 void __user *buffer, size_t *lenp, loff_t *ppos)
68142 {
68143 @@ -3006,6 +3087,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
68144 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
68145 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
68146 EXPORT_SYMBOL(proc_dostring);
68147 +EXPORT_SYMBOL(proc_dostring_modpriv);
68148 EXPORT_SYMBOL(proc_doulongvec_minmax);
68149 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
68150 EXPORT_SYMBOL(register_sysctl_table);
68151 diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
68152 index a650694..aaeeb20 100644
68153 --- a/kernel/sysctl_binary.c
68154 +++ b/kernel/sysctl_binary.c
68155 @@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *file,
68156 int i;
68157
68158 set_fs(KERNEL_DS);
68159 - result = vfs_read(file, buffer, BUFSZ - 1, &pos);
68160 + result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
68161 set_fs(old_fs);
68162 if (result < 0)
68163 goto out_kfree;
68164 @@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *file,
68165 }
68166
68167 set_fs(KERNEL_DS);
68168 - result = vfs_write(file, buffer, str - buffer, &pos);
68169 + result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
68170 set_fs(old_fs);
68171 if (result < 0)
68172 goto out_kfree;
68173 @@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file *file,
68174 int i;
68175
68176 set_fs(KERNEL_DS);
68177 - result = vfs_read(file, buffer, BUFSZ - 1, &pos);
68178 + result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
68179 set_fs(old_fs);
68180 if (result < 0)
68181 goto out_kfree;
68182 @@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file *file,
68183 }
68184
68185 set_fs(KERNEL_DS);
68186 - result = vfs_write(file, buffer, str - buffer, &pos);
68187 + result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
68188 set_fs(old_fs);
68189 if (result < 0)
68190 goto out_kfree;
68191 @@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *file,
68192 int i;
68193
68194 set_fs(KERNEL_DS);
68195 - result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
68196 + result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
68197 set_fs(old_fs);
68198 if (result < 0)
68199 goto out;
68200 @@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struct file *file,
68201 __le16 dnaddr;
68202
68203 set_fs(KERNEL_DS);
68204 - result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
68205 + result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
68206 set_fs(old_fs);
68207 if (result < 0)
68208 goto out;
68209 @@ -1233,7 +1233,7 @@ static ssize_t bin_dn_node_address(struct file *file,
68210 le16_to_cpu(dnaddr) & 0x3ff);
68211
68212 set_fs(KERNEL_DS);
68213 - result = vfs_write(file, buf, len, &pos);
68214 + result = vfs_write(file, (const char __force_user *)buf, len, &pos);
68215 set_fs(old_fs);
68216 if (result < 0)
68217 goto out;
68218 diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
68219 index 362da65..ab8ef8c 100644
68220 --- a/kernel/sysctl_check.c
68221 +++ b/kernel/sysctl_check.c
68222 @@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
68223 set_fail(&fail, table, "Directory with extra2");
68224 } else {
68225 if ((table->proc_handler == proc_dostring) ||
68226 + (table->proc_handler == proc_dostring_modpriv) ||
68227 (table->proc_handler == proc_dointvec) ||
68228 (table->proc_handler == proc_dointvec_minmax) ||
68229 (table->proc_handler == proc_dointvec_jiffies) ||
68230 diff --git a/kernel/taskstats.c b/kernel/taskstats.c
68231 index e660464..c8b9e67 100644
68232 --- a/kernel/taskstats.c
68233 +++ b/kernel/taskstats.c
68234 @@ -27,9 +27,12 @@
68235 #include <linux/cgroup.h>
68236 #include <linux/fs.h>
68237 #include <linux/file.h>
68238 +#include <linux/grsecurity.h>
68239 #include <net/genetlink.h>
68240 #include <linux/atomic.h>
68241
68242 +extern int gr_is_taskstats_denied(int pid);
68243 +
68244 /*
68245 * Maximum length of a cpumask that can be specified in
68246 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
68247 @@ -556,6 +559,9 @@ err:
68248
68249 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
68250 {
68251 + if (gr_is_taskstats_denied(current->pid))
68252 + return -EACCES;
68253 +
68254 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
68255 return cmd_attr_register_cpumask(info);
68256 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
68257 diff --git a/kernel/time.c b/kernel/time.c
68258 index 73e416d..cfc6f69 100644
68259 --- a/kernel/time.c
68260 +++ b/kernel/time.c
68261 @@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
68262 return error;
68263
68264 if (tz) {
68265 + /* we log in do_settimeofday called below, so don't log twice
68266 + */
68267 + if (!tv)
68268 + gr_log_timechange();
68269 +
68270 /* SMP safe, global irq locking makes it work. */
68271 sys_tz = *tz;
68272 update_vsyscall_tz();
68273 diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
68274 index 8a46f5d..bbe6f9c 100644
68275 --- a/kernel/time/alarmtimer.c
68276 +++ b/kernel/time/alarmtimer.c
68277 @@ -773,7 +773,7 @@ static int __init alarmtimer_init(void)
68278 struct platform_device *pdev;
68279 int error = 0;
68280 int i;
68281 - struct k_clock alarm_clock = {
68282 + static struct k_clock alarm_clock = {
68283 .clock_getres = alarm_clock_getres,
68284 .clock_get = alarm_clock_get,
68285 .timer_create = alarm_timer_create,
68286 diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
68287 index fd4a7b1..fae5c2a 100644
68288 --- a/kernel/time/tick-broadcast.c
68289 +++ b/kernel/time/tick-broadcast.c
68290 @@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
68291 * then clear the broadcast bit.
68292 */
68293 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
68294 - int cpu = smp_processor_id();
68295 + cpu = smp_processor_id();
68296
68297 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
68298 tick_broadcast_clear_oneshot(cpu);
68299 diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
68300 index 0c63581..e25dcb6 100644
68301 --- a/kernel/time/timekeeping.c
68302 +++ b/kernel/time/timekeeping.c
68303 @@ -14,6 +14,7 @@
68304 #include <linux/init.h>
68305 #include <linux/mm.h>
68306 #include <linux/sched.h>
68307 +#include <linux/grsecurity.h>
68308 #include <linux/syscore_ops.h>
68309 #include <linux/clocksource.h>
68310 #include <linux/jiffies.h>
68311 @@ -365,6 +366,8 @@ int do_settimeofday(const struct timespec *tv)
68312 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
68313 return -EINVAL;
68314
68315 + gr_log_timechange();
68316 +
68317 write_seqlock_irqsave(&xtime_lock, flags);
68318
68319 timekeeping_forward_now();
68320 diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
68321 index 3258455..f35227d 100644
68322 --- a/kernel/time/timer_list.c
68323 +++ b/kernel/time/timer_list.c
68324 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
68325
68326 static void print_name_offset(struct seq_file *m, void *sym)
68327 {
68328 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68329 + SEQ_printf(m, "<%p>", NULL);
68330 +#else
68331 char symname[KSYM_NAME_LEN];
68332
68333 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
68334 SEQ_printf(m, "<%pK>", sym);
68335 else
68336 SEQ_printf(m, "%s", symname);
68337 +#endif
68338 }
68339
68340 static void
68341 @@ -112,7 +116,11 @@ next_one:
68342 static void
68343 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
68344 {
68345 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68346 + SEQ_printf(m, " .base: %p\n", NULL);
68347 +#else
68348 SEQ_printf(m, " .base: %pK\n", base);
68349 +#endif
68350 SEQ_printf(m, " .index: %d\n",
68351 base->index);
68352 SEQ_printf(m, " .resolution: %Lu nsecs\n",
68353 @@ -293,7 +301,11 @@ static int __init init_timer_list_procfs(void)
68354 {
68355 struct proc_dir_entry *pe;
68356
68357 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
68358 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
68359 +#else
68360 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
68361 +#endif
68362 if (!pe)
68363 return -ENOMEM;
68364 return 0;
68365 diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
68366 index 0b537f2..9e71eca 100644
68367 --- a/kernel/time/timer_stats.c
68368 +++ b/kernel/time/timer_stats.c
68369 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
68370 static unsigned long nr_entries;
68371 static struct entry entries[MAX_ENTRIES];
68372
68373 -static atomic_t overflow_count;
68374 +static atomic_unchecked_t overflow_count;
68375
68376 /*
68377 * The entries are in a hash-table, for fast lookup:
68378 @@ -140,7 +140,7 @@ static void reset_entries(void)
68379 nr_entries = 0;
68380 memset(entries, 0, sizeof(entries));
68381 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
68382 - atomic_set(&overflow_count, 0);
68383 + atomic_set_unchecked(&overflow_count, 0);
68384 }
68385
68386 static struct entry *alloc_entry(void)
68387 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
68388 if (likely(entry))
68389 entry->count++;
68390 else
68391 - atomic_inc(&overflow_count);
68392 + atomic_inc_unchecked(&overflow_count);
68393
68394 out_unlock:
68395 raw_spin_unlock_irqrestore(lock, flags);
68396 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
68397
68398 static void print_name_offset(struct seq_file *m, unsigned long addr)
68399 {
68400 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68401 + seq_printf(m, "<%p>", NULL);
68402 +#else
68403 char symname[KSYM_NAME_LEN];
68404
68405 if (lookup_symbol_name(addr, symname) < 0)
68406 seq_printf(m, "<%p>", (void *)addr);
68407 else
68408 seq_printf(m, "%s", symname);
68409 +#endif
68410 }
68411
68412 static int tstats_show(struct seq_file *m, void *v)
68413 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
68414
68415 seq_puts(m, "Timer Stats Version: v0.2\n");
68416 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
68417 - if (atomic_read(&overflow_count))
68418 + if (atomic_read_unchecked(&overflow_count))
68419 seq_printf(m, "Overflow: %d entries\n",
68420 - atomic_read(&overflow_count));
68421 + atomic_read_unchecked(&overflow_count));
68422
68423 for (i = 0; i < nr_entries; i++) {
68424 entry = entries + i;
68425 @@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
68426 {
68427 struct proc_dir_entry *pe;
68428
68429 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
68430 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
68431 +#else
68432 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
68433 +#endif
68434 if (!pe)
68435 return -ENOMEM;
68436 return 0;
68437 diff --git a/kernel/timer.c b/kernel/timer.c
68438 index a297ffc..5e16b0b 100644
68439 --- a/kernel/timer.c
68440 +++ b/kernel/timer.c
68441 @@ -1354,7 +1354,7 @@ void update_process_times(int user_tick)
68442 /*
68443 * This function runs timers and the timer-tq in bottom half context.
68444 */
68445 -static void run_timer_softirq(struct softirq_action *h)
68446 +static void run_timer_softirq(void)
68447 {
68448 struct tvec_base *base = __this_cpu_read(tvec_bases);
68449
68450 diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
68451 index cdea7b5..9b820d4 100644
68452 --- a/kernel/trace/blktrace.c
68453 +++ b/kernel/trace/blktrace.c
68454 @@ -324,7 +324,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
68455 struct blk_trace *bt = filp->private_data;
68456 char buf[16];
68457
68458 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
68459 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
68460
68461 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
68462 }
68463 @@ -389,7 +389,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
68464 return 1;
68465
68466 bt = buf->chan->private_data;
68467 - atomic_inc(&bt->dropped);
68468 + atomic_inc_unchecked(&bt->dropped);
68469 return 0;
68470 }
68471
68472 @@ -490,7 +490,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
68473
68474 bt->dir = dir;
68475 bt->dev = dev;
68476 - atomic_set(&bt->dropped, 0);
68477 + atomic_set_unchecked(&bt->dropped, 0);
68478
68479 ret = -EIO;
68480 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
68481 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
68482 index 683d559..d70d914 100644
68483 --- a/kernel/trace/ftrace.c
68484 +++ b/kernel/trace/ftrace.c
68485 @@ -1726,12 +1726,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
68486 if (unlikely(ftrace_disabled))
68487 return 0;
68488
68489 + ret = ftrace_arch_code_modify_prepare();
68490 + FTRACE_WARN_ON(ret);
68491 + if (ret)
68492 + return 0;
68493 +
68494 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
68495 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
68496 if (ret) {
68497 ftrace_bug(ret, ip);
68498 - return 0;
68499 }
68500 - return 1;
68501 + return ret ? 0 : 1;
68502 }
68503
68504 /*
68505 @@ -2843,7 +2848,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
68506
68507 int
68508 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
68509 - void *data)
68510 + void *data)
68511 {
68512 struct ftrace_func_probe *entry;
68513 struct ftrace_page *pg;
68514 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
68515 index a3f1bc5..5e651718 100644
68516 --- a/kernel/trace/trace.c
68517 +++ b/kernel/trace/trace.c
68518 @@ -4254,10 +4254,9 @@ static const struct file_operations tracing_dyn_info_fops = {
68519 };
68520 #endif
68521
68522 -static struct dentry *d_tracer;
68523 -
68524 struct dentry *tracing_init_dentry(void)
68525 {
68526 + static struct dentry *d_tracer;
68527 static int once;
68528
68529 if (d_tracer)
68530 @@ -4277,10 +4276,9 @@ struct dentry *tracing_init_dentry(void)
68531 return d_tracer;
68532 }
68533
68534 -static struct dentry *d_percpu;
68535 -
68536 struct dentry *tracing_dentry_percpu(void)
68537 {
68538 + static struct dentry *d_percpu;
68539 static int once;
68540 struct dentry *d_tracer;
68541
68542 diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
68543 index c212a7f..7b02394 100644
68544 --- a/kernel/trace/trace_events.c
68545 +++ b/kernel/trace/trace_events.c
68546 @@ -1299,10 +1299,6 @@ static LIST_HEAD(ftrace_module_file_list);
68547 struct ftrace_module_file_ops {
68548 struct list_head list;
68549 struct module *mod;
68550 - struct file_operations id;
68551 - struct file_operations enable;
68552 - struct file_operations format;
68553 - struct file_operations filter;
68554 };
68555
68556 static struct ftrace_module_file_ops *
68557 @@ -1323,17 +1319,12 @@ trace_create_file_ops(struct module *mod)
68558
68559 file_ops->mod = mod;
68560
68561 - file_ops->id = ftrace_event_id_fops;
68562 - file_ops->id.owner = mod;
68563 -
68564 - file_ops->enable = ftrace_enable_fops;
68565 - file_ops->enable.owner = mod;
68566 -
68567 - file_ops->filter = ftrace_event_filter_fops;
68568 - file_ops->filter.owner = mod;
68569 -
68570 - file_ops->format = ftrace_event_format_fops;
68571 - file_ops->format.owner = mod;
68572 + pax_open_kernel();
68573 + *(void **)&mod->trace_id.owner = mod;
68574 + *(void **)&mod->trace_enable.owner = mod;
68575 + *(void **)&mod->trace_filter.owner = mod;
68576 + *(void **)&mod->trace_format.owner = mod;
68577 + pax_close_kernel();
68578
68579 list_add(&file_ops->list, &ftrace_module_file_list);
68580
68581 @@ -1357,8 +1348,8 @@ static void trace_module_add_events(struct module *mod)
68582
68583 for_each_event(call, start, end) {
68584 __trace_add_event_call(*call, mod,
68585 - &file_ops->id, &file_ops->enable,
68586 - &file_ops->filter, &file_ops->format);
68587 + &mod->trace_id, &mod->trace_enable,
68588 + &mod->trace_filter, &mod->trace_format);
68589 }
68590 }
68591
68592 diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
68593 index 00d527c..7c5b1a3 100644
68594 --- a/kernel/trace/trace_kprobe.c
68595 +++ b/kernel/trace/trace_kprobe.c
68596 @@ -217,7 +217,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
68597 long ret;
68598 int maxlen = get_rloc_len(*(u32 *)dest);
68599 u8 *dst = get_rloc_data(dest);
68600 - u8 *src = addr;
68601 + const u8 __user *src = (const u8 __force_user *)addr;
68602 mm_segment_t old_fs = get_fs();
68603 if (!maxlen)
68604 return;
68605 @@ -229,7 +229,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
68606 pagefault_disable();
68607 do
68608 ret = __copy_from_user_inatomic(dst++, src++, 1);
68609 - while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
68610 + while (dst[-1] && ret == 0 && src - (const u8 __force_user *)addr < maxlen);
68611 dst[-1] = '\0';
68612 pagefault_enable();
68613 set_fs(old_fs);
68614 @@ -238,7 +238,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
68615 ((u8 *)get_rloc_data(dest))[0] = '\0';
68616 *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
68617 } else
68618 - *(u32 *)dest = make_data_rloc(src - (u8 *)addr,
68619 + *(u32 *)dest = make_data_rloc(src - (const u8 __force_user *)addr,
68620 get_rloc_offs(*(u32 *)dest));
68621 }
68622 /* Return the length of string -- including null terminal byte */
68623 @@ -252,7 +252,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
68624 set_fs(KERNEL_DS);
68625 pagefault_disable();
68626 do {
68627 - ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
68628 + ret = __copy_from_user_inatomic(&c, (const u8 __force_user *)addr + len, 1);
68629 len++;
68630 } while (c && ret == 0 && len < MAX_STRING_SIZE);
68631 pagefault_enable();
68632 diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
68633 index fd3c8aa..5f324a6 100644
68634 --- a/kernel/trace/trace_mmiotrace.c
68635 +++ b/kernel/trace/trace_mmiotrace.c
68636 @@ -24,7 +24,7 @@ struct header_iter {
68637 static struct trace_array *mmio_trace_array;
68638 static bool overrun_detected;
68639 static unsigned long prev_overruns;
68640 -static atomic_t dropped_count;
68641 +static atomic_unchecked_t dropped_count;
68642
68643 static void mmio_reset_data(struct trace_array *tr)
68644 {
68645 @@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
68646
68647 static unsigned long count_overruns(struct trace_iterator *iter)
68648 {
68649 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
68650 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
68651 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
68652
68653 if (over > prev_overruns)
68654 @@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
68655 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
68656 sizeof(*entry), 0, pc);
68657 if (!event) {
68658 - atomic_inc(&dropped_count);
68659 + atomic_inc_unchecked(&dropped_count);
68660 return;
68661 }
68662 entry = ring_buffer_event_data(event);
68663 @@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
68664 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
68665 sizeof(*entry), 0, pc);
68666 if (!event) {
68667 - atomic_inc(&dropped_count);
68668 + atomic_inc_unchecked(&dropped_count);
68669 return;
68670 }
68671 entry = ring_buffer_event_data(event);
68672 diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
68673 index 0d6ff35..67e0ed7 100644
68674 --- a/kernel/trace/trace_output.c
68675 +++ b/kernel/trace/trace_output.c
68676 @@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path)
68677
68678 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
68679 if (!IS_ERR(p)) {
68680 - p = mangle_path(s->buffer + s->len, p, "\n");
68681 + p = mangle_path(s->buffer + s->len, p, "\n\\");
68682 if (p) {
68683 s->len = p - s->buffer;
68684 return 1;
68685 diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
68686 index d4545f4..a9010a1 100644
68687 --- a/kernel/trace/trace_stack.c
68688 +++ b/kernel/trace/trace_stack.c
68689 @@ -53,7 +53,7 @@ static inline void check_stack(void)
68690 return;
68691
68692 /* we do not handle interrupt stacks yet */
68693 - if (!object_is_on_stack(&this_size))
68694 + if (!object_starts_on_stack(&this_size))
68695 return;
68696
68697 local_irq_save(flags);
68698 diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
68699 index 209b379..7f76423 100644
68700 --- a/kernel/trace/trace_workqueue.c
68701 +++ b/kernel/trace/trace_workqueue.c
68702 @@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
68703 int cpu;
68704 pid_t pid;
68705 /* Can be inserted from interrupt or user context, need to be atomic */
68706 - atomic_t inserted;
68707 + atomic_unchecked_t inserted;
68708 /*
68709 * Don't need to be atomic, works are serialized in a single workqueue thread
68710 * on a single CPU.
68711 @@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
68712 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
68713 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
68714 if (node->pid == wq_thread->pid) {
68715 - atomic_inc(&node->inserted);
68716 + atomic_inc_unchecked(&node->inserted);
68717 goto found;
68718 }
68719 }
68720 @@ -210,7 +210,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
68721 tsk = get_pid_task(pid, PIDTYPE_PID);
68722 if (tsk) {
68723 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
68724 - atomic_read(&cws->inserted), cws->executed,
68725 + atomic_read_unchecked(&cws->inserted), cws->executed,
68726 tsk->comm);
68727 put_task_struct(tsk);
68728 }
68729 diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
68730 index 8745ac7..d144e37 100644
68731 --- a/lib/Kconfig.debug
68732 +++ b/lib/Kconfig.debug
68733 @@ -1103,6 +1103,7 @@ config LATENCYTOP
68734 depends on DEBUG_KERNEL
68735 depends on STACKTRACE_SUPPORT
68736 depends on PROC_FS
68737 + depends on !GRKERNSEC_HIDESYM
68738 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
68739 select KALLSYMS
68740 select KALLSYMS_ALL
68741 diff --git a/lib/bitmap.c b/lib/bitmap.c
68742 index 0d4a127..33a06c7 100644
68743 --- a/lib/bitmap.c
68744 +++ b/lib/bitmap.c
68745 @@ -419,7 +419,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
68746 {
68747 int c, old_c, totaldigits, ndigits, nchunks, nbits;
68748 u32 chunk;
68749 - const char __user __force *ubuf = (const char __user __force *)buf;
68750 + const char __user *ubuf = (const char __force_user *)buf;
68751
68752 bitmap_zero(maskp, nmaskbits);
68753
68754 @@ -504,7 +504,7 @@ int bitmap_parse_user(const char __user *ubuf,
68755 {
68756 if (!access_ok(VERIFY_READ, ubuf, ulen))
68757 return -EFAULT;
68758 - return __bitmap_parse((const char __force *)ubuf,
68759 + return __bitmap_parse((const char __force_kernel *)ubuf,
68760 ulen, 1, maskp, nmaskbits);
68761
68762 }
68763 @@ -596,7 +596,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
68764 {
68765 unsigned a, b;
68766 int c, old_c, totaldigits;
68767 - const char __user __force *ubuf = (const char __user __force *)buf;
68768 + const char __user *ubuf = (const char __force_user *)buf;
68769 int exp_digit, in_range;
68770
68771 totaldigits = c = 0;
68772 @@ -696,7 +696,7 @@ int bitmap_parselist_user(const char __user *ubuf,
68773 {
68774 if (!access_ok(VERIFY_READ, ubuf, ulen))
68775 return -EFAULT;
68776 - return __bitmap_parselist((const char __force *)ubuf,
68777 + return __bitmap_parselist((const char __force_kernel *)ubuf,
68778 ulen, 1, maskp, nmaskbits);
68779 }
68780 EXPORT_SYMBOL(bitmap_parselist_user);
68781 diff --git a/lib/bug.c b/lib/bug.c
68782 index a28c141..2bd3d95 100644
68783 --- a/lib/bug.c
68784 +++ b/lib/bug.c
68785 @@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
68786 return BUG_TRAP_TYPE_NONE;
68787
68788 bug = find_bug(bugaddr);
68789 + if (!bug)
68790 + return BUG_TRAP_TYPE_NONE;
68791
68792 file = NULL;
68793 line = 0;
68794 diff --git a/lib/debugobjects.c b/lib/debugobjects.c
68795 index 0ab9ae8..f01ceca 100644
68796 --- a/lib/debugobjects.c
68797 +++ b/lib/debugobjects.c
68798 @@ -288,7 +288,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
68799 if (limit > 4)
68800 return;
68801
68802 - is_on_stack = object_is_on_stack(addr);
68803 + is_on_stack = object_starts_on_stack(addr);
68804 if (is_on_stack == onstack)
68805 return;
68806
68807 diff --git a/lib/devres.c b/lib/devres.c
68808 index 9676617..5149e15 100644
68809 --- a/lib/devres.c
68810 +++ b/lib/devres.c
68811 @@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
68812 void devm_iounmap(struct device *dev, void __iomem *addr)
68813 {
68814 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
68815 - (void *)addr));
68816 + (void __force *)addr));
68817 iounmap(addr);
68818 }
68819 EXPORT_SYMBOL(devm_iounmap);
68820 @@ -192,7 +192,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
68821 {
68822 ioport_unmap(addr);
68823 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
68824 - devm_ioport_map_match, (void *)addr));
68825 + devm_ioport_map_match, (void __force *)addr));
68826 }
68827 EXPORT_SYMBOL(devm_ioport_unmap);
68828
68829 diff --git a/lib/dma-debug.c b/lib/dma-debug.c
68830 index fea790a..ebb0e82 100644
68831 --- a/lib/dma-debug.c
68832 +++ b/lib/dma-debug.c
68833 @@ -925,7 +925,7 @@ out:
68834
68835 static void check_for_stack(struct device *dev, void *addr)
68836 {
68837 - if (object_is_on_stack(addr))
68838 + if (object_starts_on_stack(addr))
68839 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
68840 "stack [addr=%p]\n", addr);
68841 }
68842 diff --git a/lib/extable.c b/lib/extable.c
68843 index 4cac81e..63e9b8f 100644
68844 --- a/lib/extable.c
68845 +++ b/lib/extable.c
68846 @@ -13,6 +13,7 @@
68847 #include <linux/init.h>
68848 #include <linux/sort.h>
68849 #include <asm/uaccess.h>
68850 +#include <asm/pgtable.h>
68851
68852 #ifndef ARCH_HAS_SORT_EXTABLE
68853 /*
68854 @@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const void *b)
68855 void sort_extable(struct exception_table_entry *start,
68856 struct exception_table_entry *finish)
68857 {
68858 + pax_open_kernel();
68859 sort(start, finish - start, sizeof(struct exception_table_entry),
68860 cmp_ex, NULL);
68861 + pax_close_kernel();
68862 }
68863
68864 #ifdef CONFIG_MODULES
68865 diff --git a/lib/inflate.c b/lib/inflate.c
68866 index 013a761..c28f3fc 100644
68867 --- a/lib/inflate.c
68868 +++ b/lib/inflate.c
68869 @@ -269,7 +269,7 @@ static void free(void *where)
68870 malloc_ptr = free_mem_ptr;
68871 }
68872 #else
68873 -#define malloc(a) kmalloc(a, GFP_KERNEL)
68874 +#define malloc(a) kmalloc((a), GFP_KERNEL)
68875 #define free(a) kfree(a)
68876 #endif
68877
68878 diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
68879 index bd2bea9..6b3c95e 100644
68880 --- a/lib/is_single_threaded.c
68881 +++ b/lib/is_single_threaded.c
68882 @@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
68883 struct task_struct *p, *t;
68884 bool ret;
68885
68886 + if (!mm)
68887 + return true;
68888 +
68889 if (atomic_read(&task->signal->live) != 1)
68890 return false;
68891
68892 diff --git a/lib/radix-tree.c b/lib/radix-tree.c
68893 index dc63d08..95ae14a 100644
68894 --- a/lib/radix-tree.c
68895 +++ b/lib/radix-tree.c
68896 @@ -78,7 +78,7 @@ struct radix_tree_preload {
68897 int nr;
68898 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
68899 };
68900 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
68901 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
68902
68903 static inline void *ptr_to_indirect(void *ptr)
68904 {
68905 diff --git a/lib/vsprintf.c b/lib/vsprintf.c
68906 index 38e612e..4fb99a8 100644
68907 --- a/lib/vsprintf.c
68908 +++ b/lib/vsprintf.c
68909 @@ -16,6 +16,9 @@
68910 * - scnprintf and vscnprintf
68911 */
68912
68913 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68914 +#define __INCLUDED_BY_HIDESYM 1
68915 +#endif
68916 #include <stdarg.h>
68917 #include <linux/module.h>
68918 #include <linux/types.h>
68919 @@ -413,7 +416,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
68920 char sym[KSYM_SYMBOL_LEN];
68921 if (ext == 'B')
68922 sprint_backtrace(sym, value);
68923 - else if (ext != 'f' && ext != 's')
68924 + else if (ext != 'f' && ext != 's' && ext != 'a')
68925 sprint_symbol(sym, value);
68926 else
68927 kallsyms_lookup(value, NULL, NULL, NULL, sym);
68928 @@ -789,7 +792,11 @@ char *netdev_feature_string(char *buf, char *end, const u8 *addr,
68929 return number(buf, end, *(const netdev_features_t *)addr, spec);
68930 }
68931
68932 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68933 +int kptr_restrict __read_mostly = 2;
68934 +#else
68935 int kptr_restrict __read_mostly;
68936 +#endif
68937
68938 /*
68939 * Show a '%p' thing. A kernel extension is that the '%p' is followed
68940 @@ -803,6 +810,8 @@ int kptr_restrict __read_mostly;
68941 * - 'S' For symbolic direct pointers with offset
68942 * - 's' For symbolic direct pointers without offset
68943 * - 'B' For backtraced symbolic direct pointers with offset
68944 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
68945 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
68946 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
68947 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
68948 * - 'M' For a 6-byte MAC address, it prints the address in the
68949 @@ -848,12 +857,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
68950 {
68951 if (!ptr && *fmt != 'K') {
68952 /*
68953 - * Print (null) with the same width as a pointer so it makes
68954 + * Print (nil) with the same width as a pointer so it makes
68955 * tabular output look nice.
68956 */
68957 if (spec.field_width == -1)
68958 spec.field_width = 2 * sizeof(void *);
68959 - return string(buf, end, "(null)", spec);
68960 + return string(buf, end, "(nil)", spec);
68961 }
68962
68963 switch (*fmt) {
68964 @@ -863,6 +872,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
68965 /* Fallthrough */
68966 case 'S':
68967 case 's':
68968 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68969 + break;
68970 +#else
68971 + return symbol_string(buf, end, ptr, spec, *fmt);
68972 +#endif
68973 + case 'A':
68974 + case 'a':
68975 case 'B':
68976 return symbol_string(buf, end, ptr, spec, *fmt);
68977 case 'R':
68978 @@ -1633,11 +1649,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
68979 typeof(type) value; \
68980 if (sizeof(type) == 8) { \
68981 args = PTR_ALIGN(args, sizeof(u32)); \
68982 - *(u32 *)&value = *(u32 *)args; \
68983 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
68984 + *(u32 *)&value = *(const u32 *)args; \
68985 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
68986 } else { \
68987 args = PTR_ALIGN(args, sizeof(type)); \
68988 - value = *(typeof(type) *)args; \
68989 + value = *(const typeof(type) *)args; \
68990 } \
68991 args += sizeof(type); \
68992 value; \
68993 @@ -1700,7 +1716,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
68994 case FORMAT_TYPE_STR: {
68995 const char *str_arg = args;
68996 args += strlen(str_arg) + 1;
68997 - str = string(str, end, (char *)str_arg, spec);
68998 + str = string(str, end, str_arg, spec);
68999 break;
69000 }
69001
69002 diff --git a/localversion-grsec b/localversion-grsec
69003 new file mode 100644
69004 index 0000000..7cd6065
69005 --- /dev/null
69006 +++ b/localversion-grsec
69007 @@ -0,0 +1 @@
69008 +-grsec
69009 diff --git a/mm/Kconfig b/mm/Kconfig
69010 index e338407..49b5b7a 100644
69011 --- a/mm/Kconfig
69012 +++ b/mm/Kconfig
69013 @@ -247,10 +247,10 @@ config KSM
69014 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
69015
69016 config DEFAULT_MMAP_MIN_ADDR
69017 - int "Low address space to protect from user allocation"
69018 + int "Low address space to protect from user allocation"
69019 depends on MMU
69020 - default 4096
69021 - help
69022 + default 65536
69023 + help
69024 This is the portion of low virtual memory which should be protected
69025 from userspace allocation. Keeping a user from writing to low pages
69026 can help reduce the impact of kernel NULL pointer bugs.
69027 diff --git a/mm/filemap.c b/mm/filemap.c
69028 index b662757..3081ddd 100644
69029 --- a/mm/filemap.c
69030 +++ b/mm/filemap.c
69031 @@ -1770,7 +1770,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
69032 struct address_space *mapping = file->f_mapping;
69033
69034 if (!mapping->a_ops->readpage)
69035 - return -ENOEXEC;
69036 + return -ENODEV;
69037 file_accessed(file);
69038 vma->vm_ops = &generic_file_vm_ops;
69039 vma->vm_flags |= VM_CAN_NONLINEAR;
69040 @@ -2176,6 +2176,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
69041 *pos = i_size_read(inode);
69042
69043 if (limit != RLIM_INFINITY) {
69044 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
69045 if (*pos >= limit) {
69046 send_sig(SIGXFSZ, current, 0);
69047 return -EFBIG;
69048 diff --git a/mm/fremap.c b/mm/fremap.c
69049 index 9ed4fd4..c42648d 100644
69050 --- a/mm/fremap.c
69051 +++ b/mm/fremap.c
69052 @@ -155,6 +155,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
69053 retry:
69054 vma = find_vma(mm, start);
69055
69056 +#ifdef CONFIG_PAX_SEGMEXEC
69057 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
69058 + goto out;
69059 +#endif
69060 +
69061 /*
69062 * Make sure the vma is shared, that it supports prefaulting,
69063 * and that the remapped range is valid and fully within
69064 diff --git a/mm/highmem.c b/mm/highmem.c
69065 index 57d82c6..e9e0552 100644
69066 --- a/mm/highmem.c
69067 +++ b/mm/highmem.c
69068 @@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
69069 * So no dangers, even with speculative execution.
69070 */
69071 page = pte_page(pkmap_page_table[i]);
69072 + pax_open_kernel();
69073 pte_clear(&init_mm, (unsigned long)page_address(page),
69074 &pkmap_page_table[i]);
69075 -
69076 + pax_close_kernel();
69077 set_page_address(page, NULL);
69078 need_flush = 1;
69079 }
69080 @@ -186,9 +187,11 @@ start:
69081 }
69082 }
69083 vaddr = PKMAP_ADDR(last_pkmap_nr);
69084 +
69085 + pax_open_kernel();
69086 set_pte_at(&init_mm, vaddr,
69087 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
69088 -
69089 + pax_close_kernel();
69090 pkmap_count[last_pkmap_nr] = 1;
69091 set_page_address(page, (void *)vaddr);
69092
69093 diff --git a/mm/huge_memory.c b/mm/huge_memory.c
69094 index 8f7fc39..69bf1e9 100644
69095 --- a/mm/huge_memory.c
69096 +++ b/mm/huge_memory.c
69097 @@ -733,7 +733,7 @@ out:
69098 * run pte_offset_map on the pmd, if an huge pmd could
69099 * materialize from under us from a different thread.
69100 */
69101 - if (unlikely(__pte_alloc(mm, vma, pmd, address)))
69102 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
69103 return VM_FAULT_OOM;
69104 /* if an huge pmd materialized from under us just retry later */
69105 if (unlikely(pmd_trans_huge(*pmd)))
69106 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
69107 index a876871..132cde0 100644
69108 --- a/mm/hugetlb.c
69109 +++ b/mm/hugetlb.c
69110 @@ -2346,6 +2346,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
69111 return 1;
69112 }
69113
69114 +#ifdef CONFIG_PAX_SEGMEXEC
69115 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
69116 +{
69117 + struct mm_struct *mm = vma->vm_mm;
69118 + struct vm_area_struct *vma_m;
69119 + unsigned long address_m;
69120 + pte_t *ptep_m;
69121 +
69122 + vma_m = pax_find_mirror_vma(vma);
69123 + if (!vma_m)
69124 + return;
69125 +
69126 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
69127 + address_m = address + SEGMEXEC_TASK_SIZE;
69128 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
69129 + get_page(page_m);
69130 + hugepage_add_anon_rmap(page_m, vma_m, address_m);
69131 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
69132 +}
69133 +#endif
69134 +
69135 /*
69136 * Hugetlb_cow() should be called with page lock of the original hugepage held.
69137 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
69138 @@ -2459,6 +2480,11 @@ retry_avoidcopy:
69139 make_huge_pte(vma, new_page, 1));
69140 page_remove_rmap(old_page);
69141 hugepage_add_new_anon_rmap(new_page, vma, address);
69142 +
69143 +#ifdef CONFIG_PAX_SEGMEXEC
69144 + pax_mirror_huge_pte(vma, address, new_page);
69145 +#endif
69146 +
69147 /* Make the old page be freed below */
69148 new_page = old_page;
69149 mmu_notifier_invalidate_range_end(mm,
69150 @@ -2613,6 +2639,10 @@ retry:
69151 && (vma->vm_flags & VM_SHARED)));
69152 set_huge_pte_at(mm, address, ptep, new_pte);
69153
69154 +#ifdef CONFIG_PAX_SEGMEXEC
69155 + pax_mirror_huge_pte(vma, address, page);
69156 +#endif
69157 +
69158 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
69159 /* Optimization, do the COW without a second fault */
69160 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
69161 @@ -2642,6 +2672,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
69162 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
69163 struct hstate *h = hstate_vma(vma);
69164
69165 +#ifdef CONFIG_PAX_SEGMEXEC
69166 + struct vm_area_struct *vma_m;
69167 +#endif
69168 +
69169 address &= huge_page_mask(h);
69170
69171 ptep = huge_pte_offset(mm, address);
69172 @@ -2655,6 +2689,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
69173 VM_FAULT_SET_HINDEX(h - hstates);
69174 }
69175
69176 +#ifdef CONFIG_PAX_SEGMEXEC
69177 + vma_m = pax_find_mirror_vma(vma);
69178 + if (vma_m) {
69179 + unsigned long address_m;
69180 +
69181 + if (vma->vm_start > vma_m->vm_start) {
69182 + address_m = address;
69183 + address -= SEGMEXEC_TASK_SIZE;
69184 + vma = vma_m;
69185 + h = hstate_vma(vma);
69186 + } else
69187 + address_m = address + SEGMEXEC_TASK_SIZE;
69188 +
69189 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
69190 + return VM_FAULT_OOM;
69191 + address_m &= HPAGE_MASK;
69192 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
69193 + }
69194 +#endif
69195 +
69196 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
69197 if (!ptep)
69198 return VM_FAULT_OOM;
69199 diff --git a/mm/internal.h b/mm/internal.h
69200 index 2189af4..f2ca332 100644
69201 --- a/mm/internal.h
69202 +++ b/mm/internal.h
69203 @@ -95,6 +95,7 @@ extern void putback_lru_page(struct page *page);
69204 * in mm/page_alloc.c
69205 */
69206 extern void __free_pages_bootmem(struct page *page, unsigned int order);
69207 +extern void free_compound_page(struct page *page);
69208 extern void prep_compound_page(struct page *page, unsigned long order);
69209 #ifdef CONFIG_MEMORY_FAILURE
69210 extern bool is_free_buddy_page(struct page *page);
69211 diff --git a/mm/kmemleak.c b/mm/kmemleak.c
69212 index 45eb621..6ccd8ea 100644
69213 --- a/mm/kmemleak.c
69214 +++ b/mm/kmemleak.c
69215 @@ -363,7 +363,7 @@ static void print_unreferenced(struct seq_file *seq,
69216
69217 for (i = 0; i < object->trace_len; i++) {
69218 void *ptr = (void *)object->trace[i];
69219 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
69220 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
69221 }
69222 }
69223
69224 diff --git a/mm/maccess.c b/mm/maccess.c
69225 index d53adf9..03a24bf 100644
69226 --- a/mm/maccess.c
69227 +++ b/mm/maccess.c
69228 @@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
69229 set_fs(KERNEL_DS);
69230 pagefault_disable();
69231 ret = __copy_from_user_inatomic(dst,
69232 - (__force const void __user *)src, size);
69233 + (const void __force_user *)src, size);
69234 pagefault_enable();
69235 set_fs(old_fs);
69236
69237 @@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
69238
69239 set_fs(KERNEL_DS);
69240 pagefault_disable();
69241 - ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
69242 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
69243 pagefault_enable();
69244 set_fs(old_fs);
69245
69246 diff --git a/mm/madvise.c b/mm/madvise.c
69247 index 74bf193..feb6fd3 100644
69248 --- a/mm/madvise.c
69249 +++ b/mm/madvise.c
69250 @@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
69251 pgoff_t pgoff;
69252 unsigned long new_flags = vma->vm_flags;
69253
69254 +#ifdef CONFIG_PAX_SEGMEXEC
69255 + struct vm_area_struct *vma_m;
69256 +#endif
69257 +
69258 switch (behavior) {
69259 case MADV_NORMAL:
69260 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
69261 @@ -110,6 +114,13 @@ success:
69262 /*
69263 * vm_flags is protected by the mmap_sem held in write mode.
69264 */
69265 +
69266 +#ifdef CONFIG_PAX_SEGMEXEC
69267 + vma_m = pax_find_mirror_vma(vma);
69268 + if (vma_m)
69269 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
69270 +#endif
69271 +
69272 vma->vm_flags = new_flags;
69273
69274 out:
69275 @@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
69276 struct vm_area_struct ** prev,
69277 unsigned long start, unsigned long end)
69278 {
69279 +
69280 +#ifdef CONFIG_PAX_SEGMEXEC
69281 + struct vm_area_struct *vma_m;
69282 +#endif
69283 +
69284 *prev = vma;
69285 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
69286 return -EINVAL;
69287 @@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
69288 zap_page_range(vma, start, end - start, &details);
69289 } else
69290 zap_page_range(vma, start, end - start, NULL);
69291 +
69292 +#ifdef CONFIG_PAX_SEGMEXEC
69293 + vma_m = pax_find_mirror_vma(vma);
69294 + if (vma_m) {
69295 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
69296 + struct zap_details details = {
69297 + .nonlinear_vma = vma_m,
69298 + .last_index = ULONG_MAX,
69299 + };
69300 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
69301 + } else
69302 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
69303 + }
69304 +#endif
69305 +
69306 return 0;
69307 }
69308
69309 @@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
69310 if (end < start)
69311 goto out;
69312
69313 +#ifdef CONFIG_PAX_SEGMEXEC
69314 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
69315 + if (end > SEGMEXEC_TASK_SIZE)
69316 + goto out;
69317 + } else
69318 +#endif
69319 +
69320 + if (end > TASK_SIZE)
69321 + goto out;
69322 +
69323 error = 0;
69324 if (end == start)
69325 goto out;
69326 diff --git a/mm/memory-failure.c b/mm/memory-failure.c
69327 index 56080ea..115071e 100644
69328 --- a/mm/memory-failure.c
69329 +++ b/mm/memory-failure.c
69330 @@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
69331
69332 int sysctl_memory_failure_recovery __read_mostly = 1;
69333
69334 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
69335 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
69336
69337 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
69338
69339 @@ -202,7 +202,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
69340 si.si_signo = SIGBUS;
69341 si.si_errno = 0;
69342 si.si_code = BUS_MCEERR_AO;
69343 - si.si_addr = (void *)addr;
69344 + si.si_addr = (void __user *)addr;
69345 #ifdef __ARCH_SI_TRAPNO
69346 si.si_trapno = trapno;
69347 #endif
69348 @@ -1010,7 +1010,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
69349 }
69350
69351 nr_pages = 1 << compound_trans_order(hpage);
69352 - atomic_long_add(nr_pages, &mce_bad_pages);
69353 + atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
69354
69355 /*
69356 * We need/can do nothing about count=0 pages.
69357 @@ -1040,7 +1040,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
69358 if (!PageHWPoison(hpage)
69359 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
69360 || (p != hpage && TestSetPageHWPoison(hpage))) {
69361 - atomic_long_sub(nr_pages, &mce_bad_pages);
69362 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
69363 return 0;
69364 }
69365 set_page_hwpoison_huge_page(hpage);
69366 @@ -1098,7 +1098,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
69367 }
69368 if (hwpoison_filter(p)) {
69369 if (TestClearPageHWPoison(p))
69370 - atomic_long_sub(nr_pages, &mce_bad_pages);
69371 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
69372 unlock_page(hpage);
69373 put_page(hpage);
69374 return 0;
69375 @@ -1315,7 +1315,7 @@ int unpoison_memory(unsigned long pfn)
69376 return 0;
69377 }
69378 if (TestClearPageHWPoison(p))
69379 - atomic_long_sub(nr_pages, &mce_bad_pages);
69380 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
69381 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
69382 return 0;
69383 }
69384 @@ -1329,7 +1329,7 @@ int unpoison_memory(unsigned long pfn)
69385 */
69386 if (TestClearPageHWPoison(page)) {
69387 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
69388 - atomic_long_sub(nr_pages, &mce_bad_pages);
69389 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
69390 freeit = 1;
69391 if (PageHuge(page))
69392 clear_page_hwpoison_huge_page(page);
69393 @@ -1442,7 +1442,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
69394 }
69395 done:
69396 if (!PageHWPoison(hpage))
69397 - atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
69398 + atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
69399 set_page_hwpoison_huge_page(hpage);
69400 dequeue_hwpoisoned_huge_page(hpage);
69401 /* keep elevated page count for bad page */
69402 @@ -1573,7 +1573,7 @@ int soft_offline_page(struct page *page, int flags)
69403 return ret;
69404
69405 done:
69406 - atomic_long_add(1, &mce_bad_pages);
69407 + atomic_long_add_unchecked(1, &mce_bad_pages);
69408 SetPageHWPoison(page);
69409 /* keep elevated page count for bad page */
69410 return ret;
69411 diff --git a/mm/memory.c b/mm/memory.c
69412 index fa2f04e..a8a40c8 100644
69413 --- a/mm/memory.c
69414 +++ b/mm/memory.c
69415 @@ -457,8 +457,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
69416 return;
69417
69418 pmd = pmd_offset(pud, start);
69419 +
69420 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
69421 pud_clear(pud);
69422 pmd_free_tlb(tlb, pmd, start);
69423 +#endif
69424 +
69425 }
69426
69427 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
69428 @@ -489,9 +493,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
69429 if (end - 1 > ceiling - 1)
69430 return;
69431
69432 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
69433 pud = pud_offset(pgd, start);
69434 pgd_clear(pgd);
69435 pud_free_tlb(tlb, pud, start);
69436 +#endif
69437 +
69438 }
69439
69440 /*
69441 @@ -1585,12 +1592,6 @@ no_page_table:
69442 return page;
69443 }
69444
69445 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
69446 -{
69447 - return stack_guard_page_start(vma, addr) ||
69448 - stack_guard_page_end(vma, addr+PAGE_SIZE);
69449 -}
69450 -
69451 /**
69452 * __get_user_pages() - pin user pages in memory
69453 * @tsk: task_struct of target task
69454 @@ -1663,10 +1664,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
69455 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
69456 i = 0;
69457
69458 - do {
69459 + while (nr_pages) {
69460 struct vm_area_struct *vma;
69461
69462 - vma = find_extend_vma(mm, start);
69463 + vma = find_vma(mm, start);
69464 if (!vma && in_gate_area(mm, start)) {
69465 unsigned long pg = start & PAGE_MASK;
69466 pgd_t *pgd;
69467 @@ -1714,7 +1715,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
69468 goto next_page;
69469 }
69470
69471 - if (!vma ||
69472 + if (!vma || start < vma->vm_start ||
69473 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
69474 !(vm_flags & vma->vm_flags))
69475 return i ? : -EFAULT;
69476 @@ -1741,11 +1742,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
69477 int ret;
69478 unsigned int fault_flags = 0;
69479
69480 - /* For mlock, just skip the stack guard page. */
69481 - if (foll_flags & FOLL_MLOCK) {
69482 - if (stack_guard_page(vma, start))
69483 - goto next_page;
69484 - }
69485 if (foll_flags & FOLL_WRITE)
69486 fault_flags |= FAULT_FLAG_WRITE;
69487 if (nonblocking)
69488 @@ -1819,7 +1815,7 @@ next_page:
69489 start += PAGE_SIZE;
69490 nr_pages--;
69491 } while (nr_pages && start < vma->vm_end);
69492 - } while (nr_pages);
69493 + }
69494 return i;
69495 }
69496 EXPORT_SYMBOL(__get_user_pages);
69497 @@ -2026,6 +2022,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
69498 page_add_file_rmap(page);
69499 set_pte_at(mm, addr, pte, mk_pte(page, prot));
69500
69501 +#ifdef CONFIG_PAX_SEGMEXEC
69502 + pax_mirror_file_pte(vma, addr, page, ptl);
69503 +#endif
69504 +
69505 retval = 0;
69506 pte_unmap_unlock(pte, ptl);
69507 return retval;
69508 @@ -2060,10 +2060,22 @@ out:
69509 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
69510 struct page *page)
69511 {
69512 +
69513 +#ifdef CONFIG_PAX_SEGMEXEC
69514 + struct vm_area_struct *vma_m;
69515 +#endif
69516 +
69517 if (addr < vma->vm_start || addr >= vma->vm_end)
69518 return -EFAULT;
69519 if (!page_count(page))
69520 return -EINVAL;
69521 +
69522 +#ifdef CONFIG_PAX_SEGMEXEC
69523 + vma_m = pax_find_mirror_vma(vma);
69524 + if (vma_m)
69525 + vma_m->vm_flags |= VM_INSERTPAGE;
69526 +#endif
69527 +
69528 vma->vm_flags |= VM_INSERTPAGE;
69529 return insert_page(vma, addr, page, vma->vm_page_prot);
69530 }
69531 @@ -2149,6 +2161,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
69532 unsigned long pfn)
69533 {
69534 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
69535 + BUG_ON(vma->vm_mirror);
69536
69537 if (addr < vma->vm_start || addr >= vma->vm_end)
69538 return -EFAULT;
69539 @@ -2464,6 +2477,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
69540 copy_user_highpage(dst, src, va, vma);
69541 }
69542
69543 +#ifdef CONFIG_PAX_SEGMEXEC
69544 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
69545 +{
69546 + struct mm_struct *mm = vma->vm_mm;
69547 + spinlock_t *ptl;
69548 + pte_t *pte, entry;
69549 +
69550 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
69551 + entry = *pte;
69552 + if (!pte_present(entry)) {
69553 + if (!pte_none(entry)) {
69554 + BUG_ON(pte_file(entry));
69555 + free_swap_and_cache(pte_to_swp_entry(entry));
69556 + pte_clear_not_present_full(mm, address, pte, 0);
69557 + }
69558 + } else {
69559 + struct page *page;
69560 +
69561 + flush_cache_page(vma, address, pte_pfn(entry));
69562 + entry = ptep_clear_flush(vma, address, pte);
69563 + BUG_ON(pte_dirty(entry));
69564 + page = vm_normal_page(vma, address, entry);
69565 + if (page) {
69566 + update_hiwater_rss(mm);
69567 + if (PageAnon(page))
69568 + dec_mm_counter_fast(mm, MM_ANONPAGES);
69569 + else
69570 + dec_mm_counter_fast(mm, MM_FILEPAGES);
69571 + page_remove_rmap(page);
69572 + page_cache_release(page);
69573 + }
69574 + }
69575 + pte_unmap_unlock(pte, ptl);
69576 +}
69577 +
69578 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
69579 + *
69580 + * the ptl of the lower mapped page is held on entry and is not released on exit
69581 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
69582 + */
69583 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
69584 +{
69585 + struct mm_struct *mm = vma->vm_mm;
69586 + unsigned long address_m;
69587 + spinlock_t *ptl_m;
69588 + struct vm_area_struct *vma_m;
69589 + pmd_t *pmd_m;
69590 + pte_t *pte_m, entry_m;
69591 +
69592 + BUG_ON(!page_m || !PageAnon(page_m));
69593 +
69594 + vma_m = pax_find_mirror_vma(vma);
69595 + if (!vma_m)
69596 + return;
69597 +
69598 + BUG_ON(!PageLocked(page_m));
69599 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
69600 + address_m = address + SEGMEXEC_TASK_SIZE;
69601 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
69602 + pte_m = pte_offset_map(pmd_m, address_m);
69603 + ptl_m = pte_lockptr(mm, pmd_m);
69604 + if (ptl != ptl_m) {
69605 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
69606 + if (!pte_none(*pte_m))
69607 + goto out;
69608 + }
69609 +
69610 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
69611 + page_cache_get(page_m);
69612 + page_add_anon_rmap(page_m, vma_m, address_m);
69613 + inc_mm_counter_fast(mm, MM_ANONPAGES);
69614 + set_pte_at(mm, address_m, pte_m, entry_m);
69615 + update_mmu_cache(vma_m, address_m, entry_m);
69616 +out:
69617 + if (ptl != ptl_m)
69618 + spin_unlock(ptl_m);
69619 + pte_unmap(pte_m);
69620 + unlock_page(page_m);
69621 +}
69622 +
69623 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
69624 +{
69625 + struct mm_struct *mm = vma->vm_mm;
69626 + unsigned long address_m;
69627 + spinlock_t *ptl_m;
69628 + struct vm_area_struct *vma_m;
69629 + pmd_t *pmd_m;
69630 + pte_t *pte_m, entry_m;
69631 +
69632 + BUG_ON(!page_m || PageAnon(page_m));
69633 +
69634 + vma_m = pax_find_mirror_vma(vma);
69635 + if (!vma_m)
69636 + return;
69637 +
69638 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
69639 + address_m = address + SEGMEXEC_TASK_SIZE;
69640 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
69641 + pte_m = pte_offset_map(pmd_m, address_m);
69642 + ptl_m = pte_lockptr(mm, pmd_m);
69643 + if (ptl != ptl_m) {
69644 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
69645 + if (!pte_none(*pte_m))
69646 + goto out;
69647 + }
69648 +
69649 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
69650 + page_cache_get(page_m);
69651 + page_add_file_rmap(page_m);
69652 + inc_mm_counter_fast(mm, MM_FILEPAGES);
69653 + set_pte_at(mm, address_m, pte_m, entry_m);
69654 + update_mmu_cache(vma_m, address_m, entry_m);
69655 +out:
69656 + if (ptl != ptl_m)
69657 + spin_unlock(ptl_m);
69658 + pte_unmap(pte_m);
69659 +}
69660 +
69661 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
69662 +{
69663 + struct mm_struct *mm = vma->vm_mm;
69664 + unsigned long address_m;
69665 + spinlock_t *ptl_m;
69666 + struct vm_area_struct *vma_m;
69667 + pmd_t *pmd_m;
69668 + pte_t *pte_m, entry_m;
69669 +
69670 + vma_m = pax_find_mirror_vma(vma);
69671 + if (!vma_m)
69672 + return;
69673 +
69674 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
69675 + address_m = address + SEGMEXEC_TASK_SIZE;
69676 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
69677 + pte_m = pte_offset_map(pmd_m, address_m);
69678 + ptl_m = pte_lockptr(mm, pmd_m);
69679 + if (ptl != ptl_m) {
69680 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
69681 + if (!pte_none(*pte_m))
69682 + goto out;
69683 + }
69684 +
69685 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
69686 + set_pte_at(mm, address_m, pte_m, entry_m);
69687 +out:
69688 + if (ptl != ptl_m)
69689 + spin_unlock(ptl_m);
69690 + pte_unmap(pte_m);
69691 +}
69692 +
69693 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
69694 +{
69695 + struct page *page_m;
69696 + pte_t entry;
69697 +
69698 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
69699 + goto out;
69700 +
69701 + entry = *pte;
69702 + page_m = vm_normal_page(vma, address, entry);
69703 + if (!page_m)
69704 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
69705 + else if (PageAnon(page_m)) {
69706 + if (pax_find_mirror_vma(vma)) {
69707 + pte_unmap_unlock(pte, ptl);
69708 + lock_page(page_m);
69709 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
69710 + if (pte_same(entry, *pte))
69711 + pax_mirror_anon_pte(vma, address, page_m, ptl);
69712 + else
69713 + unlock_page(page_m);
69714 + }
69715 + } else
69716 + pax_mirror_file_pte(vma, address, page_m, ptl);
69717 +
69718 +out:
69719 + pte_unmap_unlock(pte, ptl);
69720 +}
69721 +#endif
69722 +
69723 /*
69724 * This routine handles present pages, when users try to write
69725 * to a shared page. It is done by copying the page to a new address
69726 @@ -2675,6 +2868,12 @@ gotten:
69727 */
69728 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
69729 if (likely(pte_same(*page_table, orig_pte))) {
69730 +
69731 +#ifdef CONFIG_PAX_SEGMEXEC
69732 + if (pax_find_mirror_vma(vma))
69733 + BUG_ON(!trylock_page(new_page));
69734 +#endif
69735 +
69736 if (old_page) {
69737 if (!PageAnon(old_page)) {
69738 dec_mm_counter_fast(mm, MM_FILEPAGES);
69739 @@ -2726,6 +2925,10 @@ gotten:
69740 page_remove_rmap(old_page);
69741 }
69742
69743 +#ifdef CONFIG_PAX_SEGMEXEC
69744 + pax_mirror_anon_pte(vma, address, new_page, ptl);
69745 +#endif
69746 +
69747 /* Free the old page.. */
69748 new_page = old_page;
69749 ret |= VM_FAULT_WRITE;
69750 @@ -3005,6 +3208,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
69751 swap_free(entry);
69752 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
69753 try_to_free_swap(page);
69754 +
69755 +#ifdef CONFIG_PAX_SEGMEXEC
69756 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
69757 +#endif
69758 +
69759 unlock_page(page);
69760 if (swapcache) {
69761 /*
69762 @@ -3028,6 +3236,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
69763
69764 /* No need to invalidate - it was non-present before */
69765 update_mmu_cache(vma, address, page_table);
69766 +
69767 +#ifdef CONFIG_PAX_SEGMEXEC
69768 + pax_mirror_anon_pte(vma, address, page, ptl);
69769 +#endif
69770 +
69771 unlock:
69772 pte_unmap_unlock(page_table, ptl);
69773 out:
69774 @@ -3047,40 +3260,6 @@ out_release:
69775 }
69776
69777 /*
69778 - * This is like a special single-page "expand_{down|up}wards()",
69779 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
69780 - * doesn't hit another vma.
69781 - */
69782 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
69783 -{
69784 - address &= PAGE_MASK;
69785 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
69786 - struct vm_area_struct *prev = vma->vm_prev;
69787 -
69788 - /*
69789 - * Is there a mapping abutting this one below?
69790 - *
69791 - * That's only ok if it's the same stack mapping
69792 - * that has gotten split..
69793 - */
69794 - if (prev && prev->vm_end == address)
69795 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
69796 -
69797 - expand_downwards(vma, address - PAGE_SIZE);
69798 - }
69799 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
69800 - struct vm_area_struct *next = vma->vm_next;
69801 -
69802 - /* As VM_GROWSDOWN but s/below/above/ */
69803 - if (next && next->vm_start == address + PAGE_SIZE)
69804 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
69805 -
69806 - expand_upwards(vma, address + PAGE_SIZE);
69807 - }
69808 - return 0;
69809 -}
69810 -
69811 -/*
69812 * We enter with non-exclusive mmap_sem (to exclude vma changes,
69813 * but allow concurrent faults), and pte mapped but not yet locked.
69814 * We return with mmap_sem still held, but pte unmapped and unlocked.
69815 @@ -3089,27 +3268,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
69816 unsigned long address, pte_t *page_table, pmd_t *pmd,
69817 unsigned int flags)
69818 {
69819 - struct page *page;
69820 + struct page *page = NULL;
69821 spinlock_t *ptl;
69822 pte_t entry;
69823
69824 - pte_unmap(page_table);
69825 -
69826 - /* Check if we need to add a guard page to the stack */
69827 - if (check_stack_guard_page(vma, address) < 0)
69828 - return VM_FAULT_SIGBUS;
69829 -
69830 - /* Use the zero-page for reads */
69831 if (!(flags & FAULT_FLAG_WRITE)) {
69832 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
69833 vma->vm_page_prot));
69834 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
69835 + ptl = pte_lockptr(mm, pmd);
69836 + spin_lock(ptl);
69837 if (!pte_none(*page_table))
69838 goto unlock;
69839 goto setpte;
69840 }
69841
69842 /* Allocate our own private page. */
69843 + pte_unmap(page_table);
69844 +
69845 if (unlikely(anon_vma_prepare(vma)))
69846 goto oom;
69847 page = alloc_zeroed_user_highpage_movable(vma, address);
69848 @@ -3128,6 +3303,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
69849 if (!pte_none(*page_table))
69850 goto release;
69851
69852 +#ifdef CONFIG_PAX_SEGMEXEC
69853 + if (pax_find_mirror_vma(vma))
69854 + BUG_ON(!trylock_page(page));
69855 +#endif
69856 +
69857 inc_mm_counter_fast(mm, MM_ANONPAGES);
69858 page_add_new_anon_rmap(page, vma, address);
69859 setpte:
69860 @@ -3135,6 +3315,12 @@ setpte:
69861
69862 /* No need to invalidate - it was non-present before */
69863 update_mmu_cache(vma, address, page_table);
69864 +
69865 +#ifdef CONFIG_PAX_SEGMEXEC
69866 + if (page)
69867 + pax_mirror_anon_pte(vma, address, page, ptl);
69868 +#endif
69869 +
69870 unlock:
69871 pte_unmap_unlock(page_table, ptl);
69872 return 0;
69873 @@ -3278,6 +3464,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
69874 */
69875 /* Only go through if we didn't race with anybody else... */
69876 if (likely(pte_same(*page_table, orig_pte))) {
69877 +
69878 +#ifdef CONFIG_PAX_SEGMEXEC
69879 + if (anon && pax_find_mirror_vma(vma))
69880 + BUG_ON(!trylock_page(page));
69881 +#endif
69882 +
69883 flush_icache_page(vma, page);
69884 entry = mk_pte(page, vma->vm_page_prot);
69885 if (flags & FAULT_FLAG_WRITE)
69886 @@ -3297,6 +3489,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
69887
69888 /* no need to invalidate: a not-present page won't be cached */
69889 update_mmu_cache(vma, address, page_table);
69890 +
69891 +#ifdef CONFIG_PAX_SEGMEXEC
69892 + if (anon)
69893 + pax_mirror_anon_pte(vma, address, page, ptl);
69894 + else
69895 + pax_mirror_file_pte(vma, address, page, ptl);
69896 +#endif
69897 +
69898 } else {
69899 if (cow_page)
69900 mem_cgroup_uncharge_page(cow_page);
69901 @@ -3450,6 +3650,12 @@ int handle_pte_fault(struct mm_struct *mm,
69902 if (flags & FAULT_FLAG_WRITE)
69903 flush_tlb_fix_spurious_fault(vma, address);
69904 }
69905 +
69906 +#ifdef CONFIG_PAX_SEGMEXEC
69907 + pax_mirror_pte(vma, address, pte, pmd, ptl);
69908 + return 0;
69909 +#endif
69910 +
69911 unlock:
69912 pte_unmap_unlock(pte, ptl);
69913 return 0;
69914 @@ -3466,6 +3672,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
69915 pmd_t *pmd;
69916 pte_t *pte;
69917
69918 +#ifdef CONFIG_PAX_SEGMEXEC
69919 + struct vm_area_struct *vma_m;
69920 +#endif
69921 +
69922 __set_current_state(TASK_RUNNING);
69923
69924 count_vm_event(PGFAULT);
69925 @@ -3477,6 +3687,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
69926 if (unlikely(is_vm_hugetlb_page(vma)))
69927 return hugetlb_fault(mm, vma, address, flags);
69928
69929 +#ifdef CONFIG_PAX_SEGMEXEC
69930 + vma_m = pax_find_mirror_vma(vma);
69931 + if (vma_m) {
69932 + unsigned long address_m;
69933 + pgd_t *pgd_m;
69934 + pud_t *pud_m;
69935 + pmd_t *pmd_m;
69936 +
69937 + if (vma->vm_start > vma_m->vm_start) {
69938 + address_m = address;
69939 + address -= SEGMEXEC_TASK_SIZE;
69940 + vma = vma_m;
69941 + } else
69942 + address_m = address + SEGMEXEC_TASK_SIZE;
69943 +
69944 + pgd_m = pgd_offset(mm, address_m);
69945 + pud_m = pud_alloc(mm, pgd_m, address_m);
69946 + if (!pud_m)
69947 + return VM_FAULT_OOM;
69948 + pmd_m = pmd_alloc(mm, pud_m, address_m);
69949 + if (!pmd_m)
69950 + return VM_FAULT_OOM;
69951 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
69952 + return VM_FAULT_OOM;
69953 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
69954 + }
69955 +#endif
69956 +
69957 pgd = pgd_offset(mm, address);
69958 pud = pud_alloc(mm, pgd, address);
69959 if (!pud)
69960 @@ -3506,7 +3744,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
69961 * run pte_offset_map on the pmd, if an huge pmd could
69962 * materialize from under us from a different thread.
69963 */
69964 - if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
69965 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
69966 return VM_FAULT_OOM;
69967 /* if an huge pmd materialized from under us just retry later */
69968 if (unlikely(pmd_trans_huge(*pmd)))
69969 @@ -3610,7 +3848,7 @@ static int __init gate_vma_init(void)
69970 gate_vma.vm_start = FIXADDR_USER_START;
69971 gate_vma.vm_end = FIXADDR_USER_END;
69972 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
69973 - gate_vma.vm_page_prot = __P101;
69974 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
69975 /*
69976 * Make sure the vDSO gets into every core dump.
69977 * Dumping its contents makes post-mortem fully interpretable later
69978 diff --git a/mm/mempolicy.c b/mm/mempolicy.c
69979 index 47296fe..5c3d263 100644
69980 --- a/mm/mempolicy.c
69981 +++ b/mm/mempolicy.c
69982 @@ -640,6 +640,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
69983 unsigned long vmstart;
69984 unsigned long vmend;
69985
69986 +#ifdef CONFIG_PAX_SEGMEXEC
69987 + struct vm_area_struct *vma_m;
69988 +#endif
69989 +
69990 vma = find_vma(mm, start);
69991 if (!vma || vma->vm_start > start)
69992 return -EFAULT;
69993 @@ -679,6 +683,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
69994 err = policy_vma(vma, new_pol);
69995 if (err)
69996 goto out;
69997 +
69998 +#ifdef CONFIG_PAX_SEGMEXEC
69999 + vma_m = pax_find_mirror_vma(vma);
70000 + if (vma_m) {
70001 + err = policy_vma(vma_m, new_pol);
70002 + if (err)
70003 + goto out;
70004 + }
70005 +#endif
70006 +
70007 }
70008
70009 out:
70010 @@ -1112,6 +1126,17 @@ static long do_mbind(unsigned long start, unsigned long len,
70011
70012 if (end < start)
70013 return -EINVAL;
70014 +
70015 +#ifdef CONFIG_PAX_SEGMEXEC
70016 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
70017 + if (end > SEGMEXEC_TASK_SIZE)
70018 + return -EINVAL;
70019 + } else
70020 +#endif
70021 +
70022 + if (end > TASK_SIZE)
70023 + return -EINVAL;
70024 +
70025 if (end == start)
70026 return 0;
70027
70028 @@ -1330,6 +1355,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
70029 if (!mm)
70030 goto out;
70031
70032 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
70033 + if (mm != current->mm &&
70034 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
70035 + err = -EPERM;
70036 + goto out;
70037 + }
70038 +#endif
70039 +
70040 /*
70041 * Check if this process has the right to modify the specified
70042 * process. The right exists if the process has administrative
70043 @@ -1339,8 +1372,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
70044 rcu_read_lock();
70045 tcred = __task_cred(task);
70046 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
70047 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
70048 - !capable(CAP_SYS_NICE)) {
70049 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
70050 rcu_read_unlock();
70051 err = -EPERM;
70052 goto out;
70053 diff --git a/mm/migrate.c b/mm/migrate.c
70054 index 1503b6b..156c672 100644
70055 --- a/mm/migrate.c
70056 +++ b/mm/migrate.c
70057 @@ -1370,6 +1370,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
70058 if (!mm)
70059 return -EINVAL;
70060
70061 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
70062 + if (mm != current->mm &&
70063 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
70064 + err = -EPERM;
70065 + goto out;
70066 + }
70067 +#endif
70068 +
70069 /*
70070 * Check if this process has the right to modify the specified
70071 * process. The right exists if the process has administrative
70072 @@ -1379,8 +1387,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
70073 rcu_read_lock();
70074 tcred = __task_cred(task);
70075 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
70076 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
70077 - !capable(CAP_SYS_NICE)) {
70078 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
70079 rcu_read_unlock();
70080 err = -EPERM;
70081 goto out;
70082 diff --git a/mm/mlock.c b/mm/mlock.c
70083 index ef726e8..13e0901 100644
70084 --- a/mm/mlock.c
70085 +++ b/mm/mlock.c
70086 @@ -13,6 +13,7 @@
70087 #include <linux/pagemap.h>
70088 #include <linux/mempolicy.h>
70089 #include <linux/syscalls.h>
70090 +#include <linux/security.h>
70091 #include <linux/sched.h>
70092 #include <linux/export.h>
70093 #include <linux/rmap.h>
70094 @@ -385,6 +386,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
70095 return -EINVAL;
70096 if (end == start)
70097 return 0;
70098 + if (end > TASK_SIZE)
70099 + return -EINVAL;
70100 +
70101 vma = find_vma(current->mm, start);
70102 if (!vma || vma->vm_start > start)
70103 return -ENOMEM;
70104 @@ -396,6 +400,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
70105 for (nstart = start ; ; ) {
70106 vm_flags_t newflags;
70107
70108 +#ifdef CONFIG_PAX_SEGMEXEC
70109 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
70110 + break;
70111 +#endif
70112 +
70113 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
70114
70115 newflags = vma->vm_flags | VM_LOCKED;
70116 @@ -501,6 +510,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
70117 lock_limit >>= PAGE_SHIFT;
70118
70119 /* check against resource limits */
70120 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
70121 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
70122 error = do_mlock(start, len, 1);
70123 up_write(&current->mm->mmap_sem);
70124 @@ -524,17 +534,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
70125 static int do_mlockall(int flags)
70126 {
70127 struct vm_area_struct * vma, * prev = NULL;
70128 - unsigned int def_flags = 0;
70129
70130 if (flags & MCL_FUTURE)
70131 - def_flags = VM_LOCKED;
70132 - current->mm->def_flags = def_flags;
70133 + current->mm->def_flags |= VM_LOCKED;
70134 + else
70135 + current->mm->def_flags &= ~VM_LOCKED;
70136 if (flags == MCL_FUTURE)
70137 goto out;
70138
70139 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
70140 vm_flags_t newflags;
70141
70142 +#ifdef CONFIG_PAX_SEGMEXEC
70143 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
70144 + break;
70145 +#endif
70146 +
70147 + BUG_ON(vma->vm_end > TASK_SIZE);
70148 newflags = vma->vm_flags | VM_LOCKED;
70149 if (!(flags & MCL_CURRENT))
70150 newflags &= ~VM_LOCKED;
70151 @@ -567,6 +583,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
70152 lock_limit >>= PAGE_SHIFT;
70153
70154 ret = -ENOMEM;
70155 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
70156 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
70157 capable(CAP_IPC_LOCK))
70158 ret = do_mlockall(flags);
70159 diff --git a/mm/mmap.c b/mm/mmap.c
70160 index da15a79..2e3d9ff 100644
70161 --- a/mm/mmap.c
70162 +++ b/mm/mmap.c
70163 @@ -46,6 +46,16 @@
70164 #define arch_rebalance_pgtables(addr, len) (addr)
70165 #endif
70166
70167 +static inline void verify_mm_writelocked(struct mm_struct *mm)
70168 +{
70169 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
70170 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
70171 + up_read(&mm->mmap_sem);
70172 + BUG();
70173 + }
70174 +#endif
70175 +}
70176 +
70177 static void unmap_region(struct mm_struct *mm,
70178 struct vm_area_struct *vma, struct vm_area_struct *prev,
70179 unsigned long start, unsigned long end);
70180 @@ -71,22 +81,32 @@ static void unmap_region(struct mm_struct *mm,
70181 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
70182 *
70183 */
70184 -pgprot_t protection_map[16] = {
70185 +pgprot_t protection_map[16] __read_only = {
70186 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
70187 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
70188 };
70189
70190 -pgprot_t vm_get_page_prot(unsigned long vm_flags)
70191 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
70192 {
70193 - return __pgprot(pgprot_val(protection_map[vm_flags &
70194 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
70195 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
70196 pgprot_val(arch_vm_get_page_prot(vm_flags)));
70197 +
70198 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
70199 + if (!(__supported_pte_mask & _PAGE_NX) &&
70200 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
70201 + (vm_flags & (VM_READ | VM_WRITE)))
70202 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
70203 +#endif
70204 +
70205 + return prot;
70206 }
70207 EXPORT_SYMBOL(vm_get_page_prot);
70208
70209 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
70210 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
70211 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
70212 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
70213 /*
70214 * Make sure vm_committed_as in one cacheline and not cacheline shared with
70215 * other variables. It can be updated by several CPUs frequently.
70216 @@ -228,6 +248,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
70217 struct vm_area_struct *next = vma->vm_next;
70218
70219 might_sleep();
70220 + BUG_ON(vma->vm_mirror);
70221 if (vma->vm_ops && vma->vm_ops->close)
70222 vma->vm_ops->close(vma);
70223 if (vma->vm_file) {
70224 @@ -272,6 +293,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
70225 * not page aligned -Ram Gupta
70226 */
70227 rlim = rlimit(RLIMIT_DATA);
70228 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
70229 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
70230 (mm->end_data - mm->start_data) > rlim)
70231 goto out;
70232 @@ -689,6 +711,12 @@ static int
70233 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
70234 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
70235 {
70236 +
70237 +#ifdef CONFIG_PAX_SEGMEXEC
70238 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
70239 + return 0;
70240 +#endif
70241 +
70242 if (is_mergeable_vma(vma, file, vm_flags) &&
70243 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
70244 if (vma->vm_pgoff == vm_pgoff)
70245 @@ -708,6 +736,12 @@ static int
70246 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
70247 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
70248 {
70249 +
70250 +#ifdef CONFIG_PAX_SEGMEXEC
70251 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
70252 + return 0;
70253 +#endif
70254 +
70255 if (is_mergeable_vma(vma, file, vm_flags) &&
70256 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
70257 pgoff_t vm_pglen;
70258 @@ -750,13 +784,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
70259 struct vm_area_struct *vma_merge(struct mm_struct *mm,
70260 struct vm_area_struct *prev, unsigned long addr,
70261 unsigned long end, unsigned long vm_flags,
70262 - struct anon_vma *anon_vma, struct file *file,
70263 + struct anon_vma *anon_vma, struct file *file,
70264 pgoff_t pgoff, struct mempolicy *policy)
70265 {
70266 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
70267 struct vm_area_struct *area, *next;
70268 int err;
70269
70270 +#ifdef CONFIG_PAX_SEGMEXEC
70271 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
70272 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
70273 +
70274 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
70275 +#endif
70276 +
70277 /*
70278 * We later require that vma->vm_flags == vm_flags,
70279 * so this tests vma->vm_flags & VM_SPECIAL, too.
70280 @@ -772,6 +813,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
70281 if (next && next->vm_end == end) /* cases 6, 7, 8 */
70282 next = next->vm_next;
70283
70284 +#ifdef CONFIG_PAX_SEGMEXEC
70285 + if (prev)
70286 + prev_m = pax_find_mirror_vma(prev);
70287 + if (area)
70288 + area_m = pax_find_mirror_vma(area);
70289 + if (next)
70290 + next_m = pax_find_mirror_vma(next);
70291 +#endif
70292 +
70293 /*
70294 * Can it merge with the predecessor?
70295 */
70296 @@ -791,9 +841,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
70297 /* cases 1, 6 */
70298 err = vma_adjust(prev, prev->vm_start,
70299 next->vm_end, prev->vm_pgoff, NULL);
70300 - } else /* cases 2, 5, 7 */
70301 +
70302 +#ifdef CONFIG_PAX_SEGMEXEC
70303 + if (!err && prev_m)
70304 + err = vma_adjust(prev_m, prev_m->vm_start,
70305 + next_m->vm_end, prev_m->vm_pgoff, NULL);
70306 +#endif
70307 +
70308 + } else { /* cases 2, 5, 7 */
70309 err = vma_adjust(prev, prev->vm_start,
70310 end, prev->vm_pgoff, NULL);
70311 +
70312 +#ifdef CONFIG_PAX_SEGMEXEC
70313 + if (!err && prev_m)
70314 + err = vma_adjust(prev_m, prev_m->vm_start,
70315 + end_m, prev_m->vm_pgoff, NULL);
70316 +#endif
70317 +
70318 + }
70319 if (err)
70320 return NULL;
70321 khugepaged_enter_vma_merge(prev);
70322 @@ -807,12 +872,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
70323 mpol_equal(policy, vma_policy(next)) &&
70324 can_vma_merge_before(next, vm_flags,
70325 anon_vma, file, pgoff+pglen)) {
70326 - if (prev && addr < prev->vm_end) /* case 4 */
70327 + if (prev && addr < prev->vm_end) { /* case 4 */
70328 err = vma_adjust(prev, prev->vm_start,
70329 addr, prev->vm_pgoff, NULL);
70330 - else /* cases 3, 8 */
70331 +
70332 +#ifdef CONFIG_PAX_SEGMEXEC
70333 + if (!err && prev_m)
70334 + err = vma_adjust(prev_m, prev_m->vm_start,
70335 + addr_m, prev_m->vm_pgoff, NULL);
70336 +#endif
70337 +
70338 + } else { /* cases 3, 8 */
70339 err = vma_adjust(area, addr, next->vm_end,
70340 next->vm_pgoff - pglen, NULL);
70341 +
70342 +#ifdef CONFIG_PAX_SEGMEXEC
70343 + if (!err && area_m)
70344 + err = vma_adjust(area_m, addr_m, next_m->vm_end,
70345 + next_m->vm_pgoff - pglen, NULL);
70346 +#endif
70347 +
70348 + }
70349 if (err)
70350 return NULL;
70351 khugepaged_enter_vma_merge(area);
70352 @@ -921,14 +1001,11 @@ none:
70353 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
70354 struct file *file, long pages)
70355 {
70356 - const unsigned long stack_flags
70357 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
70358 -
70359 if (file) {
70360 mm->shared_vm += pages;
70361 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
70362 mm->exec_vm += pages;
70363 - } else if (flags & stack_flags)
70364 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
70365 mm->stack_vm += pages;
70366 if (flags & (VM_RESERVED|VM_IO))
70367 mm->reserved_vm += pages;
70368 @@ -955,7 +1032,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
70369 * (the exception is when the underlying filesystem is noexec
70370 * mounted, in which case we dont add PROT_EXEC.)
70371 */
70372 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
70373 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
70374 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
70375 prot |= PROT_EXEC;
70376
70377 @@ -981,7 +1058,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
70378 /* Obtain the address to map to. we verify (or select) it and ensure
70379 * that it represents a valid section of the address space.
70380 */
70381 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
70382 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
70383 if (addr & ~PAGE_MASK)
70384 return addr;
70385
70386 @@ -992,6 +1069,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
70387 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
70388 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
70389
70390 +#ifdef CONFIG_PAX_MPROTECT
70391 + if (mm->pax_flags & MF_PAX_MPROTECT) {
70392 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
70393 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
70394 + gr_log_rwxmmap(file);
70395 +
70396 +#ifdef CONFIG_PAX_EMUPLT
70397 + vm_flags &= ~VM_EXEC;
70398 +#else
70399 + return -EPERM;
70400 +#endif
70401 +
70402 + }
70403 +
70404 + if (!(vm_flags & VM_EXEC))
70405 + vm_flags &= ~VM_MAYEXEC;
70406 +#else
70407 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
70408 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
70409 +#endif
70410 + else
70411 + vm_flags &= ~VM_MAYWRITE;
70412 + }
70413 +#endif
70414 +
70415 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
70416 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
70417 + vm_flags &= ~VM_PAGEEXEC;
70418 +#endif
70419 +
70420 if (flags & MAP_LOCKED)
70421 if (!can_do_mlock())
70422 return -EPERM;
70423 @@ -1003,6 +1110,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
70424 locked += mm->locked_vm;
70425 lock_limit = rlimit(RLIMIT_MEMLOCK);
70426 lock_limit >>= PAGE_SHIFT;
70427 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
70428 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
70429 return -EAGAIN;
70430 }
70431 @@ -1073,6 +1181,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
70432 if (error)
70433 return error;
70434
70435 + if (!gr_acl_handle_mmap(file, prot))
70436 + return -EACCES;
70437 +
70438 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
70439 }
70440 EXPORT_SYMBOL(do_mmap_pgoff);
70441 @@ -1153,7 +1264,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
70442 vm_flags_t vm_flags = vma->vm_flags;
70443
70444 /* If it was private or non-writable, the write bit is already clear */
70445 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
70446 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
70447 return 0;
70448
70449 /* The backer wishes to know when pages are first written to? */
70450 @@ -1202,14 +1313,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
70451 unsigned long charged = 0;
70452 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
70453
70454 +#ifdef CONFIG_PAX_SEGMEXEC
70455 + struct vm_area_struct *vma_m = NULL;
70456 +#endif
70457 +
70458 + /*
70459 + * mm->mmap_sem is required to protect against another thread
70460 + * changing the mappings in case we sleep.
70461 + */
70462 + verify_mm_writelocked(mm);
70463 +
70464 /* Clear old maps */
70465 error = -ENOMEM;
70466 -munmap_back:
70467 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
70468 if (vma && vma->vm_start < addr + len) {
70469 if (do_munmap(mm, addr, len))
70470 return -ENOMEM;
70471 - goto munmap_back;
70472 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
70473 + BUG_ON(vma && vma->vm_start < addr + len);
70474 }
70475
70476 /* Check against address space limit. */
70477 @@ -1258,6 +1379,16 @@ munmap_back:
70478 goto unacct_error;
70479 }
70480
70481 +#ifdef CONFIG_PAX_SEGMEXEC
70482 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
70483 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
70484 + if (!vma_m) {
70485 + error = -ENOMEM;
70486 + goto free_vma;
70487 + }
70488 + }
70489 +#endif
70490 +
70491 vma->vm_mm = mm;
70492 vma->vm_start = addr;
70493 vma->vm_end = addr + len;
70494 @@ -1282,6 +1413,19 @@ munmap_back:
70495 error = file->f_op->mmap(file, vma);
70496 if (error)
70497 goto unmap_and_free_vma;
70498 +
70499 +#ifdef CONFIG_PAX_SEGMEXEC
70500 + if (vma_m && (vm_flags & VM_EXECUTABLE))
70501 + added_exe_file_vma(mm);
70502 +#endif
70503 +
70504 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
70505 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
70506 + vma->vm_flags |= VM_PAGEEXEC;
70507 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
70508 + }
70509 +#endif
70510 +
70511 if (vm_flags & VM_EXECUTABLE)
70512 added_exe_file_vma(mm);
70513
70514 @@ -1319,6 +1463,11 @@ munmap_back:
70515 vma_link(mm, vma, prev, rb_link, rb_parent);
70516 file = vma->vm_file;
70517
70518 +#ifdef CONFIG_PAX_SEGMEXEC
70519 + if (vma_m)
70520 + BUG_ON(pax_mirror_vma(vma_m, vma));
70521 +#endif
70522 +
70523 /* Once vma denies write, undo our temporary denial count */
70524 if (correct_wcount)
70525 atomic_inc(&inode->i_writecount);
70526 @@ -1327,6 +1476,7 @@ out:
70527
70528 mm->total_vm += len >> PAGE_SHIFT;
70529 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
70530 + track_exec_limit(mm, addr, addr + len, vm_flags);
70531 if (vm_flags & VM_LOCKED) {
70532 if (!mlock_vma_pages_range(vma, addr, addr + len))
70533 mm->locked_vm += (len >> PAGE_SHIFT);
70534 @@ -1344,6 +1494,12 @@ unmap_and_free_vma:
70535 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
70536 charged = 0;
70537 free_vma:
70538 +
70539 +#ifdef CONFIG_PAX_SEGMEXEC
70540 + if (vma_m)
70541 + kmem_cache_free(vm_area_cachep, vma_m);
70542 +#endif
70543 +
70544 kmem_cache_free(vm_area_cachep, vma);
70545 unacct_error:
70546 if (charged)
70547 @@ -1351,6 +1507,44 @@ unacct_error:
70548 return error;
70549 }
70550
70551 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
70552 +{
70553 + if (!vma) {
70554 +#ifdef CONFIG_STACK_GROWSUP
70555 + if (addr > sysctl_heap_stack_gap)
70556 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
70557 + else
70558 + vma = find_vma(current->mm, 0);
70559 + if (vma && (vma->vm_flags & VM_GROWSUP))
70560 + return false;
70561 +#endif
70562 + return true;
70563 + }
70564 +
70565 + if (addr + len > vma->vm_start)
70566 + return false;
70567 +
70568 + if (vma->vm_flags & VM_GROWSDOWN)
70569 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
70570 +#ifdef CONFIG_STACK_GROWSUP
70571 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
70572 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
70573 +#endif
70574 +
70575 + return true;
70576 +}
70577 +
70578 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
70579 +{
70580 + if (vma->vm_start < len)
70581 + return -ENOMEM;
70582 + if (!(vma->vm_flags & VM_GROWSDOWN))
70583 + return vma->vm_start - len;
70584 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
70585 + return vma->vm_start - len - sysctl_heap_stack_gap;
70586 + return -ENOMEM;
70587 +}
70588 +
70589 /* Get an address range which is currently unmapped.
70590 * For shmat() with addr=0.
70591 *
70592 @@ -1377,18 +1571,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
70593 if (flags & MAP_FIXED)
70594 return addr;
70595
70596 +#ifdef CONFIG_PAX_RANDMMAP
70597 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
70598 +#endif
70599 +
70600 if (addr) {
70601 addr = PAGE_ALIGN(addr);
70602 - vma = find_vma(mm, addr);
70603 - if (TASK_SIZE - len >= addr &&
70604 - (!vma || addr + len <= vma->vm_start))
70605 - return addr;
70606 + if (TASK_SIZE - len >= addr) {
70607 + vma = find_vma(mm, addr);
70608 + if (check_heap_stack_gap(vma, addr, len))
70609 + return addr;
70610 + }
70611 }
70612 if (len > mm->cached_hole_size) {
70613 - start_addr = addr = mm->free_area_cache;
70614 + start_addr = addr = mm->free_area_cache;
70615 } else {
70616 - start_addr = addr = TASK_UNMAPPED_BASE;
70617 - mm->cached_hole_size = 0;
70618 + start_addr = addr = mm->mmap_base;
70619 + mm->cached_hole_size = 0;
70620 }
70621
70622 full_search:
70623 @@ -1399,34 +1598,40 @@ full_search:
70624 * Start a new search - just in case we missed
70625 * some holes.
70626 */
70627 - if (start_addr != TASK_UNMAPPED_BASE) {
70628 - addr = TASK_UNMAPPED_BASE;
70629 - start_addr = addr;
70630 + if (start_addr != mm->mmap_base) {
70631 + start_addr = addr = mm->mmap_base;
70632 mm->cached_hole_size = 0;
70633 goto full_search;
70634 }
70635 return -ENOMEM;
70636 }
70637 - if (!vma || addr + len <= vma->vm_start) {
70638 - /*
70639 - * Remember the place where we stopped the search:
70640 - */
70641 - mm->free_area_cache = addr + len;
70642 - return addr;
70643 - }
70644 + if (check_heap_stack_gap(vma, addr, len))
70645 + break;
70646 if (addr + mm->cached_hole_size < vma->vm_start)
70647 mm->cached_hole_size = vma->vm_start - addr;
70648 addr = vma->vm_end;
70649 }
70650 +
70651 + /*
70652 + * Remember the place where we stopped the search:
70653 + */
70654 + mm->free_area_cache = addr + len;
70655 + return addr;
70656 }
70657 #endif
70658
70659 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
70660 {
70661 +
70662 +#ifdef CONFIG_PAX_SEGMEXEC
70663 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
70664 + return;
70665 +#endif
70666 +
70667 /*
70668 * Is this a new hole at the lowest possible address?
70669 */
70670 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
70671 + if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
70672 mm->free_area_cache = addr;
70673 mm->cached_hole_size = ~0UL;
70674 }
70675 @@ -1444,7 +1649,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
70676 {
70677 struct vm_area_struct *vma;
70678 struct mm_struct *mm = current->mm;
70679 - unsigned long addr = addr0;
70680 + unsigned long base = mm->mmap_base, addr = addr0;
70681
70682 /* requested length too big for entire address space */
70683 if (len > TASK_SIZE)
70684 @@ -1453,13 +1658,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
70685 if (flags & MAP_FIXED)
70686 return addr;
70687
70688 +#ifdef CONFIG_PAX_RANDMMAP
70689 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
70690 +#endif
70691 +
70692 /* requesting a specific address */
70693 if (addr) {
70694 addr = PAGE_ALIGN(addr);
70695 - vma = find_vma(mm, addr);
70696 - if (TASK_SIZE - len >= addr &&
70697 - (!vma || addr + len <= vma->vm_start))
70698 - return addr;
70699 + if (TASK_SIZE - len >= addr) {
70700 + vma = find_vma(mm, addr);
70701 + if (check_heap_stack_gap(vma, addr, len))
70702 + return addr;
70703 + }
70704 }
70705
70706 /* check if free_area_cache is useful for us */
70707 @@ -1474,7 +1684,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
70708 /* make sure it can fit in the remaining address space */
70709 if (addr > len) {
70710 vma = find_vma(mm, addr-len);
70711 - if (!vma || addr <= vma->vm_start)
70712 + if (check_heap_stack_gap(vma, addr - len, len))
70713 /* remember the address as a hint for next time */
70714 return (mm->free_area_cache = addr-len);
70715 }
70716 @@ -1491,7 +1701,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
70717 * return with success:
70718 */
70719 vma = find_vma(mm, addr);
70720 - if (!vma || addr+len <= vma->vm_start)
70721 + if (check_heap_stack_gap(vma, addr, len))
70722 /* remember the address as a hint for next time */
70723 return (mm->free_area_cache = addr);
70724
70725 @@ -1500,8 +1710,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
70726 mm->cached_hole_size = vma->vm_start - addr;
70727
70728 /* try just below the current vma->vm_start */
70729 - addr = vma->vm_start-len;
70730 - } while (len < vma->vm_start);
70731 + addr = skip_heap_stack_gap(vma, len);
70732 + } while (!IS_ERR_VALUE(addr));
70733
70734 bottomup:
70735 /*
70736 @@ -1510,13 +1720,21 @@ bottomup:
70737 * can happen with large stack limits and large mmap()
70738 * allocations.
70739 */
70740 + mm->mmap_base = TASK_UNMAPPED_BASE;
70741 +
70742 +#ifdef CONFIG_PAX_RANDMMAP
70743 + if (mm->pax_flags & MF_PAX_RANDMMAP)
70744 + mm->mmap_base += mm->delta_mmap;
70745 +#endif
70746 +
70747 + mm->free_area_cache = mm->mmap_base;
70748 mm->cached_hole_size = ~0UL;
70749 - mm->free_area_cache = TASK_UNMAPPED_BASE;
70750 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
70751 /*
70752 * Restore the topdown base:
70753 */
70754 - mm->free_area_cache = mm->mmap_base;
70755 + mm->mmap_base = base;
70756 + mm->free_area_cache = base;
70757 mm->cached_hole_size = ~0UL;
70758
70759 return addr;
70760 @@ -1525,6 +1743,12 @@ bottomup:
70761
70762 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
70763 {
70764 +
70765 +#ifdef CONFIG_PAX_SEGMEXEC
70766 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
70767 + return;
70768 +#endif
70769 +
70770 /*
70771 * Is this a new hole at the highest possible address?
70772 */
70773 @@ -1532,8 +1756,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
70774 mm->free_area_cache = addr;
70775
70776 /* dont allow allocations above current base */
70777 - if (mm->free_area_cache > mm->mmap_base)
70778 + if (mm->free_area_cache > mm->mmap_base) {
70779 mm->free_area_cache = mm->mmap_base;
70780 + mm->cached_hole_size = ~0UL;
70781 + }
70782 }
70783
70784 unsigned long
70785 @@ -1629,6 +1855,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
70786 return vma;
70787 }
70788
70789 +#ifdef CONFIG_PAX_SEGMEXEC
70790 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
70791 +{
70792 + struct vm_area_struct *vma_m;
70793 +
70794 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
70795 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
70796 + BUG_ON(vma->vm_mirror);
70797 + return NULL;
70798 + }
70799 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
70800 + vma_m = vma->vm_mirror;
70801 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
70802 + BUG_ON(vma->vm_file != vma_m->vm_file);
70803 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
70804 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
70805 + BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
70806 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
70807 + return vma_m;
70808 +}
70809 +#endif
70810 +
70811 /*
70812 * Verify that the stack growth is acceptable and
70813 * update accounting. This is shared with both the
70814 @@ -1645,6 +1893,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
70815 return -ENOMEM;
70816
70817 /* Stack limit test */
70818 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
70819 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
70820 return -ENOMEM;
70821
70822 @@ -1655,6 +1904,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
70823 locked = mm->locked_vm + grow;
70824 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
70825 limit >>= PAGE_SHIFT;
70826 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
70827 if (locked > limit && !capable(CAP_IPC_LOCK))
70828 return -ENOMEM;
70829 }
70830 @@ -1685,37 +1935,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
70831 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
70832 * vma is the last one with address > vma->vm_end. Have to extend vma.
70833 */
70834 +#ifndef CONFIG_IA64
70835 +static
70836 +#endif
70837 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
70838 {
70839 int error;
70840 + bool locknext;
70841
70842 if (!(vma->vm_flags & VM_GROWSUP))
70843 return -EFAULT;
70844
70845 + /* Also guard against wrapping around to address 0. */
70846 + if (address < PAGE_ALIGN(address+1))
70847 + address = PAGE_ALIGN(address+1);
70848 + else
70849 + return -ENOMEM;
70850 +
70851 /*
70852 * We must make sure the anon_vma is allocated
70853 * so that the anon_vma locking is not a noop.
70854 */
70855 if (unlikely(anon_vma_prepare(vma)))
70856 return -ENOMEM;
70857 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
70858 + if (locknext && anon_vma_prepare(vma->vm_next))
70859 + return -ENOMEM;
70860 vma_lock_anon_vma(vma);
70861 + if (locknext)
70862 + vma_lock_anon_vma(vma->vm_next);
70863
70864 /*
70865 * vma->vm_start/vm_end cannot change under us because the caller
70866 * is required to hold the mmap_sem in read mode. We need the
70867 - * anon_vma lock to serialize against concurrent expand_stacks.
70868 - * Also guard against wrapping around to address 0.
70869 + * anon_vma locks to serialize against concurrent expand_stacks
70870 + * and expand_upwards.
70871 */
70872 - if (address < PAGE_ALIGN(address+4))
70873 - address = PAGE_ALIGN(address+4);
70874 - else {
70875 - vma_unlock_anon_vma(vma);
70876 - return -ENOMEM;
70877 - }
70878 error = 0;
70879
70880 /* Somebody else might have raced and expanded it already */
70881 - if (address > vma->vm_end) {
70882 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
70883 + error = -ENOMEM;
70884 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
70885 unsigned long size, grow;
70886
70887 size = address - vma->vm_start;
70888 @@ -1730,6 +1991,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
70889 }
70890 }
70891 }
70892 + if (locknext)
70893 + vma_unlock_anon_vma(vma->vm_next);
70894 vma_unlock_anon_vma(vma);
70895 khugepaged_enter_vma_merge(vma);
70896 return error;
70897 @@ -1743,6 +2006,8 @@ int expand_downwards(struct vm_area_struct *vma,
70898 unsigned long address)
70899 {
70900 int error;
70901 + bool lockprev = false;
70902 + struct vm_area_struct *prev;
70903
70904 /*
70905 * We must make sure the anon_vma is allocated
70906 @@ -1756,6 +2021,15 @@ int expand_downwards(struct vm_area_struct *vma,
70907 if (error)
70908 return error;
70909
70910 + prev = vma->vm_prev;
70911 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
70912 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
70913 +#endif
70914 + if (lockprev && anon_vma_prepare(prev))
70915 + return -ENOMEM;
70916 + if (lockprev)
70917 + vma_lock_anon_vma(prev);
70918 +
70919 vma_lock_anon_vma(vma);
70920
70921 /*
70922 @@ -1765,9 +2039,17 @@ int expand_downwards(struct vm_area_struct *vma,
70923 */
70924
70925 /* Somebody else might have raced and expanded it already */
70926 - if (address < vma->vm_start) {
70927 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
70928 + error = -ENOMEM;
70929 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
70930 unsigned long size, grow;
70931
70932 +#ifdef CONFIG_PAX_SEGMEXEC
70933 + struct vm_area_struct *vma_m;
70934 +
70935 + vma_m = pax_find_mirror_vma(vma);
70936 +#endif
70937 +
70938 size = vma->vm_end - address;
70939 grow = (vma->vm_start - address) >> PAGE_SHIFT;
70940
70941 @@ -1777,11 +2059,22 @@ int expand_downwards(struct vm_area_struct *vma,
70942 if (!error) {
70943 vma->vm_start = address;
70944 vma->vm_pgoff -= grow;
70945 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
70946 +
70947 +#ifdef CONFIG_PAX_SEGMEXEC
70948 + if (vma_m) {
70949 + vma_m->vm_start -= grow << PAGE_SHIFT;
70950 + vma_m->vm_pgoff -= grow;
70951 + }
70952 +#endif
70953 +
70954 perf_event_mmap(vma);
70955 }
70956 }
70957 }
70958 vma_unlock_anon_vma(vma);
70959 + if (lockprev)
70960 + vma_unlock_anon_vma(prev);
70961 khugepaged_enter_vma_merge(vma);
70962 return error;
70963 }
70964 @@ -1851,6 +2144,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
70965 do {
70966 long nrpages = vma_pages(vma);
70967
70968 +#ifdef CONFIG_PAX_SEGMEXEC
70969 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
70970 + vma = remove_vma(vma);
70971 + continue;
70972 + }
70973 +#endif
70974 +
70975 mm->total_vm -= nrpages;
70976 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
70977 vma = remove_vma(vma);
70978 @@ -1896,6 +2196,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
70979 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
70980 vma->vm_prev = NULL;
70981 do {
70982 +
70983 +#ifdef CONFIG_PAX_SEGMEXEC
70984 + if (vma->vm_mirror) {
70985 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
70986 + vma->vm_mirror->vm_mirror = NULL;
70987 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
70988 + vma->vm_mirror = NULL;
70989 + }
70990 +#endif
70991 +
70992 rb_erase(&vma->vm_rb, &mm->mm_rb);
70993 mm->map_count--;
70994 tail_vma = vma;
70995 @@ -1924,14 +2234,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
70996 struct vm_area_struct *new;
70997 int err = -ENOMEM;
70998
70999 +#ifdef CONFIG_PAX_SEGMEXEC
71000 + struct vm_area_struct *vma_m, *new_m = NULL;
71001 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
71002 +#endif
71003 +
71004 if (is_vm_hugetlb_page(vma) && (addr &
71005 ~(huge_page_mask(hstate_vma(vma)))))
71006 return -EINVAL;
71007
71008 +#ifdef CONFIG_PAX_SEGMEXEC
71009 + vma_m = pax_find_mirror_vma(vma);
71010 +#endif
71011 +
71012 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
71013 if (!new)
71014 goto out_err;
71015
71016 +#ifdef CONFIG_PAX_SEGMEXEC
71017 + if (vma_m) {
71018 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
71019 + if (!new_m) {
71020 + kmem_cache_free(vm_area_cachep, new);
71021 + goto out_err;
71022 + }
71023 + }
71024 +#endif
71025 +
71026 /* most fields are the same, copy all, and then fixup */
71027 *new = *vma;
71028
71029 @@ -1944,6 +2273,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
71030 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
71031 }
71032
71033 +#ifdef CONFIG_PAX_SEGMEXEC
71034 + if (vma_m) {
71035 + *new_m = *vma_m;
71036 + INIT_LIST_HEAD(&new_m->anon_vma_chain);
71037 + new_m->vm_mirror = new;
71038 + new->vm_mirror = new_m;
71039 +
71040 + if (new_below)
71041 + new_m->vm_end = addr_m;
71042 + else {
71043 + new_m->vm_start = addr_m;
71044 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
71045 + }
71046 + }
71047 +#endif
71048 +
71049 pol = mpol_dup(vma_policy(vma));
71050 if (IS_ERR(pol)) {
71051 err = PTR_ERR(pol);
71052 @@ -1969,6 +2314,42 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
71053 else
71054 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
71055
71056 +#ifdef CONFIG_PAX_SEGMEXEC
71057 + if (!err && vma_m) {
71058 + if (anon_vma_clone(new_m, vma_m))
71059 + goto out_free_mpol;
71060 +
71061 + mpol_get(pol);
71062 + vma_set_policy(new_m, pol);
71063 +
71064 + if (new_m->vm_file) {
71065 + get_file(new_m->vm_file);
71066 + if (vma_m->vm_flags & VM_EXECUTABLE)
71067 + added_exe_file_vma(mm);
71068 + }
71069 +
71070 + if (new_m->vm_ops && new_m->vm_ops->open)
71071 + new_m->vm_ops->open(new_m);
71072 +
71073 + if (new_below)
71074 + err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
71075 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
71076 + else
71077 + err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
71078 +
71079 + if (err) {
71080 + if (new_m->vm_ops && new_m->vm_ops->close)
71081 + new_m->vm_ops->close(new_m);
71082 + if (new_m->vm_file) {
71083 + if (vma_m->vm_flags & VM_EXECUTABLE)
71084 + removed_exe_file_vma(mm);
71085 + fput(new_m->vm_file);
71086 + }
71087 + mpol_put(pol);
71088 + }
71089 + }
71090 +#endif
71091 +
71092 /* Success. */
71093 if (!err)
71094 return 0;
71095 @@ -1981,10 +2362,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
71096 removed_exe_file_vma(mm);
71097 fput(new->vm_file);
71098 }
71099 - unlink_anon_vmas(new);
71100 out_free_mpol:
71101 mpol_put(pol);
71102 out_free_vma:
71103 +
71104 +#ifdef CONFIG_PAX_SEGMEXEC
71105 + if (new_m) {
71106 + unlink_anon_vmas(new_m);
71107 + kmem_cache_free(vm_area_cachep, new_m);
71108 + }
71109 +#endif
71110 +
71111 + unlink_anon_vmas(new);
71112 kmem_cache_free(vm_area_cachep, new);
71113 out_err:
71114 return err;
71115 @@ -1997,6 +2386,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
71116 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
71117 unsigned long addr, int new_below)
71118 {
71119 +
71120 +#ifdef CONFIG_PAX_SEGMEXEC
71121 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
71122 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
71123 + if (mm->map_count >= sysctl_max_map_count-1)
71124 + return -ENOMEM;
71125 + } else
71126 +#endif
71127 +
71128 if (mm->map_count >= sysctl_max_map_count)
71129 return -ENOMEM;
71130
71131 @@ -2008,11 +2406,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
71132 * work. This now handles partial unmappings.
71133 * Jeremy Fitzhardinge <jeremy@goop.org>
71134 */
71135 +#ifdef CONFIG_PAX_SEGMEXEC
71136 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
71137 {
71138 + int ret = __do_munmap(mm, start, len);
71139 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
71140 + return ret;
71141 +
71142 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
71143 +}
71144 +
71145 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
71146 +#else
71147 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
71148 +#endif
71149 +{
71150 unsigned long end;
71151 struct vm_area_struct *vma, *prev, *last;
71152
71153 + /*
71154 + * mm->mmap_sem is required to protect against another thread
71155 + * changing the mappings in case we sleep.
71156 + */
71157 + verify_mm_writelocked(mm);
71158 +
71159 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
71160 return -EINVAL;
71161
71162 @@ -2087,6 +2504,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
71163 /* Fix up all other VM information */
71164 remove_vma_list(mm, vma);
71165
71166 + track_exec_limit(mm, start, end, 0UL);
71167 +
71168 return 0;
71169 }
71170
71171 @@ -2099,22 +2518,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
71172
71173 profile_munmap(addr);
71174
71175 +#ifdef CONFIG_PAX_SEGMEXEC
71176 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
71177 + (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
71178 + return -EINVAL;
71179 +#endif
71180 +
71181 down_write(&mm->mmap_sem);
71182 ret = do_munmap(mm, addr, len);
71183 up_write(&mm->mmap_sem);
71184 return ret;
71185 }
71186
71187 -static inline void verify_mm_writelocked(struct mm_struct *mm)
71188 -{
71189 -#ifdef CONFIG_DEBUG_VM
71190 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
71191 - WARN_ON(1);
71192 - up_read(&mm->mmap_sem);
71193 - }
71194 -#endif
71195 -}
71196 -
71197 /*
71198 * this is really a simplified "do_mmap". it only handles
71199 * anonymous maps. eventually we may be able to do some
71200 @@ -2128,6 +2543,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
71201 struct rb_node ** rb_link, * rb_parent;
71202 pgoff_t pgoff = addr >> PAGE_SHIFT;
71203 int error;
71204 + unsigned long charged;
71205
71206 len = PAGE_ALIGN(len);
71207 if (!len)
71208 @@ -2139,16 +2555,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
71209
71210 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
71211
71212 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
71213 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
71214 + flags &= ~VM_EXEC;
71215 +
71216 +#ifdef CONFIG_PAX_MPROTECT
71217 + if (mm->pax_flags & MF_PAX_MPROTECT)
71218 + flags &= ~VM_MAYEXEC;
71219 +#endif
71220 +
71221 + }
71222 +#endif
71223 +
71224 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
71225 if (error & ~PAGE_MASK)
71226 return error;
71227
71228 + charged = len >> PAGE_SHIFT;
71229 +
71230 /*
71231 * mlock MCL_FUTURE?
71232 */
71233 if (mm->def_flags & VM_LOCKED) {
71234 unsigned long locked, lock_limit;
71235 - locked = len >> PAGE_SHIFT;
71236 + locked = charged;
71237 locked += mm->locked_vm;
71238 lock_limit = rlimit(RLIMIT_MEMLOCK);
71239 lock_limit >>= PAGE_SHIFT;
71240 @@ -2165,22 +2595,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
71241 /*
71242 * Clear old maps. this also does some error checking for us
71243 */
71244 - munmap_back:
71245 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
71246 if (vma && vma->vm_start < addr + len) {
71247 if (do_munmap(mm, addr, len))
71248 return -ENOMEM;
71249 - goto munmap_back;
71250 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
71251 + BUG_ON(vma && vma->vm_start < addr + len);
71252 }
71253
71254 /* Check against address space limits *after* clearing old maps... */
71255 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
71256 + if (!may_expand_vm(mm, charged))
71257 return -ENOMEM;
71258
71259 if (mm->map_count > sysctl_max_map_count)
71260 return -ENOMEM;
71261
71262 - if (security_vm_enough_memory(len >> PAGE_SHIFT))
71263 + if (security_vm_enough_memory(charged))
71264 return -ENOMEM;
71265
71266 /* Can we just expand an old private anonymous mapping? */
71267 @@ -2194,7 +2624,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
71268 */
71269 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
71270 if (!vma) {
71271 - vm_unacct_memory(len >> PAGE_SHIFT);
71272 + vm_unacct_memory(charged);
71273 return -ENOMEM;
71274 }
71275
71276 @@ -2208,11 +2638,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
71277 vma_link(mm, vma, prev, rb_link, rb_parent);
71278 out:
71279 perf_event_mmap(vma);
71280 - mm->total_vm += len >> PAGE_SHIFT;
71281 + mm->total_vm += charged;
71282 if (flags & VM_LOCKED) {
71283 if (!mlock_vma_pages_range(vma, addr, addr + len))
71284 - mm->locked_vm += (len >> PAGE_SHIFT);
71285 + mm->locked_vm += charged;
71286 }
71287 + track_exec_limit(mm, addr, addr + len, flags);
71288 return addr;
71289 }
71290
71291 @@ -2259,8 +2690,10 @@ void exit_mmap(struct mm_struct *mm)
71292 * Walk the list again, actually closing and freeing it,
71293 * with preemption enabled, without holding any MM locks.
71294 */
71295 - while (vma)
71296 + while (vma) {
71297 + vma->vm_mirror = NULL;
71298 vma = remove_vma(vma);
71299 + }
71300
71301 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
71302 }
71303 @@ -2274,6 +2707,13 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
71304 struct vm_area_struct * __vma, * prev;
71305 struct rb_node ** rb_link, * rb_parent;
71306
71307 +#ifdef CONFIG_PAX_SEGMEXEC
71308 + struct vm_area_struct *vma_m = NULL;
71309 +#endif
71310 +
71311 + if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
71312 + return -EPERM;
71313 +
71314 /*
71315 * The vm_pgoff of a purely anonymous vma should be irrelevant
71316 * until its first write fault, when page's anon_vma and index
71317 @@ -2296,7 +2736,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
71318 if ((vma->vm_flags & VM_ACCOUNT) &&
71319 security_vm_enough_memory_mm(mm, vma_pages(vma)))
71320 return -ENOMEM;
71321 +
71322 +#ifdef CONFIG_PAX_SEGMEXEC
71323 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
71324 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
71325 + if (!vma_m)
71326 + return -ENOMEM;
71327 + }
71328 +#endif
71329 +
71330 vma_link(mm, vma, prev, rb_link, rb_parent);
71331 +
71332 +#ifdef CONFIG_PAX_SEGMEXEC
71333 + if (vma_m)
71334 + BUG_ON(pax_mirror_vma(vma_m, vma));
71335 +#endif
71336 +
71337 return 0;
71338 }
71339
71340 @@ -2315,6 +2770,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
71341 struct mempolicy *pol;
71342 bool faulted_in_anon_vma = true;
71343
71344 + BUG_ON(vma->vm_mirror);
71345 +
71346 /*
71347 * If anonymous vma has not yet been faulted, update new pgoff
71348 * to match new location, to increase its chance of merging.
71349 @@ -2382,6 +2839,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
71350 return NULL;
71351 }
71352
71353 +#ifdef CONFIG_PAX_SEGMEXEC
71354 +long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
71355 +{
71356 + struct vm_area_struct *prev_m;
71357 + struct rb_node **rb_link_m, *rb_parent_m;
71358 + struct mempolicy *pol_m;
71359 +
71360 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
71361 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
71362 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
71363 + *vma_m = *vma;
71364 + INIT_LIST_HEAD(&vma_m->anon_vma_chain);
71365 + if (anon_vma_clone(vma_m, vma))
71366 + return -ENOMEM;
71367 + pol_m = vma_policy(vma_m);
71368 + mpol_get(pol_m);
71369 + vma_set_policy(vma_m, pol_m);
71370 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
71371 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
71372 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
71373 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
71374 + if (vma_m->vm_file)
71375 + get_file(vma_m->vm_file);
71376 + if (vma_m->vm_ops && vma_m->vm_ops->open)
71377 + vma_m->vm_ops->open(vma_m);
71378 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
71379 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
71380 + vma_m->vm_mirror = vma;
71381 + vma->vm_mirror = vma_m;
71382 + return 0;
71383 +}
71384 +#endif
71385 +
71386 /*
71387 * Return true if the calling process may expand its vm space by the passed
71388 * number of pages
71389 @@ -2392,7 +2882,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
71390 unsigned long lim;
71391
71392 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
71393 -
71394 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
71395 if (cur + npages > lim)
71396 return 0;
71397 return 1;
71398 @@ -2463,6 +2953,22 @@ int install_special_mapping(struct mm_struct *mm,
71399 vma->vm_start = addr;
71400 vma->vm_end = addr + len;
71401
71402 +#ifdef CONFIG_PAX_MPROTECT
71403 + if (mm->pax_flags & MF_PAX_MPROTECT) {
71404 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
71405 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
71406 + return -EPERM;
71407 + if (!(vm_flags & VM_EXEC))
71408 + vm_flags &= ~VM_MAYEXEC;
71409 +#else
71410 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
71411 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
71412 +#endif
71413 + else
71414 + vm_flags &= ~VM_MAYWRITE;
71415 + }
71416 +#endif
71417 +
71418 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
71419 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
71420
71421 diff --git a/mm/mprotect.c b/mm/mprotect.c
71422 index f437d05..e3763f6 100644
71423 --- a/mm/mprotect.c
71424 +++ b/mm/mprotect.c
71425 @@ -23,10 +23,16 @@
71426 #include <linux/mmu_notifier.h>
71427 #include <linux/migrate.h>
71428 #include <linux/perf_event.h>
71429 +
71430 +#ifdef CONFIG_PAX_MPROTECT
71431 +#include <linux/elf.h>
71432 +#endif
71433 +
71434 #include <asm/uaccess.h>
71435 #include <asm/pgtable.h>
71436 #include <asm/cacheflush.h>
71437 #include <asm/tlbflush.h>
71438 +#include <asm/mmu_context.h>
71439
71440 #ifndef pgprot_modify
71441 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
71442 @@ -141,6 +147,48 @@ static void change_protection(struct vm_area_struct *vma,
71443 flush_tlb_range(vma, start, end);
71444 }
71445
71446 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
71447 +/* called while holding the mmap semaphor for writing except stack expansion */
71448 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
71449 +{
71450 + unsigned long oldlimit, newlimit = 0UL;
71451 +
71452 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
71453 + return;
71454 +
71455 + spin_lock(&mm->page_table_lock);
71456 + oldlimit = mm->context.user_cs_limit;
71457 + if ((prot & VM_EXEC) && oldlimit < end)
71458 + /* USER_CS limit moved up */
71459 + newlimit = end;
71460 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
71461 + /* USER_CS limit moved down */
71462 + newlimit = start;
71463 +
71464 + if (newlimit) {
71465 + mm->context.user_cs_limit = newlimit;
71466 +
71467 +#ifdef CONFIG_SMP
71468 + wmb();
71469 + cpus_clear(mm->context.cpu_user_cs_mask);
71470 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
71471 +#endif
71472 +
71473 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
71474 + }
71475 + spin_unlock(&mm->page_table_lock);
71476 + if (newlimit == end) {
71477 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
71478 +
71479 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
71480 + if (is_vm_hugetlb_page(vma))
71481 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
71482 + else
71483 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
71484 + }
71485 +}
71486 +#endif
71487 +
71488 int
71489 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
71490 unsigned long start, unsigned long end, unsigned long newflags)
71491 @@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
71492 int error;
71493 int dirty_accountable = 0;
71494
71495 +#ifdef CONFIG_PAX_SEGMEXEC
71496 + struct vm_area_struct *vma_m = NULL;
71497 + unsigned long start_m, end_m;
71498 +
71499 + start_m = start + SEGMEXEC_TASK_SIZE;
71500 + end_m = end + SEGMEXEC_TASK_SIZE;
71501 +#endif
71502 +
71503 if (newflags == oldflags) {
71504 *pprev = vma;
71505 return 0;
71506 }
71507
71508 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
71509 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
71510 +
71511 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
71512 + return -ENOMEM;
71513 +
71514 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
71515 + return -ENOMEM;
71516 + }
71517 +
71518 /*
71519 * If we make a private mapping writable we increase our commit;
71520 * but (without finer accounting) cannot reduce our commit if we
71521 @@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
71522 }
71523 }
71524
71525 +#ifdef CONFIG_PAX_SEGMEXEC
71526 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
71527 + if (start != vma->vm_start) {
71528 + error = split_vma(mm, vma, start, 1);
71529 + if (error)
71530 + goto fail;
71531 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
71532 + *pprev = (*pprev)->vm_next;
71533 + }
71534 +
71535 + if (end != vma->vm_end) {
71536 + error = split_vma(mm, vma, end, 0);
71537 + if (error)
71538 + goto fail;
71539 + }
71540 +
71541 + if (pax_find_mirror_vma(vma)) {
71542 + error = __do_munmap(mm, start_m, end_m - start_m);
71543 + if (error)
71544 + goto fail;
71545 + } else {
71546 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
71547 + if (!vma_m) {
71548 + error = -ENOMEM;
71549 + goto fail;
71550 + }
71551 + vma->vm_flags = newflags;
71552 + error = pax_mirror_vma(vma_m, vma);
71553 + if (error) {
71554 + vma->vm_flags = oldflags;
71555 + goto fail;
71556 + }
71557 + }
71558 + }
71559 +#endif
71560 +
71561 /*
71562 * First try to merge with previous and/or next vma.
71563 */
71564 @@ -204,9 +306,21 @@ success:
71565 * vm_flags and vm_page_prot are protected by the mmap_sem
71566 * held in write mode.
71567 */
71568 +
71569 +#ifdef CONFIG_PAX_SEGMEXEC
71570 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
71571 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
71572 +#endif
71573 +
71574 vma->vm_flags = newflags;
71575 +
71576 +#ifdef CONFIG_PAX_MPROTECT
71577 + if (mm->binfmt && mm->binfmt->handle_mprotect)
71578 + mm->binfmt->handle_mprotect(vma, newflags);
71579 +#endif
71580 +
71581 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
71582 - vm_get_page_prot(newflags));
71583 + vm_get_page_prot(vma->vm_flags));
71584
71585 if (vma_wants_writenotify(vma)) {
71586 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
71587 @@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
71588 end = start + len;
71589 if (end <= start)
71590 return -ENOMEM;
71591 +
71592 +#ifdef CONFIG_PAX_SEGMEXEC
71593 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
71594 + if (end > SEGMEXEC_TASK_SIZE)
71595 + return -EINVAL;
71596 + } else
71597 +#endif
71598 +
71599 + if (end > TASK_SIZE)
71600 + return -EINVAL;
71601 +
71602 if (!arch_validate_prot(prot))
71603 return -EINVAL;
71604
71605 @@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
71606 /*
71607 * Does the application expect PROT_READ to imply PROT_EXEC:
71608 */
71609 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
71610 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
71611 prot |= PROT_EXEC;
71612
71613 vm_flags = calc_vm_prot_bits(prot);
71614 @@ -288,6 +413,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
71615 if (start > vma->vm_start)
71616 prev = vma;
71617
71618 +#ifdef CONFIG_PAX_MPROTECT
71619 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
71620 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
71621 +#endif
71622 +
71623 for (nstart = start ; ; ) {
71624 unsigned long newflags;
71625
71626 @@ -297,6 +427,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
71627
71628 /* newflags >> 4 shift VM_MAY% in place of VM_% */
71629 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
71630 + if (prot & (PROT_WRITE | PROT_EXEC))
71631 + gr_log_rwxmprotect(vma->vm_file);
71632 +
71633 + error = -EACCES;
71634 + goto out;
71635 + }
71636 +
71637 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
71638 error = -EACCES;
71639 goto out;
71640 }
71641 @@ -311,6 +449,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
71642 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
71643 if (error)
71644 goto out;
71645 +
71646 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
71647 +
71648 nstart = tmp;
71649
71650 if (nstart < prev->vm_end)
71651 diff --git a/mm/mremap.c b/mm/mremap.c
71652 index 87bb839..c3bfadb 100644
71653 --- a/mm/mremap.c
71654 +++ b/mm/mremap.c
71655 @@ -106,6 +106,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
71656 continue;
71657 pte = ptep_get_and_clear(mm, old_addr, old_pte);
71658 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
71659 +
71660 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
71661 + if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
71662 + pte = pte_exprotect(pte);
71663 +#endif
71664 +
71665 set_pte_at(mm, new_addr, new_pte, pte);
71666 }
71667
71668 @@ -299,6 +305,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
71669 if (is_vm_hugetlb_page(vma))
71670 goto Einval;
71671
71672 +#ifdef CONFIG_PAX_SEGMEXEC
71673 + if (pax_find_mirror_vma(vma))
71674 + goto Einval;
71675 +#endif
71676 +
71677 /* We can't remap across vm area boundaries */
71678 if (old_len > vma->vm_end - addr)
71679 goto Efault;
71680 @@ -355,20 +366,25 @@ static unsigned long mremap_to(unsigned long addr,
71681 unsigned long ret = -EINVAL;
71682 unsigned long charged = 0;
71683 unsigned long map_flags;
71684 + unsigned long pax_task_size = TASK_SIZE;
71685
71686 if (new_addr & ~PAGE_MASK)
71687 goto out;
71688
71689 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
71690 +#ifdef CONFIG_PAX_SEGMEXEC
71691 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
71692 + pax_task_size = SEGMEXEC_TASK_SIZE;
71693 +#endif
71694 +
71695 + pax_task_size -= PAGE_SIZE;
71696 +
71697 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
71698 goto out;
71699
71700 /* Check if the location we're moving into overlaps the
71701 * old location at all, and fail if it does.
71702 */
71703 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
71704 - goto out;
71705 -
71706 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
71707 + if (addr + old_len > new_addr && new_addr + new_len > addr)
71708 goto out;
71709
71710 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
71711 @@ -440,6 +456,7 @@ unsigned long do_mremap(unsigned long addr,
71712 struct vm_area_struct *vma;
71713 unsigned long ret = -EINVAL;
71714 unsigned long charged = 0;
71715 + unsigned long pax_task_size = TASK_SIZE;
71716
71717 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
71718 goto out;
71719 @@ -458,6 +475,17 @@ unsigned long do_mremap(unsigned long addr,
71720 if (!new_len)
71721 goto out;
71722
71723 +#ifdef CONFIG_PAX_SEGMEXEC
71724 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
71725 + pax_task_size = SEGMEXEC_TASK_SIZE;
71726 +#endif
71727 +
71728 + pax_task_size -= PAGE_SIZE;
71729 +
71730 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
71731 + old_len > pax_task_size || addr > pax_task_size-old_len)
71732 + goto out;
71733 +
71734 if (flags & MREMAP_FIXED) {
71735 if (flags & MREMAP_MAYMOVE)
71736 ret = mremap_to(addr, old_len, new_addr, new_len);
71737 @@ -507,6 +535,7 @@ unsigned long do_mremap(unsigned long addr,
71738 addr + new_len);
71739 }
71740 ret = addr;
71741 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
71742 goto out;
71743 }
71744 }
71745 @@ -533,7 +562,13 @@ unsigned long do_mremap(unsigned long addr,
71746 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
71747 if (ret)
71748 goto out;
71749 +
71750 + map_flags = vma->vm_flags;
71751 ret = move_vma(vma, addr, old_len, new_len, new_addr);
71752 + if (!(ret & ~PAGE_MASK)) {
71753 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
71754 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
71755 + }
71756 }
71757 out:
71758 if (ret & ~PAGE_MASK)
71759 diff --git a/mm/nommu.c b/mm/nommu.c
71760 index f59e170..34e2a2b 100644
71761 --- a/mm/nommu.c
71762 +++ b/mm/nommu.c
71763 @@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
71764 int sysctl_overcommit_ratio = 50; /* default is 50% */
71765 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
71766 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
71767 -int heap_stack_gap = 0;
71768
71769 atomic_long_t mmap_pages_allocated;
71770
71771 @@ -827,15 +826,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
71772 EXPORT_SYMBOL(find_vma);
71773
71774 /*
71775 - * find a VMA
71776 - * - we don't extend stack VMAs under NOMMU conditions
71777 - */
71778 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
71779 -{
71780 - return find_vma(mm, addr);
71781 -}
71782 -
71783 -/*
71784 * expand a stack to a given address
71785 * - not supported under NOMMU conditions
71786 */
71787 @@ -1555,6 +1545,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
71788
71789 /* most fields are the same, copy all, and then fixup */
71790 *new = *vma;
71791 + INIT_LIST_HEAD(&new->anon_vma_chain);
71792 *region = *vma->vm_region;
71793 new->vm_region = region;
71794
71795 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
71796 index a13ded1..b949d15 100644
71797 --- a/mm/page_alloc.c
71798 +++ b/mm/page_alloc.c
71799 @@ -335,7 +335,7 @@ out:
71800 * This usage means that zero-order pages may not be compound.
71801 */
71802
71803 -static void free_compound_page(struct page *page)
71804 +void free_compound_page(struct page *page)
71805 {
71806 __free_pages_ok(page, compound_order(page));
71807 }
71808 @@ -692,6 +692,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
71809 int i;
71810 int bad = 0;
71811
71812 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
71813 + unsigned long index = 1UL << order;
71814 +#endif
71815 +
71816 trace_mm_page_free(page, order);
71817 kmemcheck_free_shadow(page, order);
71818
71819 @@ -707,6 +711,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
71820 debug_check_no_obj_freed(page_address(page),
71821 PAGE_SIZE << order);
71822 }
71823 +
71824 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
71825 + for (; index; --index)
71826 + sanitize_highpage(page + index - 1);
71827 +#endif
71828 +
71829 arch_free_page(page, order);
71830 kernel_map_pages(page, 1 << order, 0);
71831
71832 @@ -830,8 +840,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
71833 arch_alloc_page(page, order);
71834 kernel_map_pages(page, 1 << order, 1);
71835
71836 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
71837 if (gfp_flags & __GFP_ZERO)
71838 prep_zero_page(page, order, gfp_flags);
71839 +#endif
71840
71841 if (order && (gfp_flags & __GFP_COMP))
71842 prep_compound_page(page, order);
71843 @@ -3468,7 +3480,13 @@ static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
71844 unsigned long pfn;
71845
71846 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
71847 +#ifdef CONFIG_X86_32
71848 + /* boot failures in VMware 8 on 32bit vanilla since
71849 + this change */
71850 + if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
71851 +#else
71852 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
71853 +#endif
71854 return 1;
71855 }
71856 return 0;
71857 diff --git a/mm/percpu.c b/mm/percpu.c
71858 index f47af91..7eeef99 100644
71859 --- a/mm/percpu.c
71860 +++ b/mm/percpu.c
71861 @@ -122,7 +122,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
71862 static unsigned int pcpu_high_unit_cpu __read_mostly;
71863
71864 /* the address of the first chunk which starts with the kernel static area */
71865 -void *pcpu_base_addr __read_mostly;
71866 +void *pcpu_base_addr __read_only;
71867 EXPORT_SYMBOL_GPL(pcpu_base_addr);
71868
71869 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
71870 diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
71871 index c20ff48..137702a 100644
71872 --- a/mm/process_vm_access.c
71873 +++ b/mm/process_vm_access.c
71874 @@ -13,6 +13,7 @@
71875 #include <linux/uio.h>
71876 #include <linux/sched.h>
71877 #include <linux/highmem.h>
71878 +#include <linux/security.h>
71879 #include <linux/ptrace.h>
71880 #include <linux/slab.h>
71881 #include <linux/syscalls.h>
71882 @@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
71883 size_t iov_l_curr_offset = 0;
71884 ssize_t iov_len;
71885
71886 + return -ENOSYS; // PaX: until properly audited
71887 +
71888 /*
71889 * Work out how many pages of struct pages we're going to need
71890 * when eventually calling get_user_pages
71891 */
71892 for (i = 0; i < riovcnt; i++) {
71893 iov_len = rvec[i].iov_len;
71894 - if (iov_len > 0) {
71895 - nr_pages_iov = ((unsigned long)rvec[i].iov_base
71896 - + iov_len)
71897 - / PAGE_SIZE - (unsigned long)rvec[i].iov_base
71898 - / PAGE_SIZE + 1;
71899 - nr_pages = max(nr_pages, nr_pages_iov);
71900 - }
71901 + if (iov_len <= 0)
71902 + continue;
71903 + nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
71904 + (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
71905 + nr_pages = max(nr_pages, nr_pages_iov);
71906 }
71907
71908 if (nr_pages == 0)
71909 @@ -298,6 +299,11 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
71910 goto free_proc_pages;
71911 }
71912
71913 + if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
71914 + rc = -EPERM;
71915 + goto put_task_struct;
71916 + }
71917 +
71918 mm = mm_access(task, PTRACE_MODE_ATTACH);
71919 if (!mm || IS_ERR(mm)) {
71920 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
71921 diff --git a/mm/rmap.c b/mm/rmap.c
71922 index c8454e0..b04f3a2 100644
71923 --- a/mm/rmap.c
71924 +++ b/mm/rmap.c
71925 @@ -152,6 +152,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
71926 struct anon_vma *anon_vma = vma->anon_vma;
71927 struct anon_vma_chain *avc;
71928
71929 +#ifdef CONFIG_PAX_SEGMEXEC
71930 + struct anon_vma_chain *avc_m = NULL;
71931 +#endif
71932 +
71933 might_sleep();
71934 if (unlikely(!anon_vma)) {
71935 struct mm_struct *mm = vma->vm_mm;
71936 @@ -161,6 +165,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
71937 if (!avc)
71938 goto out_enomem;
71939
71940 +#ifdef CONFIG_PAX_SEGMEXEC
71941 + avc_m = anon_vma_chain_alloc(GFP_KERNEL);
71942 + if (!avc_m)
71943 + goto out_enomem_free_avc;
71944 +#endif
71945 +
71946 anon_vma = find_mergeable_anon_vma(vma);
71947 allocated = NULL;
71948 if (!anon_vma) {
71949 @@ -174,6 +184,21 @@ int anon_vma_prepare(struct vm_area_struct *vma)
71950 /* page_table_lock to protect against threads */
71951 spin_lock(&mm->page_table_lock);
71952 if (likely(!vma->anon_vma)) {
71953 +
71954 +#ifdef CONFIG_PAX_SEGMEXEC
71955 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
71956 +
71957 + if (vma_m) {
71958 + BUG_ON(vma_m->anon_vma);
71959 + vma_m->anon_vma = anon_vma;
71960 + avc_m->anon_vma = anon_vma;
71961 + avc_m->vma = vma;
71962 + list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
71963 + list_add(&avc_m->same_anon_vma, &anon_vma->head);
71964 + avc_m = NULL;
71965 + }
71966 +#endif
71967 +
71968 vma->anon_vma = anon_vma;
71969 avc->anon_vma = anon_vma;
71970 avc->vma = vma;
71971 @@ -187,12 +212,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
71972
71973 if (unlikely(allocated))
71974 put_anon_vma(allocated);
71975 +
71976 +#ifdef CONFIG_PAX_SEGMEXEC
71977 + if (unlikely(avc_m))
71978 + anon_vma_chain_free(avc_m);
71979 +#endif
71980 +
71981 if (unlikely(avc))
71982 anon_vma_chain_free(avc);
71983 }
71984 return 0;
71985
71986 out_enomem_free_avc:
71987 +
71988 +#ifdef CONFIG_PAX_SEGMEXEC
71989 + if (avc_m)
71990 + anon_vma_chain_free(avc_m);
71991 +#endif
71992 +
71993 anon_vma_chain_free(avc);
71994 out_enomem:
71995 return -ENOMEM;
71996 @@ -243,7 +280,7 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
71997 * Attach the anon_vmas from src to dst.
71998 * Returns 0 on success, -ENOMEM on failure.
71999 */
72000 -int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
72001 +int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
72002 {
72003 struct anon_vma_chain *avc, *pavc;
72004 struct anon_vma *root = NULL;
72005 @@ -321,7 +358,7 @@ void anon_vma_moveto_tail(struct vm_area_struct *dst)
72006 * the corresponding VMA in the parent process is attached to.
72007 * Returns 0 on success, non-zero on failure.
72008 */
72009 -int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
72010 +int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
72011 {
72012 struct anon_vma_chain *avc;
72013 struct anon_vma *anon_vma;
72014 diff --git a/mm/shmem.c b/mm/shmem.c
72015 index 269d049..a9d2b50 100644
72016 --- a/mm/shmem.c
72017 +++ b/mm/shmem.c
72018 @@ -31,7 +31,7 @@
72019 #include <linux/export.h>
72020 #include <linux/swap.h>
72021
72022 -static struct vfsmount *shm_mnt;
72023 +struct vfsmount *shm_mnt;
72024
72025 #ifdef CONFIG_SHMEM
72026 /*
72027 @@ -74,7 +74,7 @@ static struct vfsmount *shm_mnt;
72028 #define BOGO_DIRENT_SIZE 20
72029
72030 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
72031 -#define SHORT_SYMLINK_LEN 128
72032 +#define SHORT_SYMLINK_LEN 64
72033
72034 struct shmem_xattr {
72035 struct list_head list; /* anchored by shmem_inode_info->xattr_list */
72036 @@ -2180,8 +2180,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
72037 int err = -ENOMEM;
72038
72039 /* Round up to L1_CACHE_BYTES to resist false sharing */
72040 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
72041 - L1_CACHE_BYTES), GFP_KERNEL);
72042 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
72043 if (!sbinfo)
72044 return -ENOMEM;
72045
72046 diff --git a/mm/slab.c b/mm/slab.c
72047 index f0bd785..348b96a 100644
72048 --- a/mm/slab.c
72049 +++ b/mm/slab.c
72050 @@ -153,7 +153,7 @@
72051
72052 /* Legal flag mask for kmem_cache_create(). */
72053 #if DEBUG
72054 -# define CREATE_MASK (SLAB_RED_ZONE | \
72055 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
72056 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
72057 SLAB_CACHE_DMA | \
72058 SLAB_STORE_USER | \
72059 @@ -161,7 +161,7 @@
72060 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
72061 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
72062 #else
72063 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
72064 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
72065 SLAB_CACHE_DMA | \
72066 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
72067 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
72068 @@ -290,7 +290,7 @@ struct kmem_list3 {
72069 * Need this for bootstrapping a per node allocator.
72070 */
72071 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
72072 -static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
72073 +static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
72074 #define CACHE_CACHE 0
72075 #define SIZE_AC MAX_NUMNODES
72076 #define SIZE_L3 (2 * MAX_NUMNODES)
72077 @@ -391,10 +391,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
72078 if ((x)->max_freeable < i) \
72079 (x)->max_freeable = i; \
72080 } while (0)
72081 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
72082 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
72083 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
72084 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
72085 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
72086 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
72087 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
72088 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
72089 #else
72090 #define STATS_INC_ACTIVE(x) do { } while (0)
72091 #define STATS_DEC_ACTIVE(x) do { } while (0)
72092 @@ -542,7 +542,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
72093 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
72094 */
72095 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
72096 - const struct slab *slab, void *obj)
72097 + const struct slab *slab, const void *obj)
72098 {
72099 u32 offset = (obj - slab->s_mem);
72100 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
72101 @@ -568,7 +568,7 @@ struct cache_names {
72102 static struct cache_names __initdata cache_names[] = {
72103 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
72104 #include <linux/kmalloc_sizes.h>
72105 - {NULL,}
72106 + {NULL}
72107 #undef CACHE
72108 };
72109
72110 @@ -1588,7 +1588,7 @@ void __init kmem_cache_init(void)
72111 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
72112 sizes[INDEX_AC].cs_size,
72113 ARCH_KMALLOC_MINALIGN,
72114 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
72115 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
72116 NULL);
72117
72118 if (INDEX_AC != INDEX_L3) {
72119 @@ -1596,7 +1596,7 @@ void __init kmem_cache_init(void)
72120 kmem_cache_create(names[INDEX_L3].name,
72121 sizes[INDEX_L3].cs_size,
72122 ARCH_KMALLOC_MINALIGN,
72123 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
72124 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
72125 NULL);
72126 }
72127
72128 @@ -1614,7 +1614,7 @@ void __init kmem_cache_init(void)
72129 sizes->cs_cachep = kmem_cache_create(names->name,
72130 sizes->cs_size,
72131 ARCH_KMALLOC_MINALIGN,
72132 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
72133 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
72134 NULL);
72135 }
72136 #ifdef CONFIG_ZONE_DMA
72137 @@ -4339,10 +4339,10 @@ static int s_show(struct seq_file *m, void *p)
72138 }
72139 /* cpu stats */
72140 {
72141 - unsigned long allochit = atomic_read(&cachep->allochit);
72142 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
72143 - unsigned long freehit = atomic_read(&cachep->freehit);
72144 - unsigned long freemiss = atomic_read(&cachep->freemiss);
72145 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
72146 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
72147 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
72148 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
72149
72150 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
72151 allochit, allocmiss, freehit, freemiss);
72152 @@ -4601,13 +4601,62 @@ static int __init slab_proc_init(void)
72153 {
72154 proc_create("slabinfo",S_IWUSR|S_IRUSR,NULL,&proc_slabinfo_operations);
72155 #ifdef CONFIG_DEBUG_SLAB_LEAK
72156 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
72157 + proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
72158 #endif
72159 return 0;
72160 }
72161 module_init(slab_proc_init);
72162 #endif
72163
72164 +void check_object_size(const void *ptr, unsigned long n, bool to)
72165 +{
72166 +
72167 +#ifdef CONFIG_PAX_USERCOPY
72168 + struct page *page;
72169 + struct kmem_cache *cachep = NULL;
72170 + struct slab *slabp;
72171 + unsigned int objnr;
72172 + unsigned long offset;
72173 + const char *type;
72174 +
72175 + if (!n)
72176 + return;
72177 +
72178 + type = "<null>";
72179 + if (ZERO_OR_NULL_PTR(ptr))
72180 + goto report;
72181 +
72182 + if (!virt_addr_valid(ptr))
72183 + return;
72184 +
72185 + page = virt_to_head_page(ptr);
72186 +
72187 + type = "<process stack>";
72188 + if (!PageSlab(page)) {
72189 + if (object_is_on_stack(ptr, n) == -1)
72190 + goto report;
72191 + return;
72192 + }
72193 +
72194 + cachep = page_get_cache(page);
72195 + type = cachep->name;
72196 + if (!(cachep->flags & SLAB_USERCOPY))
72197 + goto report;
72198 +
72199 + slabp = page_get_slab(page);
72200 + objnr = obj_to_index(cachep, slabp, ptr);
72201 + BUG_ON(objnr >= cachep->num);
72202 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
72203 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
72204 + return;
72205 +
72206 +report:
72207 + pax_report_usercopy(ptr, n, to, type);
72208 +#endif
72209 +
72210 +}
72211 +EXPORT_SYMBOL(check_object_size);
72212 +
72213 /**
72214 * ksize - get the actual amount of memory allocated for a given object
72215 * @objp: Pointer to the object
72216 diff --git a/mm/slob.c b/mm/slob.c
72217 index 8105be4..e045f96 100644
72218 --- a/mm/slob.c
72219 +++ b/mm/slob.c
72220 @@ -29,7 +29,7 @@
72221 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
72222 * alloc_pages() directly, allocating compound pages so the page order
72223 * does not have to be separately tracked, and also stores the exact
72224 - * allocation size in page->private so that it can be used to accurately
72225 + * allocation size in slob_page->size so that it can be used to accurately
72226 * provide ksize(). These objects are detected in kfree() because slob_page()
72227 * is false for them.
72228 *
72229 @@ -58,6 +58,7 @@
72230 */
72231
72232 #include <linux/kernel.h>
72233 +#include <linux/sched.h>
72234 #include <linux/slab.h>
72235 #include <linux/mm.h>
72236 #include <linux/swap.h> /* struct reclaim_state */
72237 @@ -102,7 +103,8 @@ struct slob_page {
72238 unsigned long flags; /* mandatory */
72239 atomic_t _count; /* mandatory */
72240 slobidx_t units; /* free units left in page */
72241 - unsigned long pad[2];
72242 + unsigned long pad[1];
72243 + unsigned long size; /* size when >=PAGE_SIZE */
72244 slob_t *free; /* first free slob_t in page */
72245 struct list_head list; /* linked list of free pages */
72246 };
72247 @@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
72248 */
72249 static inline int is_slob_page(struct slob_page *sp)
72250 {
72251 - return PageSlab((struct page *)sp);
72252 + return PageSlab((struct page *)sp) && !sp->size;
72253 }
72254
72255 static inline void set_slob_page(struct slob_page *sp)
72256 @@ -150,7 +152,7 @@ static inline void clear_slob_page(struct slob_page *sp)
72257
72258 static inline struct slob_page *slob_page(const void *addr)
72259 {
72260 - return (struct slob_page *)virt_to_page(addr);
72261 + return (struct slob_page *)virt_to_head_page(addr);
72262 }
72263
72264 /*
72265 @@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
72266 /*
72267 * Return the size of a slob block.
72268 */
72269 -static slobidx_t slob_units(slob_t *s)
72270 +static slobidx_t slob_units(const slob_t *s)
72271 {
72272 if (s->units > 0)
72273 return s->units;
72274 @@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
72275 /*
72276 * Return the next free slob block pointer after this one.
72277 */
72278 -static slob_t *slob_next(slob_t *s)
72279 +static slob_t *slob_next(const slob_t *s)
72280 {
72281 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
72282 slobidx_t next;
72283 @@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
72284 /*
72285 * Returns true if s is the last free block in its page.
72286 */
72287 -static int slob_last(slob_t *s)
72288 +static int slob_last(const slob_t *s)
72289 {
72290 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
72291 }
72292 @@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
72293 if (!page)
72294 return NULL;
72295
72296 + set_slob_page(page);
72297 return page_address(page);
72298 }
72299
72300 @@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
72301 if (!b)
72302 return NULL;
72303 sp = slob_page(b);
72304 - set_slob_page(sp);
72305
72306 spin_lock_irqsave(&slob_lock, flags);
72307 sp->units = SLOB_UNITS(PAGE_SIZE);
72308 sp->free = b;
72309 + sp->size = 0;
72310 INIT_LIST_HEAD(&sp->list);
72311 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
72312 set_slob_page_free(sp, slob_list);
72313 @@ -476,10 +479,9 @@ out:
72314 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
72315 */
72316
72317 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
72318 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
72319 {
72320 - unsigned int *m;
72321 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
72322 + slob_t *m;
72323 void *ret;
72324
72325 gfp &= gfp_allowed_mask;
72326 @@ -494,7 +496,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
72327
72328 if (!m)
72329 return NULL;
72330 - *m = size;
72331 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
72332 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
72333 + m[0].units = size;
72334 + m[1].units = align;
72335 ret = (void *)m + align;
72336
72337 trace_kmalloc_node(_RET_IP_, ret,
72338 @@ -506,16 +511,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
72339 gfp |= __GFP_COMP;
72340 ret = slob_new_pages(gfp, order, node);
72341 if (ret) {
72342 - struct page *page;
72343 - page = virt_to_page(ret);
72344 - page->private = size;
72345 + struct slob_page *sp;
72346 + sp = slob_page(ret);
72347 + sp->size = size;
72348 }
72349
72350 trace_kmalloc_node(_RET_IP_, ret,
72351 size, PAGE_SIZE << order, gfp, node);
72352 }
72353
72354 - kmemleak_alloc(ret, size, 1, gfp);
72355 + return ret;
72356 +}
72357 +
72358 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
72359 +{
72360 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
72361 + void *ret = __kmalloc_node_align(size, gfp, node, align);
72362 +
72363 + if (!ZERO_OR_NULL_PTR(ret))
72364 + kmemleak_alloc(ret, size, 1, gfp);
72365 return ret;
72366 }
72367 EXPORT_SYMBOL(__kmalloc_node);
72368 @@ -533,13 +547,92 @@ void kfree(const void *block)
72369 sp = slob_page(block);
72370 if (is_slob_page(sp)) {
72371 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
72372 - unsigned int *m = (unsigned int *)(block - align);
72373 - slob_free(m, *m + align);
72374 - } else
72375 + slob_t *m = (slob_t *)(block - align);
72376 + slob_free(m, m[0].units + align);
72377 + } else {
72378 + clear_slob_page(sp);
72379 + free_slob_page(sp);
72380 + sp->size = 0;
72381 put_page(&sp->page);
72382 + }
72383 }
72384 EXPORT_SYMBOL(kfree);
72385
72386 +void check_object_size(const void *ptr, unsigned long n, bool to)
72387 +{
72388 +
72389 +#ifdef CONFIG_PAX_USERCOPY
72390 + struct slob_page *sp;
72391 + const slob_t *free;
72392 + const void *base;
72393 + unsigned long flags;
72394 + const char *type;
72395 +
72396 + if (!n)
72397 + return;
72398 +
72399 + type = "<null>";
72400 + if (ZERO_OR_NULL_PTR(ptr))
72401 + goto report;
72402 +
72403 + if (!virt_addr_valid(ptr))
72404 + return;
72405 +
72406 + type = "<process stack>";
72407 + sp = slob_page(ptr);
72408 + if (!PageSlab((struct page *)sp)) {
72409 + if (object_is_on_stack(ptr, n) == -1)
72410 + goto report;
72411 + return;
72412 + }
72413 +
72414 + type = "<slob>";
72415 + if (sp->size) {
72416 + base = page_address(&sp->page);
72417 + if (base <= ptr && n <= sp->size - (ptr - base))
72418 + return;
72419 + goto report;
72420 + }
72421 +
72422 + /* some tricky double walking to find the chunk */
72423 + spin_lock_irqsave(&slob_lock, flags);
72424 + base = (void *)((unsigned long)ptr & PAGE_MASK);
72425 + free = sp->free;
72426 +
72427 + while (!slob_last(free) && (void *)free <= ptr) {
72428 + base = free + slob_units(free);
72429 + free = slob_next(free);
72430 + }
72431 +
72432 + while (base < (void *)free) {
72433 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
72434 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
72435 + int offset;
72436 +
72437 + if (ptr < base + align)
72438 + break;
72439 +
72440 + offset = ptr - base - align;
72441 + if (offset >= m) {
72442 + base += size;
72443 + continue;
72444 + }
72445 +
72446 + if (n > m - offset)
72447 + break;
72448 +
72449 + spin_unlock_irqrestore(&slob_lock, flags);
72450 + return;
72451 + }
72452 +
72453 + spin_unlock_irqrestore(&slob_lock, flags);
72454 +report:
72455 + pax_report_usercopy(ptr, n, to, type);
72456 +#endif
72457 +
72458 +}
72459 +EXPORT_SYMBOL(check_object_size);
72460 +
72461 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
72462 size_t ksize(const void *block)
72463 {
72464 @@ -552,10 +645,10 @@ size_t ksize(const void *block)
72465 sp = slob_page(block);
72466 if (is_slob_page(sp)) {
72467 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
72468 - unsigned int *m = (unsigned int *)(block - align);
72469 - return SLOB_UNITS(*m) * SLOB_UNIT;
72470 + slob_t *m = (slob_t *)(block - align);
72471 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
72472 } else
72473 - return sp->page.private;
72474 + return sp->size;
72475 }
72476 EXPORT_SYMBOL(ksize);
72477
72478 @@ -571,8 +664,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
72479 {
72480 struct kmem_cache *c;
72481
72482 +#ifdef CONFIG_PAX_USERCOPY
72483 + c = __kmalloc_node_align(sizeof(struct kmem_cache),
72484 + GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
72485 +#else
72486 c = slob_alloc(sizeof(struct kmem_cache),
72487 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
72488 +#endif
72489
72490 if (c) {
72491 c->name = name;
72492 @@ -614,17 +712,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
72493
72494 lockdep_trace_alloc(flags);
72495
72496 +#ifdef CONFIG_PAX_USERCOPY
72497 + b = __kmalloc_node_align(c->size, flags, node, c->align);
72498 +#else
72499 if (c->size < PAGE_SIZE) {
72500 b = slob_alloc(c->size, flags, c->align, node);
72501 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
72502 SLOB_UNITS(c->size) * SLOB_UNIT,
72503 flags, node);
72504 } else {
72505 + struct slob_page *sp;
72506 +
72507 b = slob_new_pages(flags, get_order(c->size), node);
72508 + sp = slob_page(b);
72509 + sp->size = c->size;
72510 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
72511 PAGE_SIZE << get_order(c->size),
72512 flags, node);
72513 }
72514 +#endif
72515
72516 if (c->ctor)
72517 c->ctor(b);
72518 @@ -636,10 +742,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
72519
72520 static void __kmem_cache_free(void *b, int size)
72521 {
72522 - if (size < PAGE_SIZE)
72523 + struct slob_page *sp = slob_page(b);
72524 +
72525 + if (is_slob_page(sp))
72526 slob_free(b, size);
72527 - else
72528 + else {
72529 + clear_slob_page(sp);
72530 + free_slob_page(sp);
72531 + sp->size = 0;
72532 slob_free_pages(b, get_order(size));
72533 + }
72534 }
72535
72536 static void kmem_rcu_free(struct rcu_head *head)
72537 @@ -652,17 +764,31 @@ static void kmem_rcu_free(struct rcu_head *head)
72538
72539 void kmem_cache_free(struct kmem_cache *c, void *b)
72540 {
72541 + int size = c->size;
72542 +
72543 +#ifdef CONFIG_PAX_USERCOPY
72544 + if (size + c->align < PAGE_SIZE) {
72545 + size += c->align;
72546 + b -= c->align;
72547 + }
72548 +#endif
72549 +
72550 kmemleak_free_recursive(b, c->flags);
72551 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
72552 struct slob_rcu *slob_rcu;
72553 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
72554 - slob_rcu->size = c->size;
72555 + slob_rcu = b + (size - sizeof(struct slob_rcu));
72556 + slob_rcu->size = size;
72557 call_rcu(&slob_rcu->head, kmem_rcu_free);
72558 } else {
72559 - __kmem_cache_free(b, c->size);
72560 + __kmem_cache_free(b, size);
72561 }
72562
72563 +#ifdef CONFIG_PAX_USERCOPY
72564 + trace_kfree(_RET_IP_, b);
72565 +#else
72566 trace_kmem_cache_free(_RET_IP_, b);
72567 +#endif
72568 +
72569 }
72570 EXPORT_SYMBOL(kmem_cache_free);
72571
72572 diff --git a/mm/slub.c b/mm/slub.c
72573 index 4907563..e3d7905 100644
72574 --- a/mm/slub.c
72575 +++ b/mm/slub.c
72576 @@ -208,7 +208,7 @@ struct track {
72577
72578 enum track_item { TRACK_ALLOC, TRACK_FREE };
72579
72580 -#ifdef CONFIG_SYSFS
72581 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
72582 static int sysfs_slab_add(struct kmem_cache *);
72583 static int sysfs_slab_alias(struct kmem_cache *, const char *);
72584 static void sysfs_slab_remove(struct kmem_cache *);
72585 @@ -532,7 +532,7 @@ static void print_track(const char *s, struct track *t)
72586 if (!t->addr)
72587 return;
72588
72589 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
72590 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
72591 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
72592 #ifdef CONFIG_STACKTRACE
72593 {
72594 @@ -2571,6 +2571,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
72595
72596 page = virt_to_head_page(x);
72597
72598 + BUG_ON(!PageSlab(page));
72599 +
72600 slab_free(s, page, x, _RET_IP_);
72601
72602 trace_kmem_cache_free(_RET_IP_, x);
72603 @@ -2604,7 +2606,7 @@ static int slub_min_objects;
72604 * Merge control. If this is set then no merging of slab caches will occur.
72605 * (Could be removed. This was introduced to pacify the merge skeptics.)
72606 */
72607 -static int slub_nomerge;
72608 +static int slub_nomerge = 1;
72609
72610 /*
72611 * Calculate the order of allocation given an slab object size.
72612 @@ -3057,7 +3059,7 @@ static int kmem_cache_open(struct kmem_cache *s,
72613 else
72614 s->cpu_partial = 30;
72615
72616 - s->refcount = 1;
72617 + atomic_set(&s->refcount, 1);
72618 #ifdef CONFIG_NUMA
72619 s->remote_node_defrag_ratio = 1000;
72620 #endif
72621 @@ -3161,8 +3163,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
72622 void kmem_cache_destroy(struct kmem_cache *s)
72623 {
72624 down_write(&slub_lock);
72625 - s->refcount--;
72626 - if (!s->refcount) {
72627 + if (atomic_dec_and_test(&s->refcount)) {
72628 list_del(&s->list);
72629 up_write(&slub_lock);
72630 if (kmem_cache_close(s)) {
72631 @@ -3373,6 +3374,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
72632 EXPORT_SYMBOL(__kmalloc_node);
72633 #endif
72634
72635 +void check_object_size(const void *ptr, unsigned long n, bool to)
72636 +{
72637 +
72638 +#ifdef CONFIG_PAX_USERCOPY
72639 + struct page *page;
72640 + struct kmem_cache *s = NULL;
72641 + unsigned long offset;
72642 + const char *type;
72643 +
72644 + if (!n)
72645 + return;
72646 +
72647 + type = "<null>";
72648 + if (ZERO_OR_NULL_PTR(ptr))
72649 + goto report;
72650 +
72651 + if (!virt_addr_valid(ptr))
72652 + return;
72653 +
72654 + page = virt_to_head_page(ptr);
72655 +
72656 + type = "<process stack>";
72657 + if (!PageSlab(page)) {
72658 + if (object_is_on_stack(ptr, n) == -1)
72659 + goto report;
72660 + return;
72661 + }
72662 +
72663 + s = page->slab;
72664 + type = s->name;
72665 + if (!(s->flags & SLAB_USERCOPY))
72666 + goto report;
72667 +
72668 + offset = (ptr - page_address(page)) % s->size;
72669 + if (offset <= s->objsize && n <= s->objsize - offset)
72670 + return;
72671 +
72672 +report:
72673 + pax_report_usercopy(ptr, n, to, type);
72674 +#endif
72675 +
72676 +}
72677 +EXPORT_SYMBOL(check_object_size);
72678 +
72679 size_t ksize(const void *object)
72680 {
72681 struct page *page;
72682 @@ -3647,7 +3692,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
72683 int node;
72684
72685 list_add(&s->list, &slab_caches);
72686 - s->refcount = -1;
72687 + atomic_set(&s->refcount, -1);
72688
72689 for_each_node_state(node, N_NORMAL_MEMORY) {
72690 struct kmem_cache_node *n = get_node(s, node);
72691 @@ -3767,17 +3812,17 @@ void __init kmem_cache_init(void)
72692
72693 /* Caches that are not of the two-to-the-power-of size */
72694 if (KMALLOC_MIN_SIZE <= 32) {
72695 - kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
72696 + kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
72697 caches++;
72698 }
72699
72700 if (KMALLOC_MIN_SIZE <= 64) {
72701 - kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
72702 + kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
72703 caches++;
72704 }
72705
72706 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
72707 - kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
72708 + kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
72709 caches++;
72710 }
72711
72712 @@ -3845,7 +3890,7 @@ static int slab_unmergeable(struct kmem_cache *s)
72713 /*
72714 * We may have set a slab to be unmergeable during bootstrap.
72715 */
72716 - if (s->refcount < 0)
72717 + if (atomic_read(&s->refcount) < 0)
72718 return 1;
72719
72720 return 0;
72721 @@ -3904,7 +3949,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
72722 down_write(&slub_lock);
72723 s = find_mergeable(size, align, flags, name, ctor);
72724 if (s) {
72725 - s->refcount++;
72726 + atomic_inc(&s->refcount);
72727 /*
72728 * Adjust the object sizes so that we clear
72729 * the complete object on kzalloc.
72730 @@ -3913,7 +3958,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
72731 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
72732
72733 if (sysfs_slab_alias(s, name)) {
72734 - s->refcount--;
72735 + atomic_dec(&s->refcount);
72736 goto err;
72737 }
72738 up_write(&slub_lock);
72739 @@ -4041,7 +4086,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
72740 }
72741 #endif
72742
72743 -#ifdef CONFIG_SYSFS
72744 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
72745 static int count_inuse(struct page *page)
72746 {
72747 return page->inuse;
72748 @@ -4428,12 +4473,12 @@ static void resiliency_test(void)
72749 validate_slab_cache(kmalloc_caches[9]);
72750 }
72751 #else
72752 -#ifdef CONFIG_SYSFS
72753 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
72754 static void resiliency_test(void) {};
72755 #endif
72756 #endif
72757
72758 -#ifdef CONFIG_SYSFS
72759 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
72760 enum slab_stat_type {
72761 SL_ALL, /* All slabs */
72762 SL_PARTIAL, /* Only partially allocated slabs */
72763 @@ -4676,7 +4721,7 @@ SLAB_ATTR_RO(ctor);
72764
72765 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
72766 {
72767 - return sprintf(buf, "%d\n", s->refcount - 1);
72768 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
72769 }
72770 SLAB_ATTR_RO(aliases);
72771
72772 @@ -5243,6 +5288,7 @@ static char *create_unique_id(struct kmem_cache *s)
72773 return name;
72774 }
72775
72776 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
72777 static int sysfs_slab_add(struct kmem_cache *s)
72778 {
72779 int err;
72780 @@ -5305,6 +5351,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
72781 kobject_del(&s->kobj);
72782 kobject_put(&s->kobj);
72783 }
72784 +#endif
72785
72786 /*
72787 * Need to buffer aliases during bootup until sysfs becomes
72788 @@ -5318,6 +5365,7 @@ struct saved_alias {
72789
72790 static struct saved_alias *alias_list;
72791
72792 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
72793 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
72794 {
72795 struct saved_alias *al;
72796 @@ -5340,6 +5388,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
72797 alias_list = al;
72798 return 0;
72799 }
72800 +#endif
72801
72802 static int __init slab_sysfs_init(void)
72803 {
72804 diff --git a/mm/swap.c b/mm/swap.c
72805 index 14380e9..e244704 100644
72806 --- a/mm/swap.c
72807 +++ b/mm/swap.c
72808 @@ -30,6 +30,7 @@
72809 #include <linux/backing-dev.h>
72810 #include <linux/memcontrol.h>
72811 #include <linux/gfp.h>
72812 +#include <linux/hugetlb.h>
72813
72814 #include "internal.h"
72815
72816 @@ -70,6 +71,8 @@ static void __put_compound_page(struct page *page)
72817
72818 __page_cache_release(page);
72819 dtor = get_compound_page_dtor(page);
72820 + if (!PageHuge(page))
72821 + BUG_ON(dtor != free_compound_page);
72822 (*dtor)(page);
72823 }
72824
72825 diff --git a/mm/swapfile.c b/mm/swapfile.c
72826 index d999f09..e00270a 100644
72827 --- a/mm/swapfile.c
72828 +++ b/mm/swapfile.c
72829 @@ -61,7 +61,7 @@ static DEFINE_MUTEX(swapon_mutex);
72830
72831 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
72832 /* Activity counter to indicate that a swapon or swapoff has occurred */
72833 -static atomic_t proc_poll_event = ATOMIC_INIT(0);
72834 +static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
72835
72836 static inline unsigned char swap_count(unsigned char ent)
72837 {
72838 @@ -1671,7 +1671,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
72839 }
72840 filp_close(swap_file, NULL);
72841 err = 0;
72842 - atomic_inc(&proc_poll_event);
72843 + atomic_inc_unchecked(&proc_poll_event);
72844 wake_up_interruptible(&proc_poll_wait);
72845
72846 out_dput:
72847 @@ -1687,8 +1687,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
72848
72849 poll_wait(file, &proc_poll_wait, wait);
72850
72851 - if (seq->poll_event != atomic_read(&proc_poll_event)) {
72852 - seq->poll_event = atomic_read(&proc_poll_event);
72853 + if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
72854 + seq->poll_event = atomic_read_unchecked(&proc_poll_event);
72855 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
72856 }
72857
72858 @@ -1786,7 +1786,7 @@ static int swaps_open(struct inode *inode, struct file *file)
72859 return ret;
72860
72861 seq = file->private_data;
72862 - seq->poll_event = atomic_read(&proc_poll_event);
72863 + seq->poll_event = atomic_read_unchecked(&proc_poll_event);
72864 return 0;
72865 }
72866
72867 @@ -2124,7 +2124,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
72868 (p->flags & SWP_DISCARDABLE) ? "D" : "");
72869
72870 mutex_unlock(&swapon_mutex);
72871 - atomic_inc(&proc_poll_event);
72872 + atomic_inc_unchecked(&proc_poll_event);
72873 wake_up_interruptible(&proc_poll_wait);
72874
72875 if (S_ISREG(inode->i_mode))
72876 diff --git a/mm/util.c b/mm/util.c
72877 index 136ac4f..5117eef 100644
72878 --- a/mm/util.c
72879 +++ b/mm/util.c
72880 @@ -114,6 +114,7 @@ EXPORT_SYMBOL(memdup_user);
72881 * allocated buffer. Use this if you don't want to free the buffer immediately
72882 * like, for example, with RCU.
72883 */
72884 +#undef __krealloc
72885 void *__krealloc(const void *p, size_t new_size, gfp_t flags)
72886 {
72887 void *ret;
72888 @@ -147,6 +148,7 @@ EXPORT_SYMBOL(__krealloc);
72889 * behaves exactly like kmalloc(). If @size is 0 and @p is not a
72890 * %NULL pointer, the object pointed to is freed.
72891 */
72892 +#undef krealloc
72893 void *krealloc(const void *p, size_t new_size, gfp_t flags)
72894 {
72895 void *ret;
72896 @@ -243,6 +245,12 @@ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
72897 void arch_pick_mmap_layout(struct mm_struct *mm)
72898 {
72899 mm->mmap_base = TASK_UNMAPPED_BASE;
72900 +
72901 +#ifdef CONFIG_PAX_RANDMMAP
72902 + if (mm->pax_flags & MF_PAX_RANDMMAP)
72903 + mm->mmap_base += mm->delta_mmap;
72904 +#endif
72905 +
72906 mm->get_unmapped_area = arch_get_unmapped_area;
72907 mm->unmap_area = arch_unmap_area;
72908 }
72909 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
72910 index 86ce9a5..0fa4d89 100644
72911 --- a/mm/vmalloc.c
72912 +++ b/mm/vmalloc.c
72913 @@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
72914
72915 pte = pte_offset_kernel(pmd, addr);
72916 do {
72917 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
72918 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
72919 +
72920 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
72921 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
72922 + BUG_ON(!pte_exec(*pte));
72923 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
72924 + continue;
72925 + }
72926 +#endif
72927 +
72928 + {
72929 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
72930 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
72931 + }
72932 } while (pte++, addr += PAGE_SIZE, addr != end);
72933 }
72934
72935 @@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
72936 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
72937 {
72938 pte_t *pte;
72939 + int ret = -ENOMEM;
72940
72941 /*
72942 * nr is a running index into the array which helps higher level
72943 @@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
72944 pte = pte_alloc_kernel(pmd, addr);
72945 if (!pte)
72946 return -ENOMEM;
72947 +
72948 + pax_open_kernel();
72949 do {
72950 struct page *page = pages[*nr];
72951
72952 - if (WARN_ON(!pte_none(*pte)))
72953 - return -EBUSY;
72954 - if (WARN_ON(!page))
72955 - return -ENOMEM;
72956 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
72957 + if (pgprot_val(prot) & _PAGE_NX)
72958 +#endif
72959 +
72960 + if (WARN_ON(!pte_none(*pte))) {
72961 + ret = -EBUSY;
72962 + goto out;
72963 + }
72964 + if (WARN_ON(!page)) {
72965 + ret = -ENOMEM;
72966 + goto out;
72967 + }
72968 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
72969 (*nr)++;
72970 } while (pte++, addr += PAGE_SIZE, addr != end);
72971 - return 0;
72972 + ret = 0;
72973 +out:
72974 + pax_close_kernel();
72975 + return ret;
72976 }
72977
72978 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
72979 @@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void *x)
72980 * and fall back on vmalloc() if that fails. Others
72981 * just put it in the vmalloc space.
72982 */
72983 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
72984 +#ifdef CONFIG_MODULES
72985 +#ifdef MODULES_VADDR
72986 unsigned long addr = (unsigned long)x;
72987 if (addr >= MODULES_VADDR && addr < MODULES_END)
72988 return 1;
72989 #endif
72990 +
72991 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
72992 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
72993 + return 1;
72994 +#endif
72995 +
72996 +#endif
72997 +
72998 return is_vmalloc_addr(x);
72999 }
73000
73001 @@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
73002
73003 if (!pgd_none(*pgd)) {
73004 pud_t *pud = pud_offset(pgd, addr);
73005 +#ifdef CONFIG_X86
73006 + if (!pud_large(*pud))
73007 +#endif
73008 if (!pud_none(*pud)) {
73009 pmd_t *pmd = pmd_offset(pud, addr);
73010 +#ifdef CONFIG_X86
73011 + if (!pmd_large(*pmd))
73012 +#endif
73013 if (!pmd_none(*pmd)) {
73014 pte_t *ptep, pte;
73015
73016 @@ -1319,6 +1359,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
73017 struct vm_struct *area;
73018
73019 BUG_ON(in_interrupt());
73020 +
73021 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
73022 + if (flags & VM_KERNEXEC) {
73023 + if (start != VMALLOC_START || end != VMALLOC_END)
73024 + return NULL;
73025 + start = (unsigned long)MODULES_EXEC_VADDR;
73026 + end = (unsigned long)MODULES_EXEC_END;
73027 + }
73028 +#endif
73029 +
73030 if (flags & VM_IOREMAP) {
73031 int bit = fls(size);
73032
73033 @@ -1551,6 +1601,11 @@ void *vmap(struct page **pages, unsigned int count,
73034 if (count > totalram_pages)
73035 return NULL;
73036
73037 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
73038 + if (!(pgprot_val(prot) & _PAGE_NX))
73039 + flags |= VM_KERNEXEC;
73040 +#endif
73041 +
73042 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
73043 __builtin_return_address(0));
73044 if (!area)
73045 @@ -1652,6 +1707,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
73046 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
73047 goto fail;
73048
73049 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
73050 + if (!(pgprot_val(prot) & _PAGE_NX))
73051 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
73052 + VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
73053 + else
73054 +#endif
73055 +
73056 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
73057 start, end, node, gfp_mask, caller);
73058 if (!area)
73059 @@ -1704,6 +1766,7 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
73060 gfp_mask, prot, node, caller);
73061 }
73062
73063 +#undef __vmalloc
73064 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
73065 {
73066 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
73067 @@ -1727,6 +1790,7 @@ static inline void *__vmalloc_node_flags(unsigned long size,
73068 * For tight control over page level allocator and protection flags
73069 * use __vmalloc() instead.
73070 */
73071 +#undef vmalloc
73072 void *vmalloc(unsigned long size)
73073 {
73074 return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
73075 @@ -1743,6 +1807,7 @@ EXPORT_SYMBOL(vmalloc);
73076 * For tight control over page level allocator and protection flags
73077 * use __vmalloc() instead.
73078 */
73079 +#undef vzalloc
73080 void *vzalloc(unsigned long size)
73081 {
73082 return __vmalloc_node_flags(size, -1,
73083 @@ -1757,6 +1822,7 @@ EXPORT_SYMBOL(vzalloc);
73084 * The resulting memory area is zeroed so it can be mapped to userspace
73085 * without leaking data.
73086 */
73087 +#undef vmalloc_user
73088 void *vmalloc_user(unsigned long size)
73089 {
73090 struct vm_struct *area;
73091 @@ -1784,6 +1850,7 @@ EXPORT_SYMBOL(vmalloc_user);
73092 * For tight control over page level allocator and protection flags
73093 * use __vmalloc() instead.
73094 */
73095 +#undef vmalloc_node
73096 void *vmalloc_node(unsigned long size, int node)
73097 {
73098 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
73099 @@ -1803,6 +1870,7 @@ EXPORT_SYMBOL(vmalloc_node);
73100 * For tight control over page level allocator and protection flags
73101 * use __vmalloc_node() instead.
73102 */
73103 +#undef vzalloc_node
73104 void *vzalloc_node(unsigned long size, int node)
73105 {
73106 return __vmalloc_node_flags(size, node,
73107 @@ -1825,10 +1893,10 @@ EXPORT_SYMBOL(vzalloc_node);
73108 * For tight control over page level allocator and protection flags
73109 * use __vmalloc() instead.
73110 */
73111 -
73112 +#undef vmalloc_exec
73113 void *vmalloc_exec(unsigned long size)
73114 {
73115 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
73116 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
73117 -1, __builtin_return_address(0));
73118 }
73119
73120 @@ -1847,6 +1915,7 @@ void *vmalloc_exec(unsigned long size)
73121 * Allocate enough 32bit PA addressable pages to cover @size from the
73122 * page level allocator and map them into contiguous kernel virtual space.
73123 */
73124 +#undef vmalloc_32
73125 void *vmalloc_32(unsigned long size)
73126 {
73127 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
73128 @@ -1861,6 +1930,7 @@ EXPORT_SYMBOL(vmalloc_32);
73129 * The resulting memory area is 32bit addressable and zeroed so it can be
73130 * mapped to userspace without leaking data.
73131 */
73132 +#undef vmalloc_32_user
73133 void *vmalloc_32_user(unsigned long size)
73134 {
73135 struct vm_struct *area;
73136 @@ -2123,6 +2193,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
73137 unsigned long uaddr = vma->vm_start;
73138 unsigned long usize = vma->vm_end - vma->vm_start;
73139
73140 + BUG_ON(vma->vm_mirror);
73141 +
73142 if ((PAGE_SIZE-1) & (unsigned long)addr)
73143 return -EINVAL;
73144
73145 diff --git a/mm/vmstat.c b/mm/vmstat.c
73146 index f600557..1459fc8 100644
73147 --- a/mm/vmstat.c
73148 +++ b/mm/vmstat.c
73149 @@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
73150 *
73151 * vm_stat contains the global counters
73152 */
73153 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
73154 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
73155 EXPORT_SYMBOL(vm_stat);
73156
73157 #ifdef CONFIG_SMP
73158 @@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
73159 v = p->vm_stat_diff[i];
73160 p->vm_stat_diff[i] = 0;
73161 local_irq_restore(flags);
73162 - atomic_long_add(v, &zone->vm_stat[i]);
73163 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
73164 global_diff[i] += v;
73165 #ifdef CONFIG_NUMA
73166 /* 3 seconds idle till flush */
73167 @@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
73168
73169 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
73170 if (global_diff[i])
73171 - atomic_long_add(global_diff[i], &vm_stat[i]);
73172 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
73173 }
73174
73175 #endif
73176 @@ -1208,10 +1208,20 @@ static int __init setup_vmstat(void)
73177 start_cpu_timer(cpu);
73178 #endif
73179 #ifdef CONFIG_PROC_FS
73180 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
73181 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
73182 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
73183 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
73184 + {
73185 + mode_t gr_mode = S_IRUGO;
73186 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
73187 + gr_mode = S_IRUSR;
73188 +#endif
73189 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
73190 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
73191 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
73192 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
73193 +#else
73194 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
73195 +#endif
73196 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
73197 + }
73198 #endif
73199 return 0;
73200 }
73201 diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
73202 index efea35b..9c8dd0b 100644
73203 --- a/net/8021q/vlan.c
73204 +++ b/net/8021q/vlan.c
73205 @@ -554,8 +554,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
73206 err = -EPERM;
73207 if (!capable(CAP_NET_ADMIN))
73208 break;
73209 - if ((args.u.name_type >= 0) &&
73210 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
73211 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
73212 struct vlan_net *vn;
73213
73214 vn = net_generic(net, vlan_net_id);
73215 diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
73216 index fccae26..e7ece2f 100644
73217 --- a/net/9p/trans_fd.c
73218 +++ b/net/9p/trans_fd.c
73219 @@ -425,7 +425,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
73220 oldfs = get_fs();
73221 set_fs(get_ds());
73222 /* The cast to a user pointer is valid due to the set_fs() */
73223 - ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
73224 + ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
73225 set_fs(oldfs);
73226
73227 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
73228 diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
73229 index 876fbe8..8bbea9f 100644
73230 --- a/net/atm/atm_misc.c
73231 +++ b/net/atm/atm_misc.c
73232 @@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
73233 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
73234 return 1;
73235 atm_return(vcc, truesize);
73236 - atomic_inc(&vcc->stats->rx_drop);
73237 + atomic_inc_unchecked(&vcc->stats->rx_drop);
73238 return 0;
73239 }
73240 EXPORT_SYMBOL(atm_charge);
73241 @@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
73242 }
73243 }
73244 atm_return(vcc, guess);
73245 - atomic_inc(&vcc->stats->rx_drop);
73246 + atomic_inc_unchecked(&vcc->stats->rx_drop);
73247 return NULL;
73248 }
73249 EXPORT_SYMBOL(atm_alloc_charge);
73250 @@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
73251
73252 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
73253 {
73254 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
73255 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
73256 __SONET_ITEMS
73257 #undef __HANDLE_ITEM
73258 }
73259 @@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
73260
73261 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
73262 {
73263 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
73264 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
73265 __SONET_ITEMS
73266 #undef __HANDLE_ITEM
73267 }
73268 diff --git a/net/atm/lec.h b/net/atm/lec.h
73269 index dfc0719..47c5322 100644
73270 --- a/net/atm/lec.h
73271 +++ b/net/atm/lec.h
73272 @@ -48,7 +48,7 @@ struct lane2_ops {
73273 const u8 *tlvs, u32 sizeoftlvs);
73274 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
73275 const u8 *tlvs, u32 sizeoftlvs);
73276 -};
73277 +} __no_const;
73278
73279 /*
73280 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
73281 diff --git a/net/atm/mpc.h b/net/atm/mpc.h
73282 index 0919a88..a23d54e 100644
73283 --- a/net/atm/mpc.h
73284 +++ b/net/atm/mpc.h
73285 @@ -33,7 +33,7 @@ struct mpoa_client {
73286 struct mpc_parameters parameters; /* parameters for this client */
73287
73288 const struct net_device_ops *old_ops;
73289 - struct net_device_ops new_ops;
73290 + net_device_ops_no_const new_ops;
73291 };
73292
73293
73294 diff --git a/net/atm/proc.c b/net/atm/proc.c
73295 index 0d020de..011c7bb 100644
73296 --- a/net/atm/proc.c
73297 +++ b/net/atm/proc.c
73298 @@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
73299 const struct k_atm_aal_stats *stats)
73300 {
73301 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
73302 - atomic_read(&stats->tx), atomic_read(&stats->tx_err),
73303 - atomic_read(&stats->rx), atomic_read(&stats->rx_err),
73304 - atomic_read(&stats->rx_drop));
73305 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
73306 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
73307 + atomic_read_unchecked(&stats->rx_drop));
73308 }
73309
73310 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
73311 diff --git a/net/atm/resources.c b/net/atm/resources.c
73312 index 23f45ce..c748f1a 100644
73313 --- a/net/atm/resources.c
73314 +++ b/net/atm/resources.c
73315 @@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
73316 static void copy_aal_stats(struct k_atm_aal_stats *from,
73317 struct atm_aal_stats *to)
73318 {
73319 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
73320 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
73321 __AAL_STAT_ITEMS
73322 #undef __HANDLE_ITEM
73323 }
73324 @@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
73325 static void subtract_aal_stats(struct k_atm_aal_stats *from,
73326 struct atm_aal_stats *to)
73327 {
73328 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
73329 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
73330 __AAL_STAT_ITEMS
73331 #undef __HANDLE_ITEM
73332 }
73333 diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
73334 index 3512e25..2b33401 100644
73335 --- a/net/batman-adv/bat_iv_ogm.c
73336 +++ b/net/batman-adv/bat_iv_ogm.c
73337 @@ -541,7 +541,7 @@ void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
73338
73339 /* change sequence number to network order */
73340 batman_ogm_packet->seqno =
73341 - htonl((uint32_t)atomic_read(&hard_iface->seqno));
73342 + htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
73343
73344 batman_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
73345 batman_ogm_packet->tt_crc = htons((uint16_t)
73346 @@ -561,7 +561,7 @@ void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
73347 else
73348 batman_ogm_packet->gw_flags = NO_FLAGS;
73349
73350 - atomic_inc(&hard_iface->seqno);
73351 + atomic_inc_unchecked(&hard_iface->seqno);
73352
73353 slide_own_bcast_window(hard_iface);
73354 bat_ogm_queue_add(bat_priv, hard_iface->packet_buff,
73355 @@ -922,7 +922,7 @@ static void bat_ogm_process(const struct ethhdr *ethhdr,
73356 return;
73357
73358 /* could be changed by schedule_own_packet() */
73359 - if_incoming_seqno = atomic_read(&if_incoming->seqno);
73360 + if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
73361
73362 has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);
73363
73364 diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
73365 index 7704df4..beb4e16 100644
73366 --- a/net/batman-adv/hard-interface.c
73367 +++ b/net/batman-adv/hard-interface.c
73368 @@ -326,8 +326,8 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
73369 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
73370 dev_add_pack(&hard_iface->batman_adv_ptype);
73371
73372 - atomic_set(&hard_iface->seqno, 1);
73373 - atomic_set(&hard_iface->frag_seqno, 1);
73374 + atomic_set_unchecked(&hard_iface->seqno, 1);
73375 + atomic_set_unchecked(&hard_iface->frag_seqno, 1);
73376 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
73377 hard_iface->net_dev->name);
73378
73379 diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
73380 index 987c75a..20d6f36 100644
73381 --- a/net/batman-adv/soft-interface.c
73382 +++ b/net/batman-adv/soft-interface.c
73383 @@ -645,7 +645,7 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
73384
73385 /* set broadcast sequence number */
73386 bcast_packet->seqno =
73387 - htonl(atomic_inc_return(&bat_priv->bcast_seqno));
73388 + htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
73389
73390 add_bcast_packet_to_list(bat_priv, skb, 1);
73391
73392 @@ -843,7 +843,7 @@ struct net_device *softif_create(const char *name)
73393 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
73394
73395 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
73396 - atomic_set(&bat_priv->bcast_seqno, 1);
73397 + atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
73398 atomic_set(&bat_priv->ttvn, 0);
73399 atomic_set(&bat_priv->tt_local_changes, 0);
73400 atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
73401 diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
73402 index e9eb043..d174eeb 100644
73403 --- a/net/batman-adv/types.h
73404 +++ b/net/batman-adv/types.h
73405 @@ -38,8 +38,8 @@ struct hard_iface {
73406 int16_t if_num;
73407 char if_status;
73408 struct net_device *net_dev;
73409 - atomic_t seqno;
73410 - atomic_t frag_seqno;
73411 + atomic_unchecked_t seqno;
73412 + atomic_unchecked_t frag_seqno;
73413 unsigned char *packet_buff;
73414 int packet_len;
73415 struct kobject *hardif_obj;
73416 @@ -154,7 +154,7 @@ struct bat_priv {
73417 atomic_t orig_interval; /* uint */
73418 atomic_t hop_penalty; /* uint */
73419 atomic_t log_level; /* uint */
73420 - atomic_t bcast_seqno;
73421 + atomic_unchecked_t bcast_seqno;
73422 atomic_t bcast_queue_left;
73423 atomic_t batman_queue_left;
73424 atomic_t ttvn; /* translation table version number */
73425 diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
73426 index 07d1c1d..7e9bea9 100644
73427 --- a/net/batman-adv/unicast.c
73428 +++ b/net/batman-adv/unicast.c
73429 @@ -264,7 +264,7 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
73430 frag1->flags = UNI_FRAG_HEAD | large_tail;
73431 frag2->flags = large_tail;
73432
73433 - seqno = atomic_add_return(2, &hard_iface->frag_seqno);
73434 + seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
73435 frag1->seqno = htons(seqno - 1);
73436 frag2->seqno = htons(seqno);
73437
73438 diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
73439 index 07bc69e..21e76b1 100644
73440 --- a/net/bluetooth/hci_conn.c
73441 +++ b/net/bluetooth/hci_conn.c
73442 @@ -234,7 +234,7 @@ void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
73443 memset(&cp, 0, sizeof(cp));
73444
73445 cp.handle = cpu_to_le16(conn->handle);
73446 - memcpy(cp.ltk, ltk, sizeof(ltk));
73447 + memcpy(cp.ltk, ltk, sizeof(cp.ltk));
73448
73449 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
73450 }
73451 diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
73452 index 32d338c..d24bcdb 100644
73453 --- a/net/bluetooth/l2cap_core.c
73454 +++ b/net/bluetooth/l2cap_core.c
73455 @@ -2418,8 +2418,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
73456 break;
73457
73458 case L2CAP_CONF_RFC:
73459 - if (olen == sizeof(rfc))
73460 - memcpy(&rfc, (void *)val, olen);
73461 + if (olen != sizeof(rfc))
73462 + break;
73463 +
73464 + memcpy(&rfc, (void *)val, olen);
73465
73466 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
73467 rfc.mode != chan->mode)
73468 @@ -2537,8 +2539,10 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
73469
73470 switch (type) {
73471 case L2CAP_CONF_RFC:
73472 - if (olen == sizeof(rfc))
73473 - memcpy(&rfc, (void *)val, olen);
73474 + if (olen != sizeof(rfc))
73475 + break;
73476 +
73477 + memcpy(&rfc, (void *)val, olen);
73478 goto done;
73479 }
73480 }
73481 diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
73482 index 5fe2ff3..10968b5 100644
73483 --- a/net/bridge/netfilter/ebtables.c
73484 +++ b/net/bridge/netfilter/ebtables.c
73485 @@ -1523,7 +1523,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
73486 tmp.valid_hooks = t->table->valid_hooks;
73487 }
73488 mutex_unlock(&ebt_mutex);
73489 - if (copy_to_user(user, &tmp, *len) != 0){
73490 + if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
73491 BUGPRINT("c2u Didn't work\n");
73492 ret = -EFAULT;
73493 break;
73494 diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
73495 index a97d97a..6f679ed 100644
73496 --- a/net/caif/caif_socket.c
73497 +++ b/net/caif/caif_socket.c
73498 @@ -48,19 +48,20 @@ static struct dentry *debugfsdir;
73499 #ifdef CONFIG_DEBUG_FS
73500 struct debug_fs_counter {
73501 atomic_t caif_nr_socks;
73502 - atomic_t caif_sock_create;
73503 - atomic_t num_connect_req;
73504 - atomic_t num_connect_resp;
73505 - atomic_t num_connect_fail_resp;
73506 - atomic_t num_disconnect;
73507 - atomic_t num_remote_shutdown_ind;
73508 - atomic_t num_tx_flow_off_ind;
73509 - atomic_t num_tx_flow_on_ind;
73510 - atomic_t num_rx_flow_off;
73511 - atomic_t num_rx_flow_on;
73512 + atomic_unchecked_t caif_sock_create;
73513 + atomic_unchecked_t num_connect_req;
73514 + atomic_unchecked_t num_connect_resp;
73515 + atomic_unchecked_t num_connect_fail_resp;
73516 + atomic_unchecked_t num_disconnect;
73517 + atomic_unchecked_t num_remote_shutdown_ind;
73518 + atomic_unchecked_t num_tx_flow_off_ind;
73519 + atomic_unchecked_t num_tx_flow_on_ind;
73520 + atomic_unchecked_t num_rx_flow_off;
73521 + atomic_unchecked_t num_rx_flow_on;
73522 };
73523 static struct debug_fs_counter cnt;
73524 #define dbfs_atomic_inc(v) atomic_inc_return(v)
73525 +#define dbfs_atomic_inc_unchecked(v) atomic_inc_return_unchecked(v)
73526 #define dbfs_atomic_dec(v) atomic_dec_return(v)
73527 #else
73528 #define dbfs_atomic_inc(v) 0
73529 @@ -161,7 +162,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
73530 atomic_read(&cf_sk->sk.sk_rmem_alloc),
73531 sk_rcvbuf_lowwater(cf_sk));
73532 set_rx_flow_off(cf_sk);
73533 - dbfs_atomic_inc(&cnt.num_rx_flow_off);
73534 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
73535 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
73536 }
73537
73538 @@ -172,7 +173,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
73539 set_rx_flow_off(cf_sk);
73540 if (net_ratelimit())
73541 pr_debug("sending flow OFF due to rmem_schedule\n");
73542 - dbfs_atomic_inc(&cnt.num_rx_flow_off);
73543 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
73544 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
73545 }
73546 skb->dev = NULL;
73547 @@ -233,14 +234,14 @@ static void caif_ctrl_cb(struct cflayer *layr,
73548 switch (flow) {
73549 case CAIF_CTRLCMD_FLOW_ON_IND:
73550 /* OK from modem to start sending again */
73551 - dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
73552 + dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
73553 set_tx_flow_on(cf_sk);
73554 cf_sk->sk.sk_state_change(&cf_sk->sk);
73555 break;
73556
73557 case CAIF_CTRLCMD_FLOW_OFF_IND:
73558 /* Modem asks us to shut up */
73559 - dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
73560 + dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
73561 set_tx_flow_off(cf_sk);
73562 cf_sk->sk.sk_state_change(&cf_sk->sk);
73563 break;
73564 @@ -249,7 +250,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
73565 /* We're now connected */
73566 caif_client_register_refcnt(&cf_sk->layer,
73567 cfsk_hold, cfsk_put);
73568 - dbfs_atomic_inc(&cnt.num_connect_resp);
73569 + dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
73570 cf_sk->sk.sk_state = CAIF_CONNECTED;
73571 set_tx_flow_on(cf_sk);
73572 cf_sk->sk.sk_state_change(&cf_sk->sk);
73573 @@ -263,7 +264,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
73574
73575 case CAIF_CTRLCMD_INIT_FAIL_RSP:
73576 /* Connect request failed */
73577 - dbfs_atomic_inc(&cnt.num_connect_fail_resp);
73578 + dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
73579 cf_sk->sk.sk_err = ECONNREFUSED;
73580 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
73581 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
73582 @@ -277,7 +278,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
73583
73584 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
73585 /* Modem has closed this connection, or device is down. */
73586 - dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
73587 + dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
73588 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
73589 cf_sk->sk.sk_err = ECONNRESET;
73590 set_rx_flow_on(cf_sk);
73591 @@ -297,7 +298,7 @@ static void caif_check_flow_release(struct sock *sk)
73592 return;
73593
73594 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
73595 - dbfs_atomic_inc(&cnt.num_rx_flow_on);
73596 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
73597 set_rx_flow_on(cf_sk);
73598 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
73599 }
73600 @@ -856,7 +857,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
73601 /*ifindex = id of the interface.*/
73602 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
73603
73604 - dbfs_atomic_inc(&cnt.num_connect_req);
73605 + dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
73606 cf_sk->layer.receive = caif_sktrecv_cb;
73607
73608 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
73609 @@ -945,7 +946,7 @@ static int caif_release(struct socket *sock)
73610 spin_unlock_bh(&sk->sk_receive_queue.lock);
73611 sock->sk = NULL;
73612
73613 - dbfs_atomic_inc(&cnt.num_disconnect);
73614 + dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
73615
73616 WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
73617 if (cf_sk->debugfs_socket_dir != NULL)
73618 @@ -1124,7 +1125,7 @@ static int caif_create(struct net *net, struct socket *sock, int protocol,
73619 cf_sk->conn_req.protocol = protocol;
73620 /* Increase the number of sockets created. */
73621 dbfs_atomic_inc(&cnt.caif_nr_socks);
73622 - num = dbfs_atomic_inc(&cnt.caif_sock_create);
73623 + num = dbfs_atomic_inc_unchecked(&cnt.caif_sock_create);
73624 #ifdef CONFIG_DEBUG_FS
73625 if (!IS_ERR(debugfsdir)) {
73626
73627 diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
73628 index 5cf5222..6f704ad 100644
73629 --- a/net/caif/cfctrl.c
73630 +++ b/net/caif/cfctrl.c
73631 @@ -9,6 +9,7 @@
73632 #include <linux/stddef.h>
73633 #include <linux/spinlock.h>
73634 #include <linux/slab.h>
73635 +#include <linux/sched.h>
73636 #include <net/caif/caif_layer.h>
73637 #include <net/caif/cfpkt.h>
73638 #include <net/caif/cfctrl.h>
73639 @@ -42,8 +43,8 @@ struct cflayer *cfctrl_create(void)
73640 memset(&dev_info, 0, sizeof(dev_info));
73641 dev_info.id = 0xff;
73642 cfsrvl_init(&this->serv, 0, &dev_info, false);
73643 - atomic_set(&this->req_seq_no, 1);
73644 - atomic_set(&this->rsp_seq_no, 1);
73645 + atomic_set_unchecked(&this->req_seq_no, 1);
73646 + atomic_set_unchecked(&this->rsp_seq_no, 1);
73647 this->serv.layer.receive = cfctrl_recv;
73648 sprintf(this->serv.layer.name, "ctrl");
73649 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
73650 @@ -129,8 +130,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
73651 struct cfctrl_request_info *req)
73652 {
73653 spin_lock_bh(&ctrl->info_list_lock);
73654 - atomic_inc(&ctrl->req_seq_no);
73655 - req->sequence_no = atomic_read(&ctrl->req_seq_no);
73656 + atomic_inc_unchecked(&ctrl->req_seq_no);
73657 + req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
73658 list_add_tail(&req->list, &ctrl->list);
73659 spin_unlock_bh(&ctrl->info_list_lock);
73660 }
73661 @@ -148,7 +149,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
73662 if (p != first)
73663 pr_warn("Requests are not received in order\n");
73664
73665 - atomic_set(&ctrl->rsp_seq_no,
73666 + atomic_set_unchecked(&ctrl->rsp_seq_no,
73667 p->sequence_no);
73668 list_del(&p->list);
73669 goto out;
73670 diff --git a/net/can/gw.c b/net/can/gw.c
73671 index 3d79b12..8de85fa 100644
73672 --- a/net/can/gw.c
73673 +++ b/net/can/gw.c
73674 @@ -96,7 +96,7 @@ struct cf_mod {
73675 struct {
73676 void (*xor)(struct can_frame *cf, struct cgw_csum_xor *xor);
73677 void (*crc8)(struct can_frame *cf, struct cgw_csum_crc8 *crc8);
73678 - } csumfunc;
73679 + } __no_const csumfunc;
73680 };
73681
73682
73683 diff --git a/net/compat.c b/net/compat.c
73684 index 6def90e..c6992fa 100644
73685 --- a/net/compat.c
73686 +++ b/net/compat.c
73687 @@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
73688 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
73689 __get_user(kmsg->msg_flags, &umsg->msg_flags))
73690 return -EFAULT;
73691 - kmsg->msg_name = compat_ptr(tmp1);
73692 - kmsg->msg_iov = compat_ptr(tmp2);
73693 - kmsg->msg_control = compat_ptr(tmp3);
73694 + kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
73695 + kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
73696 + kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
73697 return 0;
73698 }
73699
73700 @@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
73701
73702 if (kern_msg->msg_namelen) {
73703 if (mode == VERIFY_READ) {
73704 - int err = move_addr_to_kernel(kern_msg->msg_name,
73705 + int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
73706 kern_msg->msg_namelen,
73707 kern_address);
73708 if (err < 0)
73709 @@ -96,7 +96,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
73710 kern_msg->msg_name = NULL;
73711
73712 tot_len = iov_from_user_compat_to_kern(kern_iov,
73713 - (struct compat_iovec __user *)kern_msg->msg_iov,
73714 + (struct compat_iovec __force_user *)kern_msg->msg_iov,
73715 kern_msg->msg_iovlen);
73716 if (tot_len >= 0)
73717 kern_msg->msg_iov = kern_iov;
73718 @@ -116,20 +116,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
73719
73720 #define CMSG_COMPAT_FIRSTHDR(msg) \
73721 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
73722 - (struct compat_cmsghdr __user *)((msg)->msg_control) : \
73723 + (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
73724 (struct compat_cmsghdr __user *)NULL)
73725
73726 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
73727 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
73728 (ucmlen) <= (unsigned long) \
73729 ((mhdr)->msg_controllen - \
73730 - ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
73731 + ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
73732
73733 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
73734 struct compat_cmsghdr __user *cmsg, int cmsg_len)
73735 {
73736 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
73737 - if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
73738 + if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
73739 msg->msg_controllen)
73740 return NULL;
73741 return (struct compat_cmsghdr __user *)ptr;
73742 @@ -221,7 +221,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
73743 {
73744 struct compat_timeval ctv;
73745 struct compat_timespec cts[3];
73746 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
73747 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
73748 struct compat_cmsghdr cmhdr;
73749 int cmlen;
73750
73751 @@ -273,7 +273,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
73752
73753 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
73754 {
73755 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
73756 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
73757 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
73758 int fdnum = scm->fp->count;
73759 struct file **fp = scm->fp->fp;
73760 @@ -370,7 +370,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
73761 return -EFAULT;
73762 old_fs = get_fs();
73763 set_fs(KERNEL_DS);
73764 - err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
73765 + err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
73766 set_fs(old_fs);
73767
73768 return err;
73769 @@ -431,7 +431,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
73770 len = sizeof(ktime);
73771 old_fs = get_fs();
73772 set_fs(KERNEL_DS);
73773 - err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
73774 + err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
73775 set_fs(old_fs);
73776
73777 if (!err) {
73778 @@ -566,7 +566,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
73779 case MCAST_JOIN_GROUP:
73780 case MCAST_LEAVE_GROUP:
73781 {
73782 - struct compat_group_req __user *gr32 = (void *)optval;
73783 + struct compat_group_req __user *gr32 = (void __user *)optval;
73784 struct group_req __user *kgr =
73785 compat_alloc_user_space(sizeof(struct group_req));
73786 u32 interface;
73787 @@ -587,7 +587,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
73788 case MCAST_BLOCK_SOURCE:
73789 case MCAST_UNBLOCK_SOURCE:
73790 {
73791 - struct compat_group_source_req __user *gsr32 = (void *)optval;
73792 + struct compat_group_source_req __user *gsr32 = (void __user *)optval;
73793 struct group_source_req __user *kgsr = compat_alloc_user_space(
73794 sizeof(struct group_source_req));
73795 u32 interface;
73796 @@ -608,7 +608,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
73797 }
73798 case MCAST_MSFILTER:
73799 {
73800 - struct compat_group_filter __user *gf32 = (void *)optval;
73801 + struct compat_group_filter __user *gf32 = (void __user *)optval;
73802 struct group_filter __user *kgf;
73803 u32 interface, fmode, numsrc;
73804
73805 @@ -646,7 +646,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
73806 char __user *optval, int __user *optlen,
73807 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
73808 {
73809 - struct compat_group_filter __user *gf32 = (void *)optval;
73810 + struct compat_group_filter __user *gf32 = (void __user *)optval;
73811 struct group_filter __user *kgf;
73812 int __user *koptlen;
73813 u32 interface, fmode, numsrc;
73814 diff --git a/net/core/datagram.c b/net/core/datagram.c
73815 index 68bbf9f..5ef0d12 100644
73816 --- a/net/core/datagram.c
73817 +++ b/net/core/datagram.c
73818 @@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
73819 }
73820
73821 kfree_skb(skb);
73822 - atomic_inc(&sk->sk_drops);
73823 + atomic_inc_unchecked(&sk->sk_drops);
73824 sk_mem_reclaim_partial(sk);
73825
73826 return err;
73827 diff --git a/net/core/dev.c b/net/core/dev.c
73828 index 6ca32f6..c7e9bbd 100644
73829 --- a/net/core/dev.c
73830 +++ b/net/core/dev.c
73831 @@ -1138,10 +1138,14 @@ void dev_load(struct net *net, const char *name)
73832 if (no_module && capable(CAP_NET_ADMIN))
73833 no_module = request_module("netdev-%s", name);
73834 if (no_module && capable(CAP_SYS_MODULE)) {
73835 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
73836 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
73837 +#else
73838 if (!request_module("%s", name))
73839 pr_err("Loading kernel module for a network device "
73840 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
73841 "instead\n", name);
73842 +#endif
73843 }
73844 }
73845 EXPORT_SYMBOL(dev_load);
73846 @@ -1585,7 +1589,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
73847 {
73848 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
73849 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
73850 - atomic_long_inc(&dev->rx_dropped);
73851 + atomic_long_inc_unchecked(&dev->rx_dropped);
73852 kfree_skb(skb);
73853 return NET_RX_DROP;
73854 }
73855 @@ -1595,7 +1599,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
73856 nf_reset(skb);
73857
73858 if (unlikely(!is_skb_forwardable(dev, skb))) {
73859 - atomic_long_inc(&dev->rx_dropped);
73860 + atomic_long_inc_unchecked(&dev->rx_dropped);
73861 kfree_skb(skb);
73862 return NET_RX_DROP;
73863 }
73864 @@ -2057,7 +2061,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
73865
73866 struct dev_gso_cb {
73867 void (*destructor)(struct sk_buff *skb);
73868 -};
73869 +} __no_const;
73870
73871 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
73872
73873 @@ -2913,7 +2917,7 @@ enqueue:
73874
73875 local_irq_restore(flags);
73876
73877 - atomic_long_inc(&skb->dev->rx_dropped);
73878 + atomic_long_inc_unchecked(&skb->dev->rx_dropped);
73879 kfree_skb(skb);
73880 return NET_RX_DROP;
73881 }
73882 @@ -2985,7 +2989,7 @@ int netif_rx_ni(struct sk_buff *skb)
73883 }
73884 EXPORT_SYMBOL(netif_rx_ni);
73885
73886 -static void net_tx_action(struct softirq_action *h)
73887 +static void net_tx_action(void)
73888 {
73889 struct softnet_data *sd = &__get_cpu_var(softnet_data);
73890
73891 @@ -3273,7 +3277,7 @@ ncls:
73892 if (pt_prev) {
73893 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
73894 } else {
73895 - atomic_long_inc(&skb->dev->rx_dropped);
73896 + atomic_long_inc_unchecked(&skb->dev->rx_dropped);
73897 kfree_skb(skb);
73898 /* Jamal, now you will not able to escape explaining
73899 * me how you were going to use this. :-)
73900 @@ -3832,7 +3836,7 @@ void netif_napi_del(struct napi_struct *napi)
73901 }
73902 EXPORT_SYMBOL(netif_napi_del);
73903
73904 -static void net_rx_action(struct softirq_action *h)
73905 +static void net_rx_action(void)
73906 {
73907 struct softnet_data *sd = &__get_cpu_var(softnet_data);
73908 unsigned long time_limit = jiffies + 2;
73909 @@ -5889,7 +5893,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
73910 } else {
73911 netdev_stats_to_stats64(storage, &dev->stats);
73912 }
73913 - storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
73914 + storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
73915 return storage;
73916 }
73917 EXPORT_SYMBOL(dev_get_stats);
73918 diff --git a/net/core/flow.c b/net/core/flow.c
73919 index e318c7e..168b1d0 100644
73920 --- a/net/core/flow.c
73921 +++ b/net/core/flow.c
73922 @@ -61,7 +61,7 @@ struct flow_cache {
73923 struct timer_list rnd_timer;
73924 };
73925
73926 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
73927 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
73928 EXPORT_SYMBOL(flow_cache_genid);
73929 static struct flow_cache flow_cache_global;
73930 static struct kmem_cache *flow_cachep __read_mostly;
73931 @@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
73932
73933 static int flow_entry_valid(struct flow_cache_entry *fle)
73934 {
73935 - if (atomic_read(&flow_cache_genid) != fle->genid)
73936 + if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
73937 return 0;
73938 if (fle->object && !fle->object->ops->check(fle->object))
73939 return 0;
73940 @@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
73941 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
73942 fcp->hash_count++;
73943 }
73944 - } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
73945 + } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
73946 flo = fle->object;
73947 if (!flo)
73948 goto ret_object;
73949 @@ -280,7 +280,7 @@ nocache:
73950 }
73951 flo = resolver(net, key, family, dir, flo, ctx);
73952 if (fle) {
73953 - fle->genid = atomic_read(&flow_cache_genid);
73954 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
73955 if (!IS_ERR(flo))
73956 fle->object = flo;
73957 else
73958 diff --git a/net/core/iovec.c b/net/core/iovec.c
73959 index c40f27e..7f49254 100644
73960 --- a/net/core/iovec.c
73961 +++ b/net/core/iovec.c
73962 @@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
73963 if (m->msg_namelen) {
73964 if (mode == VERIFY_READ) {
73965 void __user *namep;
73966 - namep = (void __user __force *) m->msg_name;
73967 + namep = (void __force_user *) m->msg_name;
73968 err = move_addr_to_kernel(namep, m->msg_namelen,
73969 address);
73970 if (err < 0)
73971 @@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
73972 }
73973
73974 size = m->msg_iovlen * sizeof(struct iovec);
73975 - if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
73976 + if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
73977 return -EFAULT;
73978
73979 m->msg_iov = iov;
73980 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
73981 index f965dce..92c792a 100644
73982 --- a/net/core/rtnetlink.c
73983 +++ b/net/core/rtnetlink.c
73984 @@ -57,7 +57,7 @@ struct rtnl_link {
73985 rtnl_doit_func doit;
73986 rtnl_dumpit_func dumpit;
73987 rtnl_calcit_func calcit;
73988 -};
73989 +} __no_const;
73990
73991 static DEFINE_MUTEX(rtnl_mutex);
73992
73993 diff --git a/net/core/scm.c b/net/core/scm.c
73994 index ff52ad0..aff1c0f 100644
73995 --- a/net/core/scm.c
73996 +++ b/net/core/scm.c
73997 @@ -220,7 +220,7 @@ EXPORT_SYMBOL(__scm_send);
73998 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
73999 {
74000 struct cmsghdr __user *cm
74001 - = (__force struct cmsghdr __user *)msg->msg_control;
74002 + = (struct cmsghdr __force_user *)msg->msg_control;
74003 struct cmsghdr cmhdr;
74004 int cmlen = CMSG_LEN(len);
74005 int err;
74006 @@ -243,7 +243,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
74007 err = -EFAULT;
74008 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
74009 goto out;
74010 - if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
74011 + if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
74012 goto out;
74013 cmlen = CMSG_SPACE(len);
74014 if (msg->msg_controllen < cmlen)
74015 @@ -259,7 +259,7 @@ EXPORT_SYMBOL(put_cmsg);
74016 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
74017 {
74018 struct cmsghdr __user *cm
74019 - = (__force struct cmsghdr __user*)msg->msg_control;
74020 + = (struct cmsghdr __force_user *)msg->msg_control;
74021
74022 int fdmax = 0;
74023 int fdnum = scm->fp->count;
74024 @@ -279,7 +279,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
74025 if (fdnum < fdmax)
74026 fdmax = fdnum;
74027
74028 - for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
74029 + for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
74030 i++, cmfptr++)
74031 {
74032 int new_fd;
74033 diff --git a/net/core/sock.c b/net/core/sock.c
74034 index 02f8dfe..86dfd4a 100644
74035 --- a/net/core/sock.c
74036 +++ b/net/core/sock.c
74037 @@ -341,7 +341,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
74038 struct sk_buff_head *list = &sk->sk_receive_queue;
74039
74040 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
74041 - atomic_inc(&sk->sk_drops);
74042 + atomic_inc_unchecked(&sk->sk_drops);
74043 trace_sock_rcvqueue_full(sk, skb);
74044 return -ENOMEM;
74045 }
74046 @@ -351,7 +351,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
74047 return err;
74048
74049 if (!sk_rmem_schedule(sk, skb->truesize)) {
74050 - atomic_inc(&sk->sk_drops);
74051 + atomic_inc_unchecked(&sk->sk_drops);
74052 return -ENOBUFS;
74053 }
74054
74055 @@ -371,7 +371,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
74056 skb_dst_force(skb);
74057
74058 spin_lock_irqsave(&list->lock, flags);
74059 - skb->dropcount = atomic_read(&sk->sk_drops);
74060 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
74061 __skb_queue_tail(list, skb);
74062 spin_unlock_irqrestore(&list->lock, flags);
74063
74064 @@ -391,7 +391,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
74065 skb->dev = NULL;
74066
74067 if (sk_rcvqueues_full(sk, skb)) {
74068 - atomic_inc(&sk->sk_drops);
74069 + atomic_inc_unchecked(&sk->sk_drops);
74070 goto discard_and_relse;
74071 }
74072 if (nested)
74073 @@ -409,7 +409,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
74074 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
74075 } else if (sk_add_backlog(sk, skb)) {
74076 bh_unlock_sock(sk);
74077 - atomic_inc(&sk->sk_drops);
74078 + atomic_inc_unchecked(&sk->sk_drops);
74079 goto discard_and_relse;
74080 }
74081
74082 @@ -974,7 +974,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
74083 if (len > sizeof(peercred))
74084 len = sizeof(peercred);
74085 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
74086 - if (copy_to_user(optval, &peercred, len))
74087 + if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
74088 return -EFAULT;
74089 goto lenout;
74090 }
74091 @@ -987,7 +987,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
74092 return -ENOTCONN;
74093 if (lv < len)
74094 return -EINVAL;
74095 - if (copy_to_user(optval, address, len))
74096 + if (len > sizeof(address) || copy_to_user(optval, address, len))
74097 return -EFAULT;
74098 goto lenout;
74099 }
74100 @@ -1024,7 +1024,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
74101
74102 if (len > lv)
74103 len = lv;
74104 - if (copy_to_user(optval, &v, len))
74105 + if (len > sizeof(v) || copy_to_user(optval, &v, len))
74106 return -EFAULT;
74107 lenout:
74108 if (put_user(len, optlen))
74109 @@ -2108,7 +2108,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
74110 */
74111 smp_wmb();
74112 atomic_set(&sk->sk_refcnt, 1);
74113 - atomic_set(&sk->sk_drops, 0);
74114 + atomic_set_unchecked(&sk->sk_drops, 0);
74115 }
74116 EXPORT_SYMBOL(sock_init_data);
74117
74118 diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
74119 index b9868e1..849f809 100644
74120 --- a/net/core/sock_diag.c
74121 +++ b/net/core/sock_diag.c
74122 @@ -16,20 +16,27 @@ static DEFINE_MUTEX(sock_diag_table_mutex);
74123
74124 int sock_diag_check_cookie(void *sk, __u32 *cookie)
74125 {
74126 +#ifndef CONFIG_GRKERNSEC_HIDESYM
74127 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
74128 cookie[1] != INET_DIAG_NOCOOKIE) &&
74129 ((u32)(unsigned long)sk != cookie[0] ||
74130 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
74131 return -ESTALE;
74132 else
74133 +#endif
74134 return 0;
74135 }
74136 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
74137
74138 void sock_diag_save_cookie(void *sk, __u32 *cookie)
74139 {
74140 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74141 + cookie[0] = 0;
74142 + cookie[1] = 0;
74143 +#else
74144 cookie[0] = (u32)(unsigned long)sk;
74145 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
74146 +#endif
74147 }
74148 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
74149
74150 diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
74151 index 02e75d1..9a57a7c 100644
74152 --- a/net/decnet/sysctl_net_decnet.c
74153 +++ b/net/decnet/sysctl_net_decnet.c
74154 @@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
74155
74156 if (len > *lenp) len = *lenp;
74157
74158 - if (copy_to_user(buffer, addr, len))
74159 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
74160 return -EFAULT;
74161
74162 *lenp = len;
74163 @@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
74164
74165 if (len > *lenp) len = *lenp;
74166
74167 - if (copy_to_user(buffer, devname, len))
74168 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
74169 return -EFAULT;
74170
74171 *lenp = len;
74172 diff --git a/net/econet/Kconfig b/net/econet/Kconfig
74173 index 39a2d29..f39c0fe 100644
74174 --- a/net/econet/Kconfig
74175 +++ b/net/econet/Kconfig
74176 @@ -4,7 +4,7 @@
74177
74178 config ECONET
74179 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
74180 - depends on EXPERIMENTAL && INET
74181 + depends on EXPERIMENTAL && INET && BROKEN
74182 ---help---
74183 Econet is a fairly old and slow networking protocol mainly used by
74184 Acorn computers to access file and print servers. It uses native
74185 diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
74186 index 92fc5f6..b790d91 100644
74187 --- a/net/ipv4/fib_frontend.c
74188 +++ b/net/ipv4/fib_frontend.c
74189 @@ -970,12 +970,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
74190 #ifdef CONFIG_IP_ROUTE_MULTIPATH
74191 fib_sync_up(dev);
74192 #endif
74193 - atomic_inc(&net->ipv4.dev_addr_genid);
74194 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
74195 rt_cache_flush(dev_net(dev), -1);
74196 break;
74197 case NETDEV_DOWN:
74198 fib_del_ifaddr(ifa, NULL);
74199 - atomic_inc(&net->ipv4.dev_addr_genid);
74200 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
74201 if (ifa->ifa_dev->ifa_list == NULL) {
74202 /* Last address was deleted from this interface.
74203 * Disable IP.
74204 @@ -1011,7 +1011,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
74205 #ifdef CONFIG_IP_ROUTE_MULTIPATH
74206 fib_sync_up(dev);
74207 #endif
74208 - atomic_inc(&net->ipv4.dev_addr_genid);
74209 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
74210 rt_cache_flush(dev_net(dev), -1);
74211 break;
74212 case NETDEV_DOWN:
74213 diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
74214 index 80106d8..232e898 100644
74215 --- a/net/ipv4/fib_semantics.c
74216 +++ b/net/ipv4/fib_semantics.c
74217 @@ -699,7 +699,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
74218 nh->nh_saddr = inet_select_addr(nh->nh_dev,
74219 nh->nh_gw,
74220 nh->nh_parent->fib_scope);
74221 - nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
74222 + nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
74223
74224 return nh->nh_saddr;
74225 }
74226 diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
74227 index 984ec65..97ac518 100644
74228 --- a/net/ipv4/inet_hashtables.c
74229 +++ b/net/ipv4/inet_hashtables.c
74230 @@ -18,12 +18,15 @@
74231 #include <linux/sched.h>
74232 #include <linux/slab.h>
74233 #include <linux/wait.h>
74234 +#include <linux/security.h>
74235
74236 #include <net/inet_connection_sock.h>
74237 #include <net/inet_hashtables.h>
74238 #include <net/secure_seq.h>
74239 #include <net/ip.h>
74240
74241 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
74242 +
74243 /*
74244 * Allocate and initialize a new local port bind bucket.
74245 * The bindhash mutex for snum's hash chain must be held here.
74246 @@ -530,6 +533,8 @@ ok:
74247 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
74248 spin_unlock(&head->lock);
74249
74250 + gr_update_task_in_ip_table(current, inet_sk(sk));
74251 +
74252 if (tw) {
74253 inet_twsk_deschedule(tw, death_row);
74254 while (twrefcnt) {
74255 diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
74256 index d4d61b6..b81aec8 100644
74257 --- a/net/ipv4/inetpeer.c
74258 +++ b/net/ipv4/inetpeer.c
74259 @@ -487,8 +487,8 @@ relookup:
74260 if (p) {
74261 p->daddr = *daddr;
74262 atomic_set(&p->refcnt, 1);
74263 - atomic_set(&p->rid, 0);
74264 - atomic_set(&p->ip_id_count,
74265 + atomic_set_unchecked(&p->rid, 0);
74266 + atomic_set_unchecked(&p->ip_id_count,
74267 (daddr->family == AF_INET) ?
74268 secure_ip_id(daddr->addr.a4) :
74269 secure_ipv6_id(daddr->addr.a6));
74270 diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
74271 index 1f23a57..7180dfe 100644
74272 --- a/net/ipv4/ip_fragment.c
74273 +++ b/net/ipv4/ip_fragment.c
74274 @@ -316,7 +316,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
74275 return 0;
74276
74277 start = qp->rid;
74278 - end = atomic_inc_return(&peer->rid);
74279 + end = atomic_inc_return_unchecked(&peer->rid);
74280 qp->rid = end;
74281
74282 rc = qp->q.fragments && (end - start) > max;
74283 diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
74284 index 8aa87c1..35c3248 100644
74285 --- a/net/ipv4/ip_sockglue.c
74286 +++ b/net/ipv4/ip_sockglue.c
74287 @@ -1112,7 +1112,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
74288 len = min_t(unsigned int, len, opt->optlen);
74289 if (put_user(len, optlen))
74290 return -EFAULT;
74291 - if (copy_to_user(optval, opt->__data, len))
74292 + if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
74293 + copy_to_user(optval, opt->__data, len))
74294 return -EFAULT;
74295 return 0;
74296 }
74297 @@ -1240,7 +1241,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
74298 if (sk->sk_type != SOCK_STREAM)
74299 return -ENOPROTOOPT;
74300
74301 - msg.msg_control = optval;
74302 + msg.msg_control = (void __force_kernel *)optval;
74303 msg.msg_controllen = len;
74304 msg.msg_flags = flags;
74305
74306 diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
74307 index 6e412a6..6640538 100644
74308 --- a/net/ipv4/ipconfig.c
74309 +++ b/net/ipv4/ipconfig.c
74310 @@ -318,7 +318,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
74311
74312 mm_segment_t oldfs = get_fs();
74313 set_fs(get_ds());
74314 - res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
74315 + res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
74316 set_fs(oldfs);
74317 return res;
74318 }
74319 @@ -329,7 +329,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
74320
74321 mm_segment_t oldfs = get_fs();
74322 set_fs(get_ds());
74323 - res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
74324 + res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
74325 set_fs(oldfs);
74326 return res;
74327 }
74328 @@ -340,7 +340,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
74329
74330 mm_segment_t oldfs = get_fs();
74331 set_fs(get_ds());
74332 - res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
74333 + res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
74334 set_fs(oldfs);
74335 return res;
74336 }
74337 diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
74338 index 2133c30..5c4b40b 100644
74339 --- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
74340 +++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
74341 @@ -399,7 +399,7 @@ static unsigned char asn1_octets_decode(struct asn1_ctx *ctx,
74342
74343 *len = 0;
74344
74345 - *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
74346 + *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
74347 if (*octets == NULL)
74348 return 0;
74349
74350 diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
74351 index b072386..abdebcf 100644
74352 --- a/net/ipv4/ping.c
74353 +++ b/net/ipv4/ping.c
74354 @@ -838,7 +838,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
74355 sk_rmem_alloc_get(sp),
74356 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
74357 atomic_read(&sp->sk_refcnt), sp,
74358 - atomic_read(&sp->sk_drops), len);
74359 + atomic_read_unchecked(&sp->sk_drops), len);
74360 }
74361
74362 static int ping_seq_show(struct seq_file *seq, void *v)
74363 diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
74364 index 3ccda5a..3c1e61d 100644
74365 --- a/net/ipv4/raw.c
74366 +++ b/net/ipv4/raw.c
74367 @@ -304,7 +304,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
74368 int raw_rcv(struct sock *sk, struct sk_buff *skb)
74369 {
74370 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
74371 - atomic_inc(&sk->sk_drops);
74372 + atomic_inc_unchecked(&sk->sk_drops);
74373 kfree_skb(skb);
74374 return NET_RX_DROP;
74375 }
74376 @@ -742,16 +742,20 @@ static int raw_init(struct sock *sk)
74377
74378 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
74379 {
74380 + struct icmp_filter filter;
74381 +
74382 if (optlen > sizeof(struct icmp_filter))
74383 optlen = sizeof(struct icmp_filter);
74384 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
74385 + if (copy_from_user(&filter, optval, optlen))
74386 return -EFAULT;
74387 + raw_sk(sk)->filter = filter;
74388 return 0;
74389 }
74390
74391 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
74392 {
74393 int len, ret = -EFAULT;
74394 + struct icmp_filter filter;
74395
74396 if (get_user(len, optlen))
74397 goto out;
74398 @@ -761,8 +765,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
74399 if (len > sizeof(struct icmp_filter))
74400 len = sizeof(struct icmp_filter);
74401 ret = -EFAULT;
74402 - if (put_user(len, optlen) ||
74403 - copy_to_user(optval, &raw_sk(sk)->filter, len))
74404 + filter = raw_sk(sk)->filter;
74405 + if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
74406 goto out;
74407 ret = 0;
74408 out: return ret;
74409 @@ -990,7 +994,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
74410 sk_wmem_alloc_get(sp),
74411 sk_rmem_alloc_get(sp),
74412 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
74413 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
74414 + atomic_read(&sp->sk_refcnt),
74415 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74416 + NULL,
74417 +#else
74418 + sp,
74419 +#endif
74420 + atomic_read_unchecked(&sp->sk_drops));
74421 }
74422
74423 static int raw_seq_show(struct seq_file *seq, void *v)
74424 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
74425 index 0197747..7adb0dc 100644
74426 --- a/net/ipv4/route.c
74427 +++ b/net/ipv4/route.c
74428 @@ -311,7 +311,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
74429
74430 static inline int rt_genid(struct net *net)
74431 {
74432 - return atomic_read(&net->ipv4.rt_genid);
74433 + return atomic_read_unchecked(&net->ipv4.rt_genid);
74434 }
74435
74436 #ifdef CONFIG_PROC_FS
74437 @@ -935,7 +935,7 @@ static void rt_cache_invalidate(struct net *net)
74438 unsigned char shuffle;
74439
74440 get_random_bytes(&shuffle, sizeof(shuffle));
74441 - atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
74442 + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
74443 inetpeer_invalidate_tree(AF_INET);
74444 }
74445
74446 @@ -3010,7 +3010,7 @@ static int rt_fill_info(struct net *net,
74447 error = rt->dst.error;
74448 if (peer) {
74449 inet_peer_refcheck(rt->peer);
74450 - id = atomic_read(&peer->ip_id_count) & 0xffff;
74451 + id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
74452 if (peer->tcp_ts_stamp) {
74453 ts = peer->tcp_ts;
74454 tsage = get_seconds() - peer->tcp_ts_stamp;
74455 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
74456 index fd54c5f..96d6407 100644
74457 --- a/net/ipv4/tcp_ipv4.c
74458 +++ b/net/ipv4/tcp_ipv4.c
74459 @@ -88,6 +88,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
74460 int sysctl_tcp_low_latency __read_mostly;
74461 EXPORT_SYMBOL(sysctl_tcp_low_latency);
74462
74463 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74464 +extern int grsec_enable_blackhole;
74465 +#endif
74466
74467 #ifdef CONFIG_TCP_MD5SIG
74468 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
74469 @@ -1638,6 +1641,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
74470 return 0;
74471
74472 reset:
74473 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74474 + if (!grsec_enable_blackhole)
74475 +#endif
74476 tcp_v4_send_reset(rsk, skb);
74477 discard:
74478 kfree_skb(skb);
74479 @@ -1700,12 +1706,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
74480 TCP_SKB_CB(skb)->sacked = 0;
74481
74482 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
74483 - if (!sk)
74484 + if (!sk) {
74485 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74486 + ret = 1;
74487 +#endif
74488 goto no_tcp_socket;
74489 -
74490 + }
74491 process:
74492 - if (sk->sk_state == TCP_TIME_WAIT)
74493 + if (sk->sk_state == TCP_TIME_WAIT) {
74494 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74495 + ret = 2;
74496 +#endif
74497 goto do_time_wait;
74498 + }
74499
74500 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
74501 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
74502 @@ -1755,6 +1768,10 @@ no_tcp_socket:
74503 bad_packet:
74504 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
74505 } else {
74506 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74507 + if (!grsec_enable_blackhole || (ret == 1 &&
74508 + (skb->dev->flags & IFF_LOOPBACK)))
74509 +#endif
74510 tcp_v4_send_reset(NULL, skb);
74511 }
74512
74513 @@ -2417,7 +2434,11 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
74514 0, /* non standard timer */
74515 0, /* open_requests have no inode */
74516 atomic_read(&sk->sk_refcnt),
74517 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74518 + NULL,
74519 +#else
74520 req,
74521 +#endif
74522 len);
74523 }
74524
74525 @@ -2467,7 +2488,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
74526 sock_i_uid(sk),
74527 icsk->icsk_probes_out,
74528 sock_i_ino(sk),
74529 - atomic_read(&sk->sk_refcnt), sk,
74530 + atomic_read(&sk->sk_refcnt),
74531 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74532 + NULL,
74533 +#else
74534 + sk,
74535 +#endif
74536 jiffies_to_clock_t(icsk->icsk_rto),
74537 jiffies_to_clock_t(icsk->icsk_ack.ato),
74538 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
74539 @@ -2495,7 +2521,13 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
74540 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
74541 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
74542 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
74543 - atomic_read(&tw->tw_refcnt), tw, len);
74544 + atomic_read(&tw->tw_refcnt),
74545 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74546 + NULL,
74547 +#else
74548 + tw,
74549 +#endif
74550 + len);
74551 }
74552
74553 #define TMPSZ 150
74554 diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
74555 index 550e755..25721b3 100644
74556 --- a/net/ipv4/tcp_minisocks.c
74557 +++ b/net/ipv4/tcp_minisocks.c
74558 @@ -27,6 +27,10 @@
74559 #include <net/inet_common.h>
74560 #include <net/xfrm.h>
74561
74562 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74563 +extern int grsec_enable_blackhole;
74564 +#endif
74565 +
74566 int sysctl_tcp_syncookies __read_mostly = 1;
74567 EXPORT_SYMBOL(sysctl_tcp_syncookies);
74568
74569 @@ -753,6 +757,10 @@ listen_overflow:
74570
74571 embryonic_reset:
74572 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
74573 +
74574 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74575 + if (!grsec_enable_blackhole)
74576 +#endif
74577 if (!(flg & TCP_FLAG_RST))
74578 req->rsk_ops->send_reset(sk, skb);
74579
74580 diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
74581 index 85ee7eb..53277ab 100644
74582 --- a/net/ipv4/tcp_probe.c
74583 +++ b/net/ipv4/tcp_probe.c
74584 @@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
74585 if (cnt + width >= len)
74586 break;
74587
74588 - if (copy_to_user(buf + cnt, tbuf, width))
74589 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
74590 return -EFAULT;
74591 cnt += width;
74592 }
74593 diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
74594 index cd2e072..1fffee2 100644
74595 --- a/net/ipv4/tcp_timer.c
74596 +++ b/net/ipv4/tcp_timer.c
74597 @@ -22,6 +22,10 @@
74598 #include <linux/gfp.h>
74599 #include <net/tcp.h>
74600
74601 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74602 +extern int grsec_lastack_retries;
74603 +#endif
74604 +
74605 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
74606 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
74607 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
74608 @@ -196,6 +200,13 @@ static int tcp_write_timeout(struct sock *sk)
74609 }
74610 }
74611
74612 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74613 + if ((sk->sk_state == TCP_LAST_ACK) &&
74614 + (grsec_lastack_retries > 0) &&
74615 + (grsec_lastack_retries < retry_until))
74616 + retry_until = grsec_lastack_retries;
74617 +#endif
74618 +
74619 if (retransmits_timed_out(sk, retry_until,
74620 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
74621 /* Has it gone just too far? */
74622 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
74623 index 5d075b5..d907d5f 100644
74624 --- a/net/ipv4/udp.c
74625 +++ b/net/ipv4/udp.c
74626 @@ -86,6 +86,7 @@
74627 #include <linux/types.h>
74628 #include <linux/fcntl.h>
74629 #include <linux/module.h>
74630 +#include <linux/security.h>
74631 #include <linux/socket.h>
74632 #include <linux/sockios.h>
74633 #include <linux/igmp.h>
74634 @@ -108,6 +109,10 @@
74635 #include <trace/events/udp.h>
74636 #include "udp_impl.h"
74637
74638 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74639 +extern int grsec_enable_blackhole;
74640 +#endif
74641 +
74642 struct udp_table udp_table __read_mostly;
74643 EXPORT_SYMBOL(udp_table);
74644
74645 @@ -566,6 +571,9 @@ found:
74646 return s;
74647 }
74648
74649 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
74650 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
74651 +
74652 /*
74653 * This routine is called by the ICMP module when it gets some
74654 * sort of error condition. If err < 0 then the socket should
74655 @@ -857,9 +865,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
74656 dport = usin->sin_port;
74657 if (dport == 0)
74658 return -EINVAL;
74659 +
74660 + err = gr_search_udp_sendmsg(sk, usin);
74661 + if (err)
74662 + return err;
74663 } else {
74664 if (sk->sk_state != TCP_ESTABLISHED)
74665 return -EDESTADDRREQ;
74666 +
74667 + err = gr_search_udp_sendmsg(sk, NULL);
74668 + if (err)
74669 + return err;
74670 +
74671 daddr = inet->inet_daddr;
74672 dport = inet->inet_dport;
74673 /* Open fast path for connected socket.
74674 @@ -1100,7 +1117,7 @@ static unsigned int first_packet_length(struct sock *sk)
74675 udp_lib_checksum_complete(skb)) {
74676 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
74677 IS_UDPLITE(sk));
74678 - atomic_inc(&sk->sk_drops);
74679 + atomic_inc_unchecked(&sk->sk_drops);
74680 __skb_unlink(skb, rcvq);
74681 __skb_queue_tail(&list_kill, skb);
74682 }
74683 @@ -1186,6 +1203,10 @@ try_again:
74684 if (!skb)
74685 goto out;
74686
74687 + err = gr_search_udp_recvmsg(sk, skb);
74688 + if (err)
74689 + goto out_free;
74690 +
74691 ulen = skb->len - sizeof(struct udphdr);
74692 copied = len;
74693 if (copied > ulen)
74694 @@ -1489,7 +1510,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
74695
74696 drop:
74697 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
74698 - atomic_inc(&sk->sk_drops);
74699 + atomic_inc_unchecked(&sk->sk_drops);
74700 kfree_skb(skb);
74701 return -1;
74702 }
74703 @@ -1508,7 +1529,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
74704 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
74705
74706 if (!skb1) {
74707 - atomic_inc(&sk->sk_drops);
74708 + atomic_inc_unchecked(&sk->sk_drops);
74709 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
74710 IS_UDPLITE(sk));
74711 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
74712 @@ -1677,6 +1698,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
74713 goto csum_error;
74714
74715 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
74716 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74717 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
74718 +#endif
74719 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
74720
74721 /*
74722 @@ -2100,8 +2124,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
74723 sk_wmem_alloc_get(sp),
74724 sk_rmem_alloc_get(sp),
74725 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
74726 - atomic_read(&sp->sk_refcnt), sp,
74727 - atomic_read(&sp->sk_drops), len);
74728 + atomic_read(&sp->sk_refcnt),
74729 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74730 + NULL,
74731 +#else
74732 + sp,
74733 +#endif
74734 + atomic_read_unchecked(&sp->sk_drops), len);
74735 }
74736
74737 int udp4_seq_show(struct seq_file *seq, void *v)
74738 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
74739 index 6b8ebc5..1d624f4 100644
74740 --- a/net/ipv6/addrconf.c
74741 +++ b/net/ipv6/addrconf.c
74742 @@ -2145,7 +2145,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
74743 p.iph.ihl = 5;
74744 p.iph.protocol = IPPROTO_IPV6;
74745 p.iph.ttl = 64;
74746 - ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
74747 + ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
74748
74749 if (ops->ndo_do_ioctl) {
74750 mm_segment_t oldfs = get_fs();
74751 diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
74752 index 02dd203..e03fcc9 100644
74753 --- a/net/ipv6/inet6_connection_sock.c
74754 +++ b/net/ipv6/inet6_connection_sock.c
74755 @@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
74756 #ifdef CONFIG_XFRM
74757 {
74758 struct rt6_info *rt = (struct rt6_info *)dst;
74759 - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
74760 + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
74761 }
74762 #endif
74763 }
74764 @@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
74765 #ifdef CONFIG_XFRM
74766 if (dst) {
74767 struct rt6_info *rt = (struct rt6_info *)dst;
74768 - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
74769 + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
74770 __sk_dst_reset(sk);
74771 dst = NULL;
74772 }
74773 diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
74774 index 18a2719..779f36a 100644
74775 --- a/net/ipv6/ipv6_sockglue.c
74776 +++ b/net/ipv6/ipv6_sockglue.c
74777 @@ -960,7 +960,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
74778 if (sk->sk_type != SOCK_STREAM)
74779 return -ENOPROTOOPT;
74780
74781 - msg.msg_control = optval;
74782 + msg.msg_control = (void __force_kernel *)optval;
74783 msg.msg_controllen = len;
74784 msg.msg_flags = flags;
74785
74786 diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
74787 index d02f7e4..2d2a0f1 100644
74788 --- a/net/ipv6/raw.c
74789 +++ b/net/ipv6/raw.c
74790 @@ -377,7 +377,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
74791 {
74792 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
74793 skb_checksum_complete(skb)) {
74794 - atomic_inc(&sk->sk_drops);
74795 + atomic_inc_unchecked(&sk->sk_drops);
74796 kfree_skb(skb);
74797 return NET_RX_DROP;
74798 }
74799 @@ -405,7 +405,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
74800 struct raw6_sock *rp = raw6_sk(sk);
74801
74802 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
74803 - atomic_inc(&sk->sk_drops);
74804 + atomic_inc_unchecked(&sk->sk_drops);
74805 kfree_skb(skb);
74806 return NET_RX_DROP;
74807 }
74808 @@ -429,7 +429,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
74809
74810 if (inet->hdrincl) {
74811 if (skb_checksum_complete(skb)) {
74812 - atomic_inc(&sk->sk_drops);
74813 + atomic_inc_unchecked(&sk->sk_drops);
74814 kfree_skb(skb);
74815 return NET_RX_DROP;
74816 }
74817 @@ -602,7 +602,7 @@ out:
74818 return err;
74819 }
74820
74821 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
74822 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
74823 struct flowi6 *fl6, struct dst_entry **dstp,
74824 unsigned int flags)
74825 {
74826 @@ -912,12 +912,15 @@ do_confirm:
74827 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
74828 char __user *optval, int optlen)
74829 {
74830 + struct icmp6_filter filter;
74831 +
74832 switch (optname) {
74833 case ICMPV6_FILTER:
74834 if (optlen > sizeof(struct icmp6_filter))
74835 optlen = sizeof(struct icmp6_filter);
74836 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
74837 + if (copy_from_user(&filter, optval, optlen))
74838 return -EFAULT;
74839 + raw6_sk(sk)->filter = filter;
74840 return 0;
74841 default:
74842 return -ENOPROTOOPT;
74843 @@ -930,6 +933,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
74844 char __user *optval, int __user *optlen)
74845 {
74846 int len;
74847 + struct icmp6_filter filter;
74848
74849 switch (optname) {
74850 case ICMPV6_FILTER:
74851 @@ -941,7 +945,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
74852 len = sizeof(struct icmp6_filter);
74853 if (put_user(len, optlen))
74854 return -EFAULT;
74855 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
74856 + filter = raw6_sk(sk)->filter;
74857 + if (len > sizeof filter || copy_to_user(optval, &filter, len))
74858 return -EFAULT;
74859 return 0;
74860 default:
74861 @@ -1248,7 +1253,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
74862 0, 0L, 0,
74863 sock_i_uid(sp), 0,
74864 sock_i_ino(sp),
74865 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
74866 + atomic_read(&sp->sk_refcnt),
74867 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74868 + NULL,
74869 +#else
74870 + sp,
74871 +#endif
74872 + atomic_read_unchecked(&sp->sk_drops));
74873 }
74874
74875 static int raw6_seq_show(struct seq_file *seq, void *v)
74876 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
74877 index 3edd05a..63aad01 100644
74878 --- a/net/ipv6/tcp_ipv6.c
74879 +++ b/net/ipv6/tcp_ipv6.c
74880 @@ -94,6 +94,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
74881 }
74882 #endif
74883
74884 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74885 +extern int grsec_enable_blackhole;
74886 +#endif
74887 +
74888 static void tcp_v6_hash(struct sock *sk)
74889 {
74890 if (sk->sk_state != TCP_CLOSE) {
74891 @@ -1650,6 +1654,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
74892 return 0;
74893
74894 reset:
74895 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74896 + if (!grsec_enable_blackhole)
74897 +#endif
74898 tcp_v6_send_reset(sk, skb);
74899 discard:
74900 if (opt_skb)
74901 @@ -1729,12 +1736,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
74902 TCP_SKB_CB(skb)->sacked = 0;
74903
74904 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
74905 - if (!sk)
74906 + if (!sk) {
74907 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74908 + ret = 1;
74909 +#endif
74910 goto no_tcp_socket;
74911 + }
74912
74913 process:
74914 - if (sk->sk_state == TCP_TIME_WAIT)
74915 + if (sk->sk_state == TCP_TIME_WAIT) {
74916 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74917 + ret = 2;
74918 +#endif
74919 goto do_time_wait;
74920 + }
74921
74922 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
74923 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
74924 @@ -1782,6 +1797,10 @@ no_tcp_socket:
74925 bad_packet:
74926 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
74927 } else {
74928 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74929 + if (!grsec_enable_blackhole || (ret == 1 &&
74930 + (skb->dev->flags & IFF_LOOPBACK)))
74931 +#endif
74932 tcp_v6_send_reset(NULL, skb);
74933 }
74934
74935 @@ -2043,7 +2062,13 @@ static void get_openreq6(struct seq_file *seq,
74936 uid,
74937 0, /* non standard timer */
74938 0, /* open_requests have no inode */
74939 - 0, req);
74940 + 0,
74941 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74942 + NULL
74943 +#else
74944 + req
74945 +#endif
74946 + );
74947 }
74948
74949 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
74950 @@ -2093,7 +2118,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
74951 sock_i_uid(sp),
74952 icsk->icsk_probes_out,
74953 sock_i_ino(sp),
74954 - atomic_read(&sp->sk_refcnt), sp,
74955 + atomic_read(&sp->sk_refcnt),
74956 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74957 + NULL,
74958 +#else
74959 + sp,
74960 +#endif
74961 jiffies_to_clock_t(icsk->icsk_rto),
74962 jiffies_to_clock_t(icsk->icsk_ack.ato),
74963 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
74964 @@ -2128,7 +2158,13 @@ static void get_timewait6_sock(struct seq_file *seq,
74965 dest->s6_addr32[2], dest->s6_addr32[3], destp,
74966 tw->tw_substate, 0, 0,
74967 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
74968 - atomic_read(&tw->tw_refcnt), tw);
74969 + atomic_read(&tw->tw_refcnt),
74970 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74971 + NULL
74972 +#else
74973 + tw
74974 +#endif
74975 + );
74976 }
74977
74978 static int tcp6_seq_show(struct seq_file *seq, void *v)
74979 diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
74980 index 4f96b5c..75543d7 100644
74981 --- a/net/ipv6/udp.c
74982 +++ b/net/ipv6/udp.c
74983 @@ -50,6 +50,10 @@
74984 #include <linux/seq_file.h>
74985 #include "udp_impl.h"
74986
74987 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74988 +extern int grsec_enable_blackhole;
74989 +#endif
74990 +
74991 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
74992 {
74993 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
74994 @@ -551,7 +555,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
74995
74996 return 0;
74997 drop:
74998 - atomic_inc(&sk->sk_drops);
74999 + atomic_inc_unchecked(&sk->sk_drops);
75000 drop_no_sk_drops_inc:
75001 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
75002 kfree_skb(skb);
75003 @@ -627,7 +631,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
75004 continue;
75005 }
75006 drop:
75007 - atomic_inc(&sk->sk_drops);
75008 + atomic_inc_unchecked(&sk->sk_drops);
75009 UDP6_INC_STATS_BH(sock_net(sk),
75010 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
75011 UDP6_INC_STATS_BH(sock_net(sk),
75012 @@ -782,6 +786,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
75013 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
75014 proto == IPPROTO_UDPLITE);
75015
75016 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75017 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
75018 +#endif
75019 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
75020
75021 kfree_skb(skb);
75022 @@ -798,7 +805,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
75023 if (!sock_owned_by_user(sk))
75024 udpv6_queue_rcv_skb(sk, skb);
75025 else if (sk_add_backlog(sk, skb)) {
75026 - atomic_inc(&sk->sk_drops);
75027 + atomic_inc_unchecked(&sk->sk_drops);
75028 bh_unlock_sock(sk);
75029 sock_put(sk);
75030 goto discard;
75031 @@ -1410,8 +1417,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
75032 0, 0L, 0,
75033 sock_i_uid(sp), 0,
75034 sock_i_ino(sp),
75035 - atomic_read(&sp->sk_refcnt), sp,
75036 - atomic_read(&sp->sk_drops));
75037 + atomic_read(&sp->sk_refcnt),
75038 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75039 + NULL,
75040 +#else
75041 + sp,
75042 +#endif
75043 + atomic_read_unchecked(&sp->sk_drops));
75044 }
75045
75046 int udp6_seq_show(struct seq_file *seq, void *v)
75047 diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
75048 index 253695d..9481ce8 100644
75049 --- a/net/irda/ircomm/ircomm_tty.c
75050 +++ b/net/irda/ircomm/ircomm_tty.c
75051 @@ -282,16 +282,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
75052 add_wait_queue(&self->open_wait, &wait);
75053
75054 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
75055 - __FILE__,__LINE__, tty->driver->name, self->open_count );
75056 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
75057
75058 /* As far as I can see, we protect open_count - Jean II */
75059 spin_lock_irqsave(&self->spinlock, flags);
75060 if (!tty_hung_up_p(filp)) {
75061 extra_count = 1;
75062 - self->open_count--;
75063 + local_dec(&self->open_count);
75064 }
75065 spin_unlock_irqrestore(&self->spinlock, flags);
75066 - self->blocked_open++;
75067 + local_inc(&self->blocked_open);
75068
75069 while (1) {
75070 if (tty->termios->c_cflag & CBAUD) {
75071 @@ -331,7 +331,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
75072 }
75073
75074 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
75075 - __FILE__,__LINE__, tty->driver->name, self->open_count );
75076 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
75077
75078 schedule();
75079 }
75080 @@ -342,13 +342,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
75081 if (extra_count) {
75082 /* ++ is not atomic, so this should be protected - Jean II */
75083 spin_lock_irqsave(&self->spinlock, flags);
75084 - self->open_count++;
75085 + local_inc(&self->open_count);
75086 spin_unlock_irqrestore(&self->spinlock, flags);
75087 }
75088 - self->blocked_open--;
75089 + local_dec(&self->blocked_open);
75090
75091 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
75092 - __FILE__,__LINE__, tty->driver->name, self->open_count);
75093 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
75094
75095 if (!retval)
75096 self->flags |= ASYNC_NORMAL_ACTIVE;
75097 @@ -417,14 +417,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
75098 }
75099 /* ++ is not atomic, so this should be protected - Jean II */
75100 spin_lock_irqsave(&self->spinlock, flags);
75101 - self->open_count++;
75102 + local_inc(&self->open_count);
75103
75104 tty->driver_data = self;
75105 self->tty = tty;
75106 spin_unlock_irqrestore(&self->spinlock, flags);
75107
75108 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
75109 - self->line, self->open_count);
75110 + self->line, local_read(&self->open_count));
75111
75112 /* Not really used by us, but lets do it anyway */
75113 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
75114 @@ -510,7 +510,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
75115 return;
75116 }
75117
75118 - if ((tty->count == 1) && (self->open_count != 1)) {
75119 + if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
75120 /*
75121 * Uh, oh. tty->count is 1, which means that the tty
75122 * structure will be freed. state->count should always
75123 @@ -520,16 +520,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
75124 */
75125 IRDA_DEBUG(0, "%s(), bad serial port count; "
75126 "tty->count is 1, state->count is %d\n", __func__ ,
75127 - self->open_count);
75128 - self->open_count = 1;
75129 + local_read(&self->open_count));
75130 + local_set(&self->open_count, 1);
75131 }
75132
75133 - if (--self->open_count < 0) {
75134 + if (local_dec_return(&self->open_count) < 0) {
75135 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
75136 - __func__, self->line, self->open_count);
75137 - self->open_count = 0;
75138 + __func__, self->line, local_read(&self->open_count));
75139 + local_set(&self->open_count, 0);
75140 }
75141 - if (self->open_count) {
75142 + if (local_read(&self->open_count)) {
75143 spin_unlock_irqrestore(&self->spinlock, flags);
75144
75145 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
75146 @@ -561,7 +561,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
75147 tty->closing = 0;
75148 self->tty = NULL;
75149
75150 - if (self->blocked_open) {
75151 + if (local_read(&self->blocked_open)) {
75152 if (self->close_delay)
75153 schedule_timeout_interruptible(self->close_delay);
75154 wake_up_interruptible(&self->open_wait);
75155 @@ -1013,7 +1013,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
75156 spin_lock_irqsave(&self->spinlock, flags);
75157 self->flags &= ~ASYNC_NORMAL_ACTIVE;
75158 self->tty = NULL;
75159 - self->open_count = 0;
75160 + local_set(&self->open_count, 0);
75161 spin_unlock_irqrestore(&self->spinlock, flags);
75162
75163 wake_up_interruptible(&self->open_wait);
75164 @@ -1360,7 +1360,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
75165 seq_putc(m, '\n');
75166
75167 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
75168 - seq_printf(m, "Open count: %d\n", self->open_count);
75169 + seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
75170 seq_printf(m, "Max data size: %d\n", self->max_data_size);
75171 seq_printf(m, "Max header size: %d\n", self->max_header_size);
75172
75173 diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
75174 index d5c5b8f..33beff0 100644
75175 --- a/net/iucv/af_iucv.c
75176 +++ b/net/iucv/af_iucv.c
75177 @@ -764,10 +764,10 @@ static int iucv_sock_autobind(struct sock *sk)
75178
75179 write_lock_bh(&iucv_sk_list.lock);
75180
75181 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
75182 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
75183 while (__iucv_get_sock_by_name(name)) {
75184 sprintf(name, "%08x",
75185 - atomic_inc_return(&iucv_sk_list.autobind_name));
75186 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
75187 }
75188
75189 write_unlock_bh(&iucv_sk_list.lock);
75190 diff --git a/net/key/af_key.c b/net/key/af_key.c
75191 index 11dbb22..c20f667 100644
75192 --- a/net/key/af_key.c
75193 +++ b/net/key/af_key.c
75194 @@ -3016,10 +3016,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
75195 static u32 get_acqseq(void)
75196 {
75197 u32 res;
75198 - static atomic_t acqseq;
75199 + static atomic_unchecked_t acqseq;
75200
75201 do {
75202 - res = atomic_inc_return(&acqseq);
75203 + res = atomic_inc_return_unchecked(&acqseq);
75204 } while (!res);
75205 return res;
75206 }
75207 diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
75208 index 2f0642d..e5c6fba 100644
75209 --- a/net/mac80211/ieee80211_i.h
75210 +++ b/net/mac80211/ieee80211_i.h
75211 @@ -28,6 +28,7 @@
75212 #include <net/ieee80211_radiotap.h>
75213 #include <net/cfg80211.h>
75214 #include <net/mac80211.h>
75215 +#include <asm/local.h>
75216 #include "key.h"
75217 #include "sta_info.h"
75218
75219 @@ -781,7 +782,7 @@ struct ieee80211_local {
75220 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
75221 spinlock_t queue_stop_reason_lock;
75222
75223 - int open_count;
75224 + local_t open_count;
75225 int monitors, cooked_mntrs;
75226 /* number of interfaces with corresponding FIF_ flags */
75227 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
75228 diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
75229 index 8e2137b..2974283 100644
75230 --- a/net/mac80211/iface.c
75231 +++ b/net/mac80211/iface.c
75232 @@ -222,7 +222,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
75233 break;
75234 }
75235
75236 - if (local->open_count == 0) {
75237 + if (local_read(&local->open_count) == 0) {
75238 res = drv_start(local);
75239 if (res)
75240 goto err_del_bss;
75241 @@ -246,7 +246,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
75242 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
75243
75244 if (!is_valid_ether_addr(dev->dev_addr)) {
75245 - if (!local->open_count)
75246 + if (!local_read(&local->open_count))
75247 drv_stop(local);
75248 return -EADDRNOTAVAIL;
75249 }
75250 @@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
75251 mutex_unlock(&local->mtx);
75252
75253 if (coming_up)
75254 - local->open_count++;
75255 + local_inc(&local->open_count);
75256
75257 if (hw_reconf_flags)
75258 ieee80211_hw_config(local, hw_reconf_flags);
75259 @@ -360,7 +360,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
75260 err_del_interface:
75261 drv_remove_interface(local, sdata);
75262 err_stop:
75263 - if (!local->open_count)
75264 + if (!local_read(&local->open_count))
75265 drv_stop(local);
75266 err_del_bss:
75267 sdata->bss = NULL;
75268 @@ -489,7 +489,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
75269 }
75270
75271 if (going_down)
75272 - local->open_count--;
75273 + local_dec(&local->open_count);
75274
75275 switch (sdata->vif.type) {
75276 case NL80211_IFTYPE_AP_VLAN:
75277 @@ -548,7 +548,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
75278
75279 ieee80211_recalc_ps(local, -1);
75280
75281 - if (local->open_count == 0) {
75282 + if (local_read(&local->open_count) == 0) {
75283 if (local->ops->napi_poll)
75284 napi_disable(&local->napi);
75285 ieee80211_clear_tx_pending(local);
75286 diff --git a/net/mac80211/main.c b/net/mac80211/main.c
75287 index b142bd4..a651749 100644
75288 --- a/net/mac80211/main.c
75289 +++ b/net/mac80211/main.c
75290 @@ -166,7 +166,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
75291 local->hw.conf.power_level = power;
75292 }
75293
75294 - if (changed && local->open_count) {
75295 + if (changed && local_read(&local->open_count)) {
75296 ret = drv_config(local, changed);
75297 /*
75298 * Goal:
75299 diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
75300 index 596efaf..8f1911f 100644
75301 --- a/net/mac80211/pm.c
75302 +++ b/net/mac80211/pm.c
75303 @@ -34,7 +34,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
75304 struct ieee80211_sub_if_data *sdata;
75305 struct sta_info *sta;
75306
75307 - if (!local->open_count)
75308 + if (!local_read(&local->open_count))
75309 goto suspend;
75310
75311 ieee80211_scan_cancel(local);
75312 @@ -72,7 +72,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
75313 cancel_work_sync(&local->dynamic_ps_enable_work);
75314 del_timer_sync(&local->dynamic_ps_timer);
75315
75316 - local->wowlan = wowlan && local->open_count;
75317 + local->wowlan = wowlan && local_read(&local->open_count);
75318 if (local->wowlan) {
75319 int err = drv_suspend(local, wowlan);
75320 if (err < 0) {
75321 @@ -129,7 +129,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
75322 }
75323
75324 /* stop hardware - this must stop RX */
75325 - if (local->open_count)
75326 + if (local_read(&local->open_count))
75327 ieee80211_stop_device(local);
75328
75329 suspend:
75330 diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
75331 index f9b8e81..bb89b46 100644
75332 --- a/net/mac80211/rate.c
75333 +++ b/net/mac80211/rate.c
75334 @@ -401,7 +401,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
75335
75336 ASSERT_RTNL();
75337
75338 - if (local->open_count)
75339 + if (local_read(&local->open_count))
75340 return -EBUSY;
75341
75342 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
75343 diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
75344 index c97a065..ff61928 100644
75345 --- a/net/mac80211/rc80211_pid_debugfs.c
75346 +++ b/net/mac80211/rc80211_pid_debugfs.c
75347 @@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
75348
75349 spin_unlock_irqrestore(&events->lock, status);
75350
75351 - if (copy_to_user(buf, pb, p))
75352 + if (p > sizeof(pb) || copy_to_user(buf, pb, p))
75353 return -EFAULT;
75354
75355 return p;
75356 diff --git a/net/mac80211/util.c b/net/mac80211/util.c
75357 index 9919892..8c49803 100644
75358 --- a/net/mac80211/util.c
75359 +++ b/net/mac80211/util.c
75360 @@ -1143,7 +1143,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
75361 }
75362 #endif
75363 /* everything else happens only if HW was up & running */
75364 - if (!local->open_count)
75365 + if (!local_read(&local->open_count))
75366 goto wake_up;
75367
75368 /*
75369 diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
75370 index f8ac4ef..b02560b 100644
75371 --- a/net/netfilter/Kconfig
75372 +++ b/net/netfilter/Kconfig
75373 @@ -806,6 +806,16 @@ config NETFILTER_XT_MATCH_ESP
75374
75375 To compile it as a module, choose M here. If unsure, say N.
75376
75377 +config NETFILTER_XT_MATCH_GRADM
75378 + tristate '"gradm" match support'
75379 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
75380 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
75381 + ---help---
75382 + The gradm match allows to match on grsecurity RBAC being enabled.
75383 + It is useful when iptables rules are applied early on bootup to
75384 + prevent connections to the machine (except from a trusted host)
75385 + while the RBAC system is disabled.
75386 +
75387 config NETFILTER_XT_MATCH_HASHLIMIT
75388 tristate '"hashlimit" match support'
75389 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
75390 diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
75391 index 40f4c3d..0d5dd6b 100644
75392 --- a/net/netfilter/Makefile
75393 +++ b/net/netfilter/Makefile
75394 @@ -83,6 +83,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
75395 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
75396 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
75397 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
75398 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
75399 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
75400 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
75401 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
75402 diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
75403 index 29fa5ba..8debc79 100644
75404 --- a/net/netfilter/ipvs/ip_vs_conn.c
75405 +++ b/net/netfilter/ipvs/ip_vs_conn.c
75406 @@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
75407 /* Increase the refcnt counter of the dest */
75408 atomic_inc(&dest->refcnt);
75409
75410 - conn_flags = atomic_read(&dest->conn_flags);
75411 + conn_flags = atomic_read_unchecked(&dest->conn_flags);
75412 if (cp->protocol != IPPROTO_UDP)
75413 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
75414 /* Bind with the destination and its corresponding transmitter */
75415 @@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
75416 atomic_set(&cp->refcnt, 1);
75417
75418 atomic_set(&cp->n_control, 0);
75419 - atomic_set(&cp->in_pkts, 0);
75420 + atomic_set_unchecked(&cp->in_pkts, 0);
75421
75422 atomic_inc(&ipvs->conn_count);
75423 if (flags & IP_VS_CONN_F_NO_CPORT)
75424 @@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
75425
75426 /* Don't drop the entry if its number of incoming packets is not
75427 located in [0, 8] */
75428 - i = atomic_read(&cp->in_pkts);
75429 + i = atomic_read_unchecked(&cp->in_pkts);
75430 if (i > 8 || i < 0) return 0;
75431
75432 if (!todrop_rate[i]) return 0;
75433 diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
75434 index 2555816..31492d9 100644
75435 --- a/net/netfilter/ipvs/ip_vs_core.c
75436 +++ b/net/netfilter/ipvs/ip_vs_core.c
75437 @@ -562,7 +562,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
75438 ret = cp->packet_xmit(skb, cp, pd->pp);
75439 /* do not touch skb anymore */
75440
75441 - atomic_inc(&cp->in_pkts);
75442 + atomic_inc_unchecked(&cp->in_pkts);
75443 ip_vs_conn_put(cp);
75444 return ret;
75445 }
75446 @@ -1611,7 +1611,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
75447 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
75448 pkts = sysctl_sync_threshold(ipvs);
75449 else
75450 - pkts = atomic_add_return(1, &cp->in_pkts);
75451 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
75452
75453 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
75454 cp->protocol == IPPROTO_SCTP) {
75455 diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
75456 index b3afe18..08ec940 100644
75457 --- a/net/netfilter/ipvs/ip_vs_ctl.c
75458 +++ b/net/netfilter/ipvs/ip_vs_ctl.c
75459 @@ -788,7 +788,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
75460 ip_vs_rs_hash(ipvs, dest);
75461 write_unlock_bh(&ipvs->rs_lock);
75462 }
75463 - atomic_set(&dest->conn_flags, conn_flags);
75464 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
75465
75466 /* bind the service */
75467 if (!dest->svc) {
75468 @@ -2028,7 +2028,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
75469 " %-7s %-6d %-10d %-10d\n",
75470 &dest->addr.in6,
75471 ntohs(dest->port),
75472 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
75473 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
75474 atomic_read(&dest->weight),
75475 atomic_read(&dest->activeconns),
75476 atomic_read(&dest->inactconns));
75477 @@ -2039,7 +2039,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
75478 "%-7s %-6d %-10d %-10d\n",
75479 ntohl(dest->addr.ip),
75480 ntohs(dest->port),
75481 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
75482 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
75483 atomic_read(&dest->weight),
75484 atomic_read(&dest->activeconns),
75485 atomic_read(&dest->inactconns));
75486 @@ -2509,7 +2509,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
75487
75488 entry.addr = dest->addr.ip;
75489 entry.port = dest->port;
75490 - entry.conn_flags = atomic_read(&dest->conn_flags);
75491 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
75492 entry.weight = atomic_read(&dest->weight);
75493 entry.u_threshold = dest->u_threshold;
75494 entry.l_threshold = dest->l_threshold;
75495 @@ -3042,7 +3042,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
75496 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
75497
75498 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
75499 - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
75500 + atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
75501 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
75502 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
75503 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
75504 diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
75505 index 8a0d6d6..90ec197 100644
75506 --- a/net/netfilter/ipvs/ip_vs_sync.c
75507 +++ b/net/netfilter/ipvs/ip_vs_sync.c
75508 @@ -649,7 +649,7 @@ control:
75509 * i.e only increment in_pkts for Templates.
75510 */
75511 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
75512 - int pkts = atomic_add_return(1, &cp->in_pkts);
75513 + int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
75514
75515 if (pkts % sysctl_sync_period(ipvs) != 1)
75516 return;
75517 @@ -795,7 +795,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
75518
75519 if (opt)
75520 memcpy(&cp->in_seq, opt, sizeof(*opt));
75521 - atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
75522 + atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
75523 cp->state = state;
75524 cp->old_state = cp->state;
75525 /*
75526 diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
75527 index 7fd66de..e6fb361 100644
75528 --- a/net/netfilter/ipvs/ip_vs_xmit.c
75529 +++ b/net/netfilter/ipvs/ip_vs_xmit.c
75530 @@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
75531 else
75532 rc = NF_ACCEPT;
75533 /* do not touch skb anymore */
75534 - atomic_inc(&cp->in_pkts);
75535 + atomic_inc_unchecked(&cp->in_pkts);
75536 goto out;
75537 }
75538
75539 @@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
75540 else
75541 rc = NF_ACCEPT;
75542 /* do not touch skb anymore */
75543 - atomic_inc(&cp->in_pkts);
75544 + atomic_inc_unchecked(&cp->in_pkts);
75545 goto out;
75546 }
75547
75548 diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
75549 index 66b2c54..c7884e3 100644
75550 --- a/net/netfilter/nfnetlink_log.c
75551 +++ b/net/netfilter/nfnetlink_log.c
75552 @@ -70,7 +70,7 @@ struct nfulnl_instance {
75553 };
75554
75555 static DEFINE_SPINLOCK(instances_lock);
75556 -static atomic_t global_seq;
75557 +static atomic_unchecked_t global_seq;
75558
75559 #define INSTANCE_BUCKETS 16
75560 static struct hlist_head instance_table[INSTANCE_BUCKETS];
75561 @@ -502,7 +502,7 @@ __build_packet_message(struct nfulnl_instance *inst,
75562 /* global sequence number */
75563 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
75564 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
75565 - htonl(atomic_inc_return(&global_seq)));
75566 + htonl(atomic_inc_return_unchecked(&global_seq)));
75567
75568 if (data_len) {
75569 struct nlattr *nla;
75570 diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
75571 new file mode 100644
75572 index 0000000..6905327
75573 --- /dev/null
75574 +++ b/net/netfilter/xt_gradm.c
75575 @@ -0,0 +1,51 @@
75576 +/*
75577 + * gradm match for netfilter
75578 + * Copyright © Zbigniew Krzystolik, 2010
75579 + *
75580 + * This program is free software; you can redistribute it and/or modify
75581 + * it under the terms of the GNU General Public License; either version
75582 + * 2 or 3 as published by the Free Software Foundation.
75583 + */
75584 +#include <linux/module.h>
75585 +#include <linux/moduleparam.h>
75586 +#include <linux/skbuff.h>
75587 +#include <linux/netfilter/x_tables.h>
75588 +#include <linux/grsecurity.h>
75589 +#include <linux/netfilter/xt_gradm.h>
75590 +
75591 +static bool
75592 +gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
75593 +{
75594 + const struct xt_gradm_mtinfo *info = par->matchinfo;
75595 + bool retval = false;
75596 + if (gr_acl_is_enabled())
75597 + retval = true;
75598 + return retval ^ info->invflags;
75599 +}
75600 +
75601 +static struct xt_match gradm_mt_reg __read_mostly = {
75602 + .name = "gradm",
75603 + .revision = 0,
75604 + .family = NFPROTO_UNSPEC,
75605 + .match = gradm_mt,
75606 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
75607 + .me = THIS_MODULE,
75608 +};
75609 +
75610 +static int __init gradm_mt_init(void)
75611 +{
75612 + return xt_register_match(&gradm_mt_reg);
75613 +}
75614 +
75615 +static void __exit gradm_mt_exit(void)
75616 +{
75617 + xt_unregister_match(&gradm_mt_reg);
75618 +}
75619 +
75620 +module_init(gradm_mt_init);
75621 +module_exit(gradm_mt_exit);
75622 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
75623 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
75624 +MODULE_LICENSE("GPL");
75625 +MODULE_ALIAS("ipt_gradm");
75626 +MODULE_ALIAS("ip6t_gradm");
75627 diff --git a/net/netfilter/xt_statistic.c b/net/netfilter/xt_statistic.c
75628 index 4fe4fb4..87a89e5 100644
75629 --- a/net/netfilter/xt_statistic.c
75630 +++ b/net/netfilter/xt_statistic.c
75631 @@ -19,7 +19,7 @@
75632 #include <linux/module.h>
75633
75634 struct xt_statistic_priv {
75635 - atomic_t count;
75636 + atomic_unchecked_t count;
75637 } ____cacheline_aligned_in_smp;
75638
75639 MODULE_LICENSE("GPL");
75640 @@ -42,9 +42,9 @@ statistic_mt(const struct sk_buff *skb, struct xt_action_param *par)
75641 break;
75642 case XT_STATISTIC_MODE_NTH:
75643 do {
75644 - oval = atomic_read(&info->master->count);
75645 + oval = atomic_read_unchecked(&info->master->count);
75646 nval = (oval == info->u.nth.every) ? 0 : oval + 1;
75647 - } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
75648 + } while (atomic_cmpxchg_unchecked(&info->master->count, oval, nval) != oval);
75649 if (nval == 0)
75650 ret = !ret;
75651 break;
75652 @@ -64,7 +64,7 @@ static int statistic_mt_check(const struct xt_mtchk_param *par)
75653 info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
75654 if (info->master == NULL)
75655 return -ENOMEM;
75656 - atomic_set(&info->master->count, info->u.nth.count);
75657 + atomic_set_unchecked(&info->master->count, info->u.nth.count);
75658
75659 return 0;
75660 }
75661 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
75662 index 629b061..21cd04c 100644
75663 --- a/net/netlink/af_netlink.c
75664 +++ b/net/netlink/af_netlink.c
75665 @@ -741,7 +741,7 @@ static void netlink_overrun(struct sock *sk)
75666 sk->sk_error_report(sk);
75667 }
75668 }
75669 - atomic_inc(&sk->sk_drops);
75670 + atomic_inc_unchecked(&sk->sk_drops);
75671 }
75672
75673 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
75674 @@ -1995,7 +1995,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
75675 sk_wmem_alloc_get(s),
75676 nlk->cb,
75677 atomic_read(&s->sk_refcnt),
75678 - atomic_read(&s->sk_drops),
75679 + atomic_read_unchecked(&s->sk_drops),
75680 sock_i_ino(s)
75681 );
75682
75683 diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
75684 index 7dab229..212156f 100644
75685 --- a/net/netrom/af_netrom.c
75686 +++ b/net/netrom/af_netrom.c
75687 @@ -839,6 +839,7 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
75688 struct sock *sk = sock->sk;
75689 struct nr_sock *nr = nr_sk(sk);
75690
75691 + memset(sax, 0, sizeof(*sax));
75692 lock_sock(sk);
75693 if (peer != 0) {
75694 if (sk->sk_state != TCP_ESTABLISHED) {
75695 @@ -853,7 +854,6 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
75696 *uaddr_len = sizeof(struct full_sockaddr_ax25);
75697 } else {
75698 sax->fsa_ax25.sax25_family = AF_NETROM;
75699 - sax->fsa_ax25.sax25_ndigis = 0;
75700 sax->fsa_ax25.sax25_call = nr->source_addr;
75701 *uaddr_len = sizeof(struct sockaddr_ax25);
75702 }
75703 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
75704 index 2dbb32b..a1b4722 100644
75705 --- a/net/packet/af_packet.c
75706 +++ b/net/packet/af_packet.c
75707 @@ -1676,7 +1676,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
75708
75709 spin_lock(&sk->sk_receive_queue.lock);
75710 po->stats.tp_packets++;
75711 - skb->dropcount = atomic_read(&sk->sk_drops);
75712 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
75713 __skb_queue_tail(&sk->sk_receive_queue, skb);
75714 spin_unlock(&sk->sk_receive_queue.lock);
75715 sk->sk_data_ready(sk, skb->len);
75716 @@ -1685,7 +1685,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
75717 drop_n_acct:
75718 spin_lock(&sk->sk_receive_queue.lock);
75719 po->stats.tp_drops++;
75720 - atomic_inc(&sk->sk_drops);
75721 + atomic_inc_unchecked(&sk->sk_drops);
75722 spin_unlock(&sk->sk_receive_queue.lock);
75723
75724 drop_n_restore:
75725 @@ -3271,7 +3271,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
75726 case PACKET_HDRLEN:
75727 if (len > sizeof(int))
75728 len = sizeof(int);
75729 - if (copy_from_user(&val, optval, len))
75730 + if (len > sizeof(val) || copy_from_user(&val, optval, len))
75731 return -EFAULT;
75732 switch (val) {
75733 case TPACKET_V1:
75734 @@ -3321,7 +3321,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
75735
75736 if (put_user(len, optlen))
75737 return -EFAULT;
75738 - if (copy_to_user(optval, data, len))
75739 + if (len > sizeof(st) || copy_to_user(optval, data, len))
75740 return -EFAULT;
75741 return 0;
75742 }
75743 diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
75744 index d65f699..05aa6ce 100644
75745 --- a/net/phonet/af_phonet.c
75746 +++ b/net/phonet/af_phonet.c
75747 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_proto_get(unsigned int protocol)
75748 {
75749 struct phonet_protocol *pp;
75750
75751 - if (protocol >= PHONET_NPROTO)
75752 + if (protocol < 0 || protocol >= PHONET_NPROTO)
75753 return NULL;
75754
75755 rcu_read_lock();
75756 @@ -469,7 +469,7 @@ int __init_or_module phonet_proto_register(unsigned int protocol,
75757 {
75758 int err = 0;
75759
75760 - if (protocol >= PHONET_NPROTO)
75761 + if (protocol < 0 || protocol >= PHONET_NPROTO)
75762 return -EINVAL;
75763
75764 err = proto_register(pp->prot, 1);
75765 diff --git a/net/phonet/pep.c b/net/phonet/pep.c
75766 index 9f60008..ae96f04 100644
75767 --- a/net/phonet/pep.c
75768 +++ b/net/phonet/pep.c
75769 @@ -388,7 +388,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
75770
75771 case PNS_PEP_CTRL_REQ:
75772 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
75773 - atomic_inc(&sk->sk_drops);
75774 + atomic_inc_unchecked(&sk->sk_drops);
75775 break;
75776 }
75777 __skb_pull(skb, 4);
75778 @@ -409,7 +409,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
75779 }
75780
75781 if (pn->rx_credits == 0) {
75782 - atomic_inc(&sk->sk_drops);
75783 + atomic_inc_unchecked(&sk->sk_drops);
75784 err = -ENOBUFS;
75785 break;
75786 }
75787 @@ -580,7 +580,7 @@ static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
75788 }
75789
75790 if (pn->rx_credits == 0) {
75791 - atomic_inc(&sk->sk_drops);
75792 + atomic_inc_unchecked(&sk->sk_drops);
75793 err = NET_RX_DROP;
75794 break;
75795 }
75796 diff --git a/net/phonet/socket.c b/net/phonet/socket.c
75797 index 4c7eff3..59c727f 100644
75798 --- a/net/phonet/socket.c
75799 +++ b/net/phonet/socket.c
75800 @@ -613,8 +613,13 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
75801 pn->resource, sk->sk_state,
75802 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
75803 sock_i_uid(sk), sock_i_ino(sk),
75804 - atomic_read(&sk->sk_refcnt), sk,
75805 - atomic_read(&sk->sk_drops), &len);
75806 + atomic_read(&sk->sk_refcnt),
75807 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75808 + NULL,
75809 +#else
75810 + sk,
75811 +#endif
75812 + atomic_read_unchecked(&sk->sk_drops), &len);
75813 }
75814 seq_printf(seq, "%*s\n", 127 - len, "");
75815 return 0;
75816 diff --git a/net/rds/cong.c b/net/rds/cong.c
75817 index e5b65ac..f3b6fb7 100644
75818 --- a/net/rds/cong.c
75819 +++ b/net/rds/cong.c
75820 @@ -78,7 +78,7 @@
75821 * finds that the saved generation number is smaller than the global generation
75822 * number, it wakes up the process.
75823 */
75824 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
75825 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
75826
75827 /*
75828 * Congestion monitoring
75829 @@ -233,7 +233,7 @@ void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
75830 rdsdebug("waking map %p for %pI4\n",
75831 map, &map->m_addr);
75832 rds_stats_inc(s_cong_update_received);
75833 - atomic_inc(&rds_cong_generation);
75834 + atomic_inc_unchecked(&rds_cong_generation);
75835 if (waitqueue_active(&map->m_waitq))
75836 wake_up(&map->m_waitq);
75837 if (waitqueue_active(&rds_poll_waitq))
75838 @@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
75839
75840 int rds_cong_updated_since(unsigned long *recent)
75841 {
75842 - unsigned long gen = atomic_read(&rds_cong_generation);
75843 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
75844
75845 if (likely(*recent == gen))
75846 return 0;
75847 diff --git a/net/rds/ib.h b/net/rds/ib.h
75848 index edfaaaf..8c89879 100644
75849 --- a/net/rds/ib.h
75850 +++ b/net/rds/ib.h
75851 @@ -128,7 +128,7 @@ struct rds_ib_connection {
75852 /* sending acks */
75853 unsigned long i_ack_flags;
75854 #ifdef KERNEL_HAS_ATOMIC64
75855 - atomic64_t i_ack_next; /* next ACK to send */
75856 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
75857 #else
75858 spinlock_t i_ack_lock; /* protect i_ack_next */
75859 u64 i_ack_next; /* next ACK to send */
75860 diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
75861 index 51c8689..36c555f 100644
75862 --- a/net/rds/ib_cm.c
75863 +++ b/net/rds/ib_cm.c
75864 @@ -718,7 +718,7 @@ void rds_ib_conn_shutdown(struct rds_connection *conn)
75865 /* Clear the ACK state */
75866 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
75867 #ifdef KERNEL_HAS_ATOMIC64
75868 - atomic64_set(&ic->i_ack_next, 0);
75869 + atomic64_set_unchecked(&ic->i_ack_next, 0);
75870 #else
75871 ic->i_ack_next = 0;
75872 #endif
75873 diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
75874 index e29e0ca..fa3a6a3 100644
75875 --- a/net/rds/ib_recv.c
75876 +++ b/net/rds/ib_recv.c
75877 @@ -592,7 +592,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
75878 static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
75879 int ack_required)
75880 {
75881 - atomic64_set(&ic->i_ack_next, seq);
75882 + atomic64_set_unchecked(&ic->i_ack_next, seq);
75883 if (ack_required) {
75884 smp_mb__before_clear_bit();
75885 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
75886 @@ -604,7 +604,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
75887 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
75888 smp_mb__after_clear_bit();
75889
75890 - return atomic64_read(&ic->i_ack_next);
75891 + return atomic64_read_unchecked(&ic->i_ack_next);
75892 }
75893 #endif
75894
75895 diff --git a/net/rds/iw.h b/net/rds/iw.h
75896 index 04ce3b1..48119a6 100644
75897 --- a/net/rds/iw.h
75898 +++ b/net/rds/iw.h
75899 @@ -134,7 +134,7 @@ struct rds_iw_connection {
75900 /* sending acks */
75901 unsigned long i_ack_flags;
75902 #ifdef KERNEL_HAS_ATOMIC64
75903 - atomic64_t i_ack_next; /* next ACK to send */
75904 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
75905 #else
75906 spinlock_t i_ack_lock; /* protect i_ack_next */
75907 u64 i_ack_next; /* next ACK to send */
75908 diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
75909 index 9556d28..f046d0e 100644
75910 --- a/net/rds/iw_cm.c
75911 +++ b/net/rds/iw_cm.c
75912 @@ -663,7 +663,7 @@ void rds_iw_conn_shutdown(struct rds_connection *conn)
75913 /* Clear the ACK state */
75914 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
75915 #ifdef KERNEL_HAS_ATOMIC64
75916 - atomic64_set(&ic->i_ack_next, 0);
75917 + atomic64_set_unchecked(&ic->i_ack_next, 0);
75918 #else
75919 ic->i_ack_next = 0;
75920 #endif
75921 diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
75922 index 5e57347..3916042 100644
75923 --- a/net/rds/iw_recv.c
75924 +++ b/net/rds/iw_recv.c
75925 @@ -427,7 +427,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
75926 static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
75927 int ack_required)
75928 {
75929 - atomic64_set(&ic->i_ack_next, seq);
75930 + atomic64_set_unchecked(&ic->i_ack_next, seq);
75931 if (ack_required) {
75932 smp_mb__before_clear_bit();
75933 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
75934 @@ -439,7 +439,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
75935 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
75936 smp_mb__after_clear_bit();
75937
75938 - return atomic64_read(&ic->i_ack_next);
75939 + return atomic64_read_unchecked(&ic->i_ack_next);
75940 }
75941 #endif
75942
75943 diff --git a/net/rds/tcp.c b/net/rds/tcp.c
75944 index edac9ef..16bcb98 100644
75945 --- a/net/rds/tcp.c
75946 +++ b/net/rds/tcp.c
75947 @@ -59,7 +59,7 @@ void rds_tcp_nonagle(struct socket *sock)
75948 int val = 1;
75949
75950 set_fs(KERNEL_DS);
75951 - sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val,
75952 + sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __force_user *)&val,
75953 sizeof(val));
75954 set_fs(oldfs);
75955 }
75956 diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
75957 index 1b4fd68..2234175 100644
75958 --- a/net/rds/tcp_send.c
75959 +++ b/net/rds/tcp_send.c
75960 @@ -43,7 +43,7 @@ static void rds_tcp_cork(struct socket *sock, int val)
75961
75962 oldfs = get_fs();
75963 set_fs(KERNEL_DS);
75964 - sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __user *)&val,
75965 + sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __force_user *)&val,
75966 sizeof(val));
75967 set_fs(oldfs);
75968 }
75969 diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
75970 index 74c064c..fdec26f 100644
75971 --- a/net/rxrpc/af_rxrpc.c
75972 +++ b/net/rxrpc/af_rxrpc.c
75973 @@ -39,7 +39,7 @@ static const struct proto_ops rxrpc_rpc_ops;
75974 __be32 rxrpc_epoch;
75975
75976 /* current debugging ID */
75977 -atomic_t rxrpc_debug_id;
75978 +atomic_unchecked_t rxrpc_debug_id;
75979
75980 /* count of skbs currently in use */
75981 atomic_t rxrpc_n_skbs;
75982 diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
75983 index c3126e8..21facc7 100644
75984 --- a/net/rxrpc/ar-ack.c
75985 +++ b/net/rxrpc/ar-ack.c
75986 @@ -175,7 +175,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
75987
75988 _enter("{%d,%d,%d,%d},",
75989 call->acks_hard, call->acks_unacked,
75990 - atomic_read(&call->sequence),
75991 + atomic_read_unchecked(&call->sequence),
75992 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
75993
75994 stop = 0;
75995 @@ -199,7 +199,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
75996
75997 /* each Tx packet has a new serial number */
75998 sp->hdr.serial =
75999 - htonl(atomic_inc_return(&call->conn->serial));
76000 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
76001
76002 hdr = (struct rxrpc_header *) txb->head;
76003 hdr->serial = sp->hdr.serial;
76004 @@ -403,7 +403,7 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
76005 */
76006 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
76007 {
76008 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
76009 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
76010 }
76011
76012 /*
76013 @@ -629,7 +629,7 @@ process_further:
76014
76015 latest = ntohl(sp->hdr.serial);
76016 hard = ntohl(ack.firstPacket);
76017 - tx = atomic_read(&call->sequence);
76018 + tx = atomic_read_unchecked(&call->sequence);
76019
76020 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
76021 latest,
76022 @@ -1161,7 +1161,7 @@ void rxrpc_process_call(struct work_struct *work)
76023 goto maybe_reschedule;
76024
76025 send_ACK_with_skew:
76026 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
76027 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
76028 ntohl(ack.serial));
76029 send_ACK:
76030 mtu = call->conn->trans->peer->if_mtu;
76031 @@ -1173,7 +1173,7 @@ send_ACK:
76032 ackinfo.rxMTU = htonl(5692);
76033 ackinfo.jumbo_max = htonl(4);
76034
76035 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
76036 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
76037 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
76038 ntohl(hdr.serial),
76039 ntohs(ack.maxSkew),
76040 @@ -1191,7 +1191,7 @@ send_ACK:
76041 send_message:
76042 _debug("send message");
76043
76044 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
76045 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
76046 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
76047 send_message_2:
76048
76049 diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
76050 index bf656c2..48f9d27 100644
76051 --- a/net/rxrpc/ar-call.c
76052 +++ b/net/rxrpc/ar-call.c
76053 @@ -83,7 +83,7 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
76054 spin_lock_init(&call->lock);
76055 rwlock_init(&call->state_lock);
76056 atomic_set(&call->usage, 1);
76057 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
76058 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
76059 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
76060
76061 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
76062 diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
76063 index 4106ca9..a338d7a 100644
76064 --- a/net/rxrpc/ar-connection.c
76065 +++ b/net/rxrpc/ar-connection.c
76066 @@ -206,7 +206,7 @@ static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
76067 rwlock_init(&conn->lock);
76068 spin_lock_init(&conn->state_lock);
76069 atomic_set(&conn->usage, 1);
76070 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
76071 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
76072 conn->avail_calls = RXRPC_MAXCALLS;
76073 conn->size_align = 4;
76074 conn->header_size = sizeof(struct rxrpc_header);
76075 diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c
76076 index e7ed43a..6afa140 100644
76077 --- a/net/rxrpc/ar-connevent.c
76078 +++ b/net/rxrpc/ar-connevent.c
76079 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
76080
76081 len = iov[0].iov_len + iov[1].iov_len;
76082
76083 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
76084 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
76085 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
76086
76087 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
76088 diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
76089 index 1a2b0633..e8d1382 100644
76090 --- a/net/rxrpc/ar-input.c
76091 +++ b/net/rxrpc/ar-input.c
76092 @@ -340,9 +340,9 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
76093 /* track the latest serial number on this connection for ACK packet
76094 * information */
76095 serial = ntohl(sp->hdr.serial);
76096 - hi_serial = atomic_read(&call->conn->hi_serial);
76097 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
76098 while (serial > hi_serial)
76099 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
76100 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
76101 serial);
76102
76103 /* request ACK generation for any ACK or DATA packet that requests
76104 diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
76105 index 8e22bd3..f66d1c0 100644
76106 --- a/net/rxrpc/ar-internal.h
76107 +++ b/net/rxrpc/ar-internal.h
76108 @@ -272,8 +272,8 @@ struct rxrpc_connection {
76109 int error; /* error code for local abort */
76110 int debug_id; /* debug ID for printks */
76111 unsigned call_counter; /* call ID counter */
76112 - atomic_t serial; /* packet serial number counter */
76113 - atomic_t hi_serial; /* highest serial number received */
76114 + atomic_unchecked_t serial; /* packet serial number counter */
76115 + atomic_unchecked_t hi_serial; /* highest serial number received */
76116 u8 avail_calls; /* number of calls available */
76117 u8 size_align; /* data size alignment (for security) */
76118 u8 header_size; /* rxrpc + security header size */
76119 @@ -346,7 +346,7 @@ struct rxrpc_call {
76120 spinlock_t lock;
76121 rwlock_t state_lock; /* lock for state transition */
76122 atomic_t usage;
76123 - atomic_t sequence; /* Tx data packet sequence counter */
76124 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
76125 u32 abort_code; /* local/remote abort code */
76126 enum { /* current state of call */
76127 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
76128 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code)
76129 */
76130 extern atomic_t rxrpc_n_skbs;
76131 extern __be32 rxrpc_epoch;
76132 -extern atomic_t rxrpc_debug_id;
76133 +extern atomic_unchecked_t rxrpc_debug_id;
76134 extern struct workqueue_struct *rxrpc_workqueue;
76135
76136 /*
76137 diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c
76138 index 87f7135..74d3703 100644
76139 --- a/net/rxrpc/ar-local.c
76140 +++ b/net/rxrpc/ar-local.c
76141 @@ -45,7 +45,7 @@ struct rxrpc_local *rxrpc_alloc_local(struct sockaddr_rxrpc *srx)
76142 spin_lock_init(&local->lock);
76143 rwlock_init(&local->services_lock);
76144 atomic_set(&local->usage, 1);
76145 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
76146 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
76147 memcpy(&local->srx, srx, sizeof(*srx));
76148 }
76149
76150 diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
76151 index 16ae887..d24f12b 100644
76152 --- a/net/rxrpc/ar-output.c
76153 +++ b/net/rxrpc/ar-output.c
76154 @@ -682,9 +682,9 @@ static int rxrpc_send_data(struct kiocb *iocb,
76155 sp->hdr.cid = call->cid;
76156 sp->hdr.callNumber = call->call_id;
76157 sp->hdr.seq =
76158 - htonl(atomic_inc_return(&call->sequence));
76159 + htonl(atomic_inc_return_unchecked(&call->sequence));
76160 sp->hdr.serial =
76161 - htonl(atomic_inc_return(&conn->serial));
76162 + htonl(atomic_inc_return_unchecked(&conn->serial));
76163 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
76164 sp->hdr.userStatus = 0;
76165 sp->hdr.securityIndex = conn->security_ix;
76166 diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
76167 index 2754f09..b20e38f 100644
76168 --- a/net/rxrpc/ar-peer.c
76169 +++ b/net/rxrpc/ar-peer.c
76170 @@ -72,7 +72,7 @@ static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx,
76171 INIT_LIST_HEAD(&peer->error_targets);
76172 spin_lock_init(&peer->lock);
76173 atomic_set(&peer->usage, 1);
76174 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
76175 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
76176 memcpy(&peer->srx, srx, sizeof(*srx));
76177
76178 rxrpc_assess_MTU_size(peer);
76179 diff --git a/net/rxrpc/ar-proc.c b/net/rxrpc/ar-proc.c
76180 index 38047f7..9f48511 100644
76181 --- a/net/rxrpc/ar-proc.c
76182 +++ b/net/rxrpc/ar-proc.c
76183 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
76184 atomic_read(&conn->usage),
76185 rxrpc_conn_states[conn->state],
76186 key_serial(conn->key),
76187 - atomic_read(&conn->serial),
76188 - atomic_read(&conn->hi_serial));
76189 + atomic_read_unchecked(&conn->serial),
76190 + atomic_read_unchecked(&conn->hi_serial));
76191
76192 return 0;
76193 }
76194 diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c
76195 index 92df566..87ec1bf 100644
76196 --- a/net/rxrpc/ar-transport.c
76197 +++ b/net/rxrpc/ar-transport.c
76198 @@ -47,7 +47,7 @@ static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local,
76199 spin_lock_init(&trans->client_lock);
76200 rwlock_init(&trans->conn_lock);
76201 atomic_set(&trans->usage, 1);
76202 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
76203 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
76204
76205 if (peer->srx.transport.family == AF_INET) {
76206 switch (peer->srx.transport_type) {
76207 diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
76208 index 7635107..4670276 100644
76209 --- a/net/rxrpc/rxkad.c
76210 +++ b/net/rxrpc/rxkad.c
76211 @@ -610,7 +610,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
76212
76213 len = iov[0].iov_len + iov[1].iov_len;
76214
76215 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
76216 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
76217 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
76218
76219 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
76220 @@ -660,7 +660,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
76221
76222 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
76223
76224 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
76225 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
76226 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
76227
76228 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
76229 diff --git a/net/sctp/proc.c b/net/sctp/proc.c
76230 index 1e2eee8..ce3967e 100644
76231 --- a/net/sctp/proc.c
76232 +++ b/net/sctp/proc.c
76233 @@ -319,7 +319,8 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
76234 seq_printf(seq,
76235 "%8pK %8pK %-3d %-3d %-2d %-4d "
76236 "%4d %8d %8d %7d %5lu %-5d %5d ",
76237 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
76238 + assoc, sk,
76239 + sctp_sk(sk)->type, sk->sk_state,
76240 assoc->state, hash,
76241 assoc->assoc_id,
76242 assoc->sndbuf_used,
76243 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
76244 index 408ebd0..202aa85 100644
76245 --- a/net/sctp/socket.c
76246 +++ b/net/sctp/socket.c
76247 @@ -4574,7 +4574,7 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
76248 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
76249 if (space_left < addrlen)
76250 return -ENOMEM;
76251 - if (copy_to_user(to, &temp, addrlen))
76252 + if (addrlen > sizeof(temp) || copy_to_user(to, &temp, addrlen))
76253 return -EFAULT;
76254 to += addrlen;
76255 cnt++;
76256 diff --git a/net/socket.c b/net/socket.c
76257 index 28a96af..61a7a06 100644
76258 --- a/net/socket.c
76259 +++ b/net/socket.c
76260 @@ -88,6 +88,7 @@
76261 #include <linux/nsproxy.h>
76262 #include <linux/magic.h>
76263 #include <linux/slab.h>
76264 +#include <linux/in.h>
76265
76266 #include <asm/uaccess.h>
76267 #include <asm/unistd.h>
76268 @@ -105,6 +106,8 @@
76269 #include <linux/sockios.h>
76270 #include <linux/atalk.h>
76271
76272 +#include <linux/grsock.h>
76273 +
76274 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
76275 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
76276 unsigned long nr_segs, loff_t pos);
76277 @@ -321,7 +324,7 @@ static struct dentry *sockfs_mount(struct file_system_type *fs_type,
76278 &sockfs_dentry_operations, SOCKFS_MAGIC);
76279 }
76280
76281 -static struct vfsmount *sock_mnt __read_mostly;
76282 +struct vfsmount *sock_mnt __read_mostly;
76283
76284 static struct file_system_type sock_fs_type = {
76285 .name = "sockfs",
76286 @@ -1207,6 +1210,8 @@ int __sock_create(struct net *net, int family, int type, int protocol,
76287 return -EAFNOSUPPORT;
76288 if (type < 0 || type >= SOCK_MAX)
76289 return -EINVAL;
76290 + if (protocol < 0)
76291 + return -EINVAL;
76292
76293 /* Compatibility.
76294
76295 @@ -1339,6 +1344,16 @@ SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
76296 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
76297 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
76298
76299 + if(!gr_search_socket(family, type, protocol)) {
76300 + retval = -EACCES;
76301 + goto out;
76302 + }
76303 +
76304 + if (gr_handle_sock_all(family, type, protocol)) {
76305 + retval = -EACCES;
76306 + goto out;
76307 + }
76308 +
76309 retval = sock_create(family, type, protocol, &sock);
76310 if (retval < 0)
76311 goto out;
76312 @@ -1451,6 +1466,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
76313 if (sock) {
76314 err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
76315 if (err >= 0) {
76316 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
76317 + err = -EACCES;
76318 + goto error;
76319 + }
76320 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
76321 + if (err)
76322 + goto error;
76323 +
76324 err = security_socket_bind(sock,
76325 (struct sockaddr *)&address,
76326 addrlen);
76327 @@ -1459,6 +1482,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
76328 (struct sockaddr *)
76329 &address, addrlen);
76330 }
76331 +error:
76332 fput_light(sock->file, fput_needed);
76333 }
76334 return err;
76335 @@ -1482,10 +1506,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
76336 if ((unsigned)backlog > somaxconn)
76337 backlog = somaxconn;
76338
76339 + if (gr_handle_sock_server_other(sock->sk)) {
76340 + err = -EPERM;
76341 + goto error;
76342 + }
76343 +
76344 + err = gr_search_listen(sock);
76345 + if (err)
76346 + goto error;
76347 +
76348 err = security_socket_listen(sock, backlog);
76349 if (!err)
76350 err = sock->ops->listen(sock, backlog);
76351
76352 +error:
76353 fput_light(sock->file, fput_needed);
76354 }
76355 return err;
76356 @@ -1529,6 +1563,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
76357 newsock->type = sock->type;
76358 newsock->ops = sock->ops;
76359
76360 + if (gr_handle_sock_server_other(sock->sk)) {
76361 + err = -EPERM;
76362 + sock_release(newsock);
76363 + goto out_put;
76364 + }
76365 +
76366 + err = gr_search_accept(sock);
76367 + if (err) {
76368 + sock_release(newsock);
76369 + goto out_put;
76370 + }
76371 +
76372 /*
76373 * We don't need try_module_get here, as the listening socket (sock)
76374 * has the protocol module (sock->ops->owner) held.
76375 @@ -1567,6 +1613,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
76376 fd_install(newfd, newfile);
76377 err = newfd;
76378
76379 + gr_attach_curr_ip(newsock->sk);
76380 +
76381 out_put:
76382 fput_light(sock->file, fput_needed);
76383 out:
76384 @@ -1599,6 +1647,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
76385 int, addrlen)
76386 {
76387 struct socket *sock;
76388 + struct sockaddr *sck;
76389 struct sockaddr_storage address;
76390 int err, fput_needed;
76391
76392 @@ -1609,6 +1658,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
76393 if (err < 0)
76394 goto out_put;
76395
76396 + sck = (struct sockaddr *)&address;
76397 +
76398 + if (gr_handle_sock_client(sck)) {
76399 + err = -EACCES;
76400 + goto out_put;
76401 + }
76402 +
76403 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
76404 + if (err)
76405 + goto out_put;
76406 +
76407 err =
76408 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
76409 if (err)
76410 @@ -1970,7 +2030,7 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
76411 * checking falls down on this.
76412 */
76413 if (copy_from_user(ctl_buf,
76414 - (void __user __force *)msg_sys->msg_control,
76415 + (void __force_user *)msg_sys->msg_control,
76416 ctl_len))
76417 goto out_freectl;
76418 msg_sys->msg_control = ctl_buf;
76419 @@ -2140,7 +2200,7 @@ static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
76420 * kernel msghdr to use the kernel address space)
76421 */
76422
76423 - uaddr = (__force void __user *)msg_sys->msg_name;
76424 + uaddr = (void __force_user *)msg_sys->msg_name;
76425 uaddr_len = COMPAT_NAMELEN(msg);
76426 if (MSG_CMSG_COMPAT & flags) {
76427 err = verify_compat_iovec(msg_sys, iov,
76428 @@ -2768,7 +2828,7 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
76429 }
76430
76431 ifr = compat_alloc_user_space(buf_size);
76432 - rxnfc = (void *)ifr + ALIGN(sizeof(struct ifreq), 8);
76433 + rxnfc = (void __user *)ifr + ALIGN(sizeof(struct ifreq), 8);
76434
76435 if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
76436 return -EFAULT;
76437 @@ -2792,12 +2852,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
76438 offsetof(struct ethtool_rxnfc, fs.ring_cookie));
76439
76440 if (copy_in_user(rxnfc, compat_rxnfc,
76441 - (void *)(&rxnfc->fs.m_ext + 1) -
76442 - (void *)rxnfc) ||
76443 + (void __user *)(&rxnfc->fs.m_ext + 1) -
76444 + (void __user *)rxnfc) ||
76445 copy_in_user(&rxnfc->fs.ring_cookie,
76446 &compat_rxnfc->fs.ring_cookie,
76447 - (void *)(&rxnfc->fs.location + 1) -
76448 - (void *)&rxnfc->fs.ring_cookie) ||
76449 + (void __user *)(&rxnfc->fs.location + 1) -
76450 + (void __user *)&rxnfc->fs.ring_cookie) ||
76451 copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt,
76452 sizeof(rxnfc->rule_cnt)))
76453 return -EFAULT;
76454 @@ -2809,12 +2869,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
76455
76456 if (convert_out) {
76457 if (copy_in_user(compat_rxnfc, rxnfc,
76458 - (const void *)(&rxnfc->fs.m_ext + 1) -
76459 - (const void *)rxnfc) ||
76460 + (const void __user *)(&rxnfc->fs.m_ext + 1) -
76461 + (const void __user *)rxnfc) ||
76462 copy_in_user(&compat_rxnfc->fs.ring_cookie,
76463 &rxnfc->fs.ring_cookie,
76464 - (const void *)(&rxnfc->fs.location + 1) -
76465 - (const void *)&rxnfc->fs.ring_cookie) ||
76466 + (const void __user *)(&rxnfc->fs.location + 1) -
76467 + (const void __user *)&rxnfc->fs.ring_cookie) ||
76468 copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt,
76469 sizeof(rxnfc->rule_cnt)))
76470 return -EFAULT;
76471 @@ -2884,7 +2944,7 @@ static int bond_ioctl(struct net *net, unsigned int cmd,
76472 old_fs = get_fs();
76473 set_fs(KERNEL_DS);
76474 err = dev_ioctl(net, cmd,
76475 - (struct ifreq __user __force *) &kifr);
76476 + (struct ifreq __force_user *) &kifr);
76477 set_fs(old_fs);
76478
76479 return err;
76480 @@ -2993,7 +3053,7 @@ static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
76481
76482 old_fs = get_fs();
76483 set_fs(KERNEL_DS);
76484 - err = dev_ioctl(net, cmd, (void __user __force *)&ifr);
76485 + err = dev_ioctl(net, cmd, (void __force_user *)&ifr);
76486 set_fs(old_fs);
76487
76488 if (cmd == SIOCGIFMAP && !err) {
76489 @@ -3098,7 +3158,7 @@ static int routing_ioctl(struct net *net, struct socket *sock,
76490 ret |= __get_user(rtdev, &(ur4->rt_dev));
76491 if (rtdev) {
76492 ret |= copy_from_user(devname, compat_ptr(rtdev), 15);
76493 - r4.rt_dev = (char __user __force *)devname;
76494 + r4.rt_dev = (char __force_user *)devname;
76495 devname[15] = 0;
76496 } else
76497 r4.rt_dev = NULL;
76498 @@ -3324,8 +3384,8 @@ int kernel_getsockopt(struct socket *sock, int level, int optname,
76499 int __user *uoptlen;
76500 int err;
76501
76502 - uoptval = (char __user __force *) optval;
76503 - uoptlen = (int __user __force *) optlen;
76504 + uoptval = (char __force_user *) optval;
76505 + uoptlen = (int __force_user *) optlen;
76506
76507 set_fs(KERNEL_DS);
76508 if (level == SOL_SOCKET)
76509 @@ -3345,7 +3405,7 @@ int kernel_setsockopt(struct socket *sock, int level, int optname,
76510 char __user *uoptval;
76511 int err;
76512
76513 - uoptval = (char __user __force *) optval;
76514 + uoptval = (char __force_user *) optval;
76515
76516 set_fs(KERNEL_DS);
76517 if (level == SOL_SOCKET)
76518 diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
76519 index 3341d89..c662621 100644
76520 --- a/net/sunrpc/sched.c
76521 +++ b/net/sunrpc/sched.c
76522 @@ -239,9 +239,9 @@ static int rpc_wait_bit_killable(void *word)
76523 #ifdef RPC_DEBUG
76524 static void rpc_task_set_debuginfo(struct rpc_task *task)
76525 {
76526 - static atomic_t rpc_pid;
76527 + static atomic_unchecked_t rpc_pid;
76528
76529 - task->tk_pid = atomic_inc_return(&rpc_pid);
76530 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
76531 }
76532 #else
76533 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
76534 diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
76535 index 4645709..d41d668 100644
76536 --- a/net/sunrpc/svcsock.c
76537 +++ b/net/sunrpc/svcsock.c
76538 @@ -396,7 +396,7 @@ static int svc_partial_recvfrom(struct svc_rqst *rqstp,
76539 int buflen, unsigned int base)
76540 {
76541 size_t save_iovlen;
76542 - void __user *save_iovbase;
76543 + void *save_iovbase;
76544 unsigned int i;
76545 int ret;
76546
76547 diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
76548 index 09af4fa..77110a9 100644
76549 --- a/net/sunrpc/xprtrdma/svc_rdma.c
76550 +++ b/net/sunrpc/xprtrdma/svc_rdma.c
76551 @@ -61,15 +61,15 @@ unsigned int svcrdma_max_req_size = RPCRDMA_MAX_REQ_SIZE;
76552 static unsigned int min_max_inline = 4096;
76553 static unsigned int max_max_inline = 65536;
76554
76555 -atomic_t rdma_stat_recv;
76556 -atomic_t rdma_stat_read;
76557 -atomic_t rdma_stat_write;
76558 -atomic_t rdma_stat_sq_starve;
76559 -atomic_t rdma_stat_rq_starve;
76560 -atomic_t rdma_stat_rq_poll;
76561 -atomic_t rdma_stat_rq_prod;
76562 -atomic_t rdma_stat_sq_poll;
76563 -atomic_t rdma_stat_sq_prod;
76564 +atomic_unchecked_t rdma_stat_recv;
76565 +atomic_unchecked_t rdma_stat_read;
76566 +atomic_unchecked_t rdma_stat_write;
76567 +atomic_unchecked_t rdma_stat_sq_starve;
76568 +atomic_unchecked_t rdma_stat_rq_starve;
76569 +atomic_unchecked_t rdma_stat_rq_poll;
76570 +atomic_unchecked_t rdma_stat_rq_prod;
76571 +atomic_unchecked_t rdma_stat_sq_poll;
76572 +atomic_unchecked_t rdma_stat_sq_prod;
76573
76574 /* Temporary NFS request map and context caches */
76575 struct kmem_cache *svc_rdma_map_cachep;
76576 @@ -109,7 +109,7 @@ static int read_reset_stat(ctl_table *table, int write,
76577 len -= *ppos;
76578 if (len > *lenp)
76579 len = *lenp;
76580 - if (len && copy_to_user(buffer, str_buf, len))
76581 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
76582 return -EFAULT;
76583 *lenp = len;
76584 *ppos += len;
76585 @@ -150,63 +150,63 @@ static ctl_table svcrdma_parm_table[] = {
76586 {
76587 .procname = "rdma_stat_read",
76588 .data = &rdma_stat_read,
76589 - .maxlen = sizeof(atomic_t),
76590 + .maxlen = sizeof(atomic_unchecked_t),
76591 .mode = 0644,
76592 .proc_handler = read_reset_stat,
76593 },
76594 {
76595 .procname = "rdma_stat_recv",
76596 .data = &rdma_stat_recv,
76597 - .maxlen = sizeof(atomic_t),
76598 + .maxlen = sizeof(atomic_unchecked_t),
76599 .mode = 0644,
76600 .proc_handler = read_reset_stat,
76601 },
76602 {
76603 .procname = "rdma_stat_write",
76604 .data = &rdma_stat_write,
76605 - .maxlen = sizeof(atomic_t),
76606 + .maxlen = sizeof(atomic_unchecked_t),
76607 .mode = 0644,
76608 .proc_handler = read_reset_stat,
76609 },
76610 {
76611 .procname = "rdma_stat_sq_starve",
76612 .data = &rdma_stat_sq_starve,
76613 - .maxlen = sizeof(atomic_t),
76614 + .maxlen = sizeof(atomic_unchecked_t),
76615 .mode = 0644,
76616 .proc_handler = read_reset_stat,
76617 },
76618 {
76619 .procname = "rdma_stat_rq_starve",
76620 .data = &rdma_stat_rq_starve,
76621 - .maxlen = sizeof(atomic_t),
76622 + .maxlen = sizeof(atomic_unchecked_t),
76623 .mode = 0644,
76624 .proc_handler = read_reset_stat,
76625 },
76626 {
76627 .procname = "rdma_stat_rq_poll",
76628 .data = &rdma_stat_rq_poll,
76629 - .maxlen = sizeof(atomic_t),
76630 + .maxlen = sizeof(atomic_unchecked_t),
76631 .mode = 0644,
76632 .proc_handler = read_reset_stat,
76633 },
76634 {
76635 .procname = "rdma_stat_rq_prod",
76636 .data = &rdma_stat_rq_prod,
76637 - .maxlen = sizeof(atomic_t),
76638 + .maxlen = sizeof(atomic_unchecked_t),
76639 .mode = 0644,
76640 .proc_handler = read_reset_stat,
76641 },
76642 {
76643 .procname = "rdma_stat_sq_poll",
76644 .data = &rdma_stat_sq_poll,
76645 - .maxlen = sizeof(atomic_t),
76646 + .maxlen = sizeof(atomic_unchecked_t),
76647 .mode = 0644,
76648 .proc_handler = read_reset_stat,
76649 },
76650 {
76651 .procname = "rdma_stat_sq_prod",
76652 .data = &rdma_stat_sq_prod,
76653 - .maxlen = sizeof(atomic_t),
76654 + .maxlen = sizeof(atomic_unchecked_t),
76655 .mode = 0644,
76656 .proc_handler = read_reset_stat,
76657 },
76658 diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
76659 index df67211..c354b13 100644
76660 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
76661 +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
76662 @@ -499,7 +499,7 @@ next_sge:
76663 svc_rdma_put_context(ctxt, 0);
76664 goto out;
76665 }
76666 - atomic_inc(&rdma_stat_read);
76667 + atomic_inc_unchecked(&rdma_stat_read);
76668
76669 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
76670 chl_map->ch[ch_no].count -= read_wr.num_sge;
76671 @@ -609,7 +609,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
76672 dto_q);
76673 list_del_init(&ctxt->dto_q);
76674 } else {
76675 - atomic_inc(&rdma_stat_rq_starve);
76676 + atomic_inc_unchecked(&rdma_stat_rq_starve);
76677 clear_bit(XPT_DATA, &xprt->xpt_flags);
76678 ctxt = NULL;
76679 }
76680 @@ -629,7 +629,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
76681 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
76682 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
76683 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
76684 - atomic_inc(&rdma_stat_recv);
76685 + atomic_inc_unchecked(&rdma_stat_recv);
76686
76687 /* Build up the XDR from the receive buffers. */
76688 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
76689 diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
76690 index 249a835..fb2794b 100644
76691 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
76692 +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
76693 @@ -362,7 +362,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
76694 write_wr.wr.rdma.remote_addr = to;
76695
76696 /* Post It */
76697 - atomic_inc(&rdma_stat_write);
76698 + atomic_inc_unchecked(&rdma_stat_write);
76699 if (svc_rdma_send(xprt, &write_wr))
76700 goto err;
76701 return 0;
76702 diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
76703 index 894cb42..cf5bafb 100644
76704 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
76705 +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
76706 @@ -300,7 +300,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
76707 return;
76708
76709 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
76710 - atomic_inc(&rdma_stat_rq_poll);
76711 + atomic_inc_unchecked(&rdma_stat_rq_poll);
76712
76713 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
76714 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
76715 @@ -322,7 +322,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
76716 }
76717
76718 if (ctxt)
76719 - atomic_inc(&rdma_stat_rq_prod);
76720 + atomic_inc_unchecked(&rdma_stat_rq_prod);
76721
76722 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
76723 /*
76724 @@ -394,7 +394,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
76725 return;
76726
76727 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
76728 - atomic_inc(&rdma_stat_sq_poll);
76729 + atomic_inc_unchecked(&rdma_stat_sq_poll);
76730 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
76731 if (wc.status != IB_WC_SUCCESS)
76732 /* Close the transport */
76733 @@ -412,7 +412,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
76734 }
76735
76736 if (ctxt)
76737 - atomic_inc(&rdma_stat_sq_prod);
76738 + atomic_inc_unchecked(&rdma_stat_sq_prod);
76739 }
76740
76741 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
76742 @@ -1274,7 +1274,7 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
76743 spin_lock_bh(&xprt->sc_lock);
76744 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
76745 spin_unlock_bh(&xprt->sc_lock);
76746 - atomic_inc(&rdma_stat_sq_starve);
76747 + atomic_inc_unchecked(&rdma_stat_sq_starve);
76748
76749 /* See if we can opportunistically reap SQ WR to make room */
76750 sq_cq_reap(xprt);
76751 diff --git a/net/sysctl_net.c b/net/sysctl_net.c
76752 index e758139..d29ea47 100644
76753 --- a/net/sysctl_net.c
76754 +++ b/net/sysctl_net.c
76755 @@ -47,7 +47,7 @@ static int net_ctl_permissions(struct ctl_table_root *root,
76756 struct ctl_table *table)
76757 {
76758 /* Allow network administrator to have same access as root. */
76759 - if (capable(CAP_NET_ADMIN)) {
76760 + if (capable_nolog(CAP_NET_ADMIN)) {
76761 int mode = (table->mode >> 6) & 7;
76762 return (mode << 6) | (mode << 3) | mode;
76763 }
76764 diff --git a/net/tipc/link.c b/net/tipc/link.c
76765 index ac1832a..533ed97 100644
76766 --- a/net/tipc/link.c
76767 +++ b/net/tipc/link.c
76768 @@ -1205,7 +1205,7 @@ static int link_send_sections_long(struct tipc_port *sender,
76769 struct tipc_msg fragm_hdr;
76770 struct sk_buff *buf, *buf_chain, *prev;
76771 u32 fragm_crs, fragm_rest, hsz, sect_rest;
76772 - const unchar *sect_crs;
76773 + const unchar __user *sect_crs;
76774 int curr_sect;
76775 u32 fragm_no;
76776
76777 @@ -1249,7 +1249,7 @@ again:
76778
76779 if (!sect_rest) {
76780 sect_rest = msg_sect[++curr_sect].iov_len;
76781 - sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
76782 + sect_crs = (const unchar __user *)msg_sect[curr_sect].iov_base;
76783 }
76784
76785 if (sect_rest < fragm_rest)
76786 @@ -1268,7 +1268,7 @@ error:
76787 }
76788 } else
76789 skb_copy_to_linear_data_offset(buf, fragm_crs,
76790 - sect_crs, sz);
76791 + (const void __force_kernel *)sect_crs, sz);
76792 sect_crs += sz;
76793 sect_rest -= sz;
76794 fragm_crs += sz;
76795 diff --git a/net/tipc/msg.c b/net/tipc/msg.c
76796 index 3e4d3e2..27b55dc 100644
76797 --- a/net/tipc/msg.c
76798 +++ b/net/tipc/msg.c
76799 @@ -99,7 +99,7 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
76800 msg_sect[cnt].iov_len);
76801 else
76802 skb_copy_to_linear_data_offset(*buf, pos,
76803 - msg_sect[cnt].iov_base,
76804 + (const void __force_kernel *)msg_sect[cnt].iov_base,
76805 msg_sect[cnt].iov_len);
76806 pos += msg_sect[cnt].iov_len;
76807 }
76808 diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
76809 index 8c49566..14510cb 100644
76810 --- a/net/tipc/subscr.c
76811 +++ b/net/tipc/subscr.c
76812 @@ -101,7 +101,7 @@ static void subscr_send_event(struct tipc_subscription *sub,
76813 {
76814 struct iovec msg_sect;
76815
76816 - msg_sect.iov_base = (void *)&sub->evt;
76817 + msg_sect.iov_base = (void __force_user *)&sub->evt;
76818 msg_sect.iov_len = sizeof(struct tipc_event);
76819
76820 sub->evt.event = htohl(event, sub->swap);
76821 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
76822 index 85d3bb7..79f4487 100644
76823 --- a/net/unix/af_unix.c
76824 +++ b/net/unix/af_unix.c
76825 @@ -770,6 +770,12 @@ static struct sock *unix_find_other(struct net *net,
76826 err = -ECONNREFUSED;
76827 if (!S_ISSOCK(inode->i_mode))
76828 goto put_fail;
76829 +
76830 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
76831 + err = -EACCES;
76832 + goto put_fail;
76833 + }
76834 +
76835 u = unix_find_socket_byinode(inode);
76836 if (!u)
76837 goto put_fail;
76838 @@ -790,6 +796,13 @@ static struct sock *unix_find_other(struct net *net,
76839 if (u) {
76840 struct dentry *dentry;
76841 dentry = unix_sk(u)->dentry;
76842 +
76843 + if (!gr_handle_chroot_unix(pid_vnr(u->sk_peer_pid))) {
76844 + err = -EPERM;
76845 + sock_put(u);
76846 + goto fail;
76847 + }
76848 +
76849 if (dentry)
76850 touch_atime(unix_sk(u)->mnt, dentry);
76851 } else
76852 @@ -872,11 +885,18 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
76853 err = security_path_mknod(&path, dentry, mode, 0);
76854 if (err)
76855 goto out_mknod_drop_write;
76856 + if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
76857 + err = -EACCES;
76858 + goto out_mknod_drop_write;
76859 + }
76860 err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
76861 out_mknod_drop_write:
76862 mnt_drop_write(path.mnt);
76863 if (err)
76864 goto out_mknod_dput;
76865 +
76866 + gr_handle_create(dentry, path.mnt);
76867 +
76868 mutex_unlock(&path.dentry->d_inode->i_mutex);
76869 dput(path.dentry);
76870 path.dentry = dentry;
76871 diff --git a/net/wireless/core.h b/net/wireless/core.h
76872 index 43ad9c8..ab5127c 100644
76873 --- a/net/wireless/core.h
76874 +++ b/net/wireless/core.h
76875 @@ -27,7 +27,7 @@ struct cfg80211_registered_device {
76876 struct mutex mtx;
76877
76878 /* rfkill support */
76879 - struct rfkill_ops rfkill_ops;
76880 + rfkill_ops_no_const rfkill_ops;
76881 struct rfkill *rfkill;
76882 struct work_struct rfkill_sync;
76883
76884 diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
76885 index 0af7f54..c916d2f 100644
76886 --- a/net/wireless/wext-core.c
76887 +++ b/net/wireless/wext-core.c
76888 @@ -747,8 +747,7 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
76889 */
76890
76891 /* Support for very large requests */
76892 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
76893 - (user_length > descr->max_tokens)) {
76894 + if (user_length > descr->max_tokens) {
76895 /* Allow userspace to GET more than max so
76896 * we can support any size GET requests.
76897 * There is still a limit : -ENOMEM.
76898 @@ -785,22 +784,6 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
76899 }
76900 }
76901
76902 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
76903 - /*
76904 - * If this is a GET, but not NOMAX, it means that the extra
76905 - * data is not bounded by userspace, but by max_tokens. Thus
76906 - * set the length to max_tokens. This matches the extra data
76907 - * allocation.
76908 - * The driver should fill it with the number of tokens it
76909 - * provided, and it may check iwp->length rather than having
76910 - * knowledge of max_tokens. If the driver doesn't change the
76911 - * iwp->length, this ioctl just copies back max_token tokens
76912 - * filled with zeroes. Hopefully the driver isn't claiming
76913 - * them to be valid data.
76914 - */
76915 - iwp->length = descr->max_tokens;
76916 - }
76917 -
76918 err = handler(dev, info, (union iwreq_data *) iwp, extra);
76919
76920 iwp->length += essid_compat;
76921 diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
76922 index 7661576..80f7627 100644
76923 --- a/net/xfrm/xfrm_policy.c
76924 +++ b/net/xfrm/xfrm_policy.c
76925 @@ -299,7 +299,7 @@ static void xfrm_policy_kill(struct xfrm_policy *policy)
76926 {
76927 policy->walk.dead = 1;
76928
76929 - atomic_inc(&policy->genid);
76930 + atomic_inc_unchecked(&policy->genid);
76931
76932 if (del_timer(&policy->timer))
76933 xfrm_pol_put(policy);
76934 @@ -583,7 +583,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
76935 hlist_add_head(&policy->bydst, chain);
76936 xfrm_pol_hold(policy);
76937 net->xfrm.policy_count[dir]++;
76938 - atomic_inc(&flow_cache_genid);
76939 + atomic_inc_unchecked(&flow_cache_genid);
76940 if (delpol)
76941 __xfrm_policy_unlink(delpol, dir);
76942 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
76943 @@ -1530,7 +1530,7 @@ free_dst:
76944 goto out;
76945 }
76946
76947 -static int inline
76948 +static inline int
76949 xfrm_dst_alloc_copy(void **target, const void *src, int size)
76950 {
76951 if (!*target) {
76952 @@ -1542,7 +1542,7 @@ xfrm_dst_alloc_copy(void **target, const void *src, int size)
76953 return 0;
76954 }
76955
76956 -static int inline
76957 +static inline int
76958 xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
76959 {
76960 #ifdef CONFIG_XFRM_SUB_POLICY
76961 @@ -1554,7 +1554,7 @@ xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
76962 #endif
76963 }
76964
76965 -static int inline
76966 +static inline int
76967 xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
76968 {
76969 #ifdef CONFIG_XFRM_SUB_POLICY
76970 @@ -1648,7 +1648,7 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
76971
76972 xdst->num_pols = num_pols;
76973 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
76974 - xdst->policy_genid = atomic_read(&pols[0]->genid);
76975 + xdst->policy_genid = atomic_read_unchecked(&pols[0]->genid);
76976
76977 return xdst;
76978 }
76979 @@ -2345,7 +2345,7 @@ static int xfrm_bundle_ok(struct xfrm_dst *first)
76980 if (xdst->xfrm_genid != dst->xfrm->genid)
76981 return 0;
76982 if (xdst->num_pols > 0 &&
76983 - xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
76984 + xdst->policy_genid != atomic_read_unchecked(&xdst->pols[0]->genid))
76985 return 0;
76986
76987 mtu = dst_mtu(dst->child);
76988 @@ -2882,7 +2882,7 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol,
76989 sizeof(pol->xfrm_vec[i].saddr));
76990 pol->xfrm_vec[i].encap_family = mp->new_family;
76991 /* flush bundles */
76992 - atomic_inc(&pol->genid);
76993 + atomic_inc_unchecked(&pol->genid);
76994 }
76995 }
76996
76997 diff --git a/scripts/Makefile.build b/scripts/Makefile.build
76998 index d2b366c..51ff91ebc 100644
76999 --- a/scripts/Makefile.build
77000 +++ b/scripts/Makefile.build
77001 @@ -109,7 +109,7 @@ endif
77002 endif
77003
77004 # Do not include host rules unless needed
77005 -ifneq ($(hostprogs-y)$(hostprogs-m),)
77006 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),)
77007 include scripts/Makefile.host
77008 endif
77009
77010 diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean
77011 index 686cb0d..9d653bf 100644
77012 --- a/scripts/Makefile.clean
77013 +++ b/scripts/Makefile.clean
77014 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subdir-ymn))
77015 __clean-files := $(extra-y) $(always) \
77016 $(targets) $(clean-files) \
77017 $(host-progs) \
77018 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
77019 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
77020 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
77021
77022 __clean-files := $(filter-out $(no-clean-files), $(__clean-files))
77023
77024 diff --git a/scripts/Makefile.host b/scripts/Makefile.host
77025 index 1ac414f..a1c1451 100644
77026 --- a/scripts/Makefile.host
77027 +++ b/scripts/Makefile.host
77028 @@ -31,6 +31,7 @@
77029 # Note: Shared libraries consisting of C++ files are not supported
77030
77031 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
77032 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
77033
77034 # C code
77035 # Executables compiled from a single .c file
77036 @@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(host-cxxmulti),$($(m)-cxxobjs)))
77037 # Shared libaries (only .c supported)
77038 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
77039 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
77040 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
77041 # Remove .so files from "xxx-objs"
77042 host-cobjs := $(filter-out %.so,$(host-cobjs))
77043
77044 diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
77045 index cb1f50c..cef2a7c 100644
77046 --- a/scripts/basic/fixdep.c
77047 +++ b/scripts/basic/fixdep.c
77048 @@ -161,7 +161,7 @@ static unsigned int strhash(const char *str, unsigned int sz)
77049 /*
77050 * Lookup a value in the configuration string.
77051 */
77052 -static int is_defined_config(const char *name, int len, unsigned int hash)
77053 +static int is_defined_config(const char *name, unsigned int len, unsigned int hash)
77054 {
77055 struct item *aux;
77056
77057 @@ -211,10 +211,10 @@ static void clear_config(void)
77058 /*
77059 * Record the use of a CONFIG_* word.
77060 */
77061 -static void use_config(const char *m, int slen)
77062 +static void use_config(const char *m, unsigned int slen)
77063 {
77064 unsigned int hash = strhash(m, slen);
77065 - int c, i;
77066 + unsigned int c, i;
77067
77068 if (is_defined_config(m, slen, hash))
77069 return;
77070 @@ -235,9 +235,9 @@ static void use_config(const char *m, int slen)
77071
77072 static void parse_config_file(const char *map, size_t len)
77073 {
77074 - const int *end = (const int *) (map + len);
77075 + const unsigned int *end = (const unsigned int *) (map + len);
77076 /* start at +1, so that p can never be < map */
77077 - const int *m = (const int *) map + 1;
77078 + const unsigned int *m = (const unsigned int *) map + 1;
77079 const char *p, *q;
77080
77081 for (; m < end; m++) {
77082 @@ -406,7 +406,7 @@ static void print_deps(void)
77083 static void traps(void)
77084 {
77085 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
77086 - int *p = (int *)test;
77087 + unsigned int *p = (unsigned int *)test;
77088
77089 if (*p != INT_CONF) {
77090 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
77091 diff --git a/scripts/gcc-plugin.sh b/scripts/gcc-plugin.sh
77092 new file mode 100644
77093 index 0000000..8729101
77094 --- /dev/null
77095 +++ b/scripts/gcc-plugin.sh
77096 @@ -0,0 +1,2 @@
77097 +#!/bin/sh
77098 +echo -e "#include \"gcc-plugin.h\"\n#include \"tree.h\"\n#include \"tm.h\"\n#include \"rtl.h\"" | $1 -x c -shared - -o /dev/null -I`$2 -print-file-name=plugin`/include >/dev/null 2>&1 && echo "y"
77099 diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
77100 index b89efe6..2c30808 100644
77101 --- a/scripts/mod/file2alias.c
77102 +++ b/scripts/mod/file2alias.c
77103 @@ -128,7 +128,7 @@ static void device_id_check(const char *modname, const char *device_id,
77104 unsigned long size, unsigned long id_size,
77105 void *symval)
77106 {
77107 - int i;
77108 + unsigned int i;
77109
77110 if (size % id_size || size < id_size) {
77111 if (cross_build != 0)
77112 @@ -158,7 +158,7 @@ static void device_id_check(const char *modname, const char *device_id,
77113 /* USB is special because the bcdDevice can be matched against a numeric range */
77114 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
77115 static void do_usb_entry(struct usb_device_id *id,
77116 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
77117 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
77118 unsigned char range_lo, unsigned char range_hi,
77119 unsigned char max, struct module *mod)
77120 {
77121 @@ -259,7 +259,7 @@ static void do_usb_entry_multi(struct usb_device_id *id, struct module *mod)
77122 {
77123 unsigned int devlo, devhi;
77124 unsigned char chi, clo, max;
77125 - int ndigits;
77126 + unsigned int ndigits;
77127
77128 id->match_flags = TO_NATIVE(id->match_flags);
77129 id->idVendor = TO_NATIVE(id->idVendor);
77130 @@ -501,7 +501,7 @@ static void do_pnp_device_entry(void *symval, unsigned long size,
77131 for (i = 0; i < count; i++) {
77132 const char *id = (char *)devs[i].id;
77133 char acpi_id[sizeof(devs[0].id)];
77134 - int j;
77135 + unsigned int j;
77136
77137 buf_printf(&mod->dev_table_buf,
77138 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
77139 @@ -531,7 +531,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
77140
77141 for (j = 0; j < PNP_MAX_DEVICES; j++) {
77142 const char *id = (char *)card->devs[j].id;
77143 - int i2, j2;
77144 + unsigned int i2, j2;
77145 int dup = 0;
77146
77147 if (!id[0])
77148 @@ -557,7 +557,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
77149 /* add an individual alias for every device entry */
77150 if (!dup) {
77151 char acpi_id[sizeof(card->devs[0].id)];
77152 - int k;
77153 + unsigned int k;
77154
77155 buf_printf(&mod->dev_table_buf,
77156 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
77157 @@ -882,7 +882,7 @@ static void dmi_ascii_filter(char *d, const char *s)
77158 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
77159 char *alias)
77160 {
77161 - int i, j;
77162 + unsigned int i, j;
77163
77164 sprintf(alias, "dmi*");
77165
77166 diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
77167 index 9adb667..c6ac044 100644
77168 --- a/scripts/mod/modpost.c
77169 +++ b/scripts/mod/modpost.c
77170 @@ -919,6 +919,7 @@ enum mismatch {
77171 ANY_INIT_TO_ANY_EXIT,
77172 ANY_EXIT_TO_ANY_INIT,
77173 EXPORT_TO_INIT_EXIT,
77174 + DATA_TO_TEXT
77175 };
77176
77177 struct sectioncheck {
77178 @@ -1027,6 +1028,12 @@ const struct sectioncheck sectioncheck[] = {
77179 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
77180 .mismatch = EXPORT_TO_INIT_EXIT,
77181 .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
77182 +},
77183 +/* Do not reference code from writable data */
77184 +{
77185 + .fromsec = { DATA_SECTIONS, NULL },
77186 + .tosec = { TEXT_SECTIONS, NULL },
77187 + .mismatch = DATA_TO_TEXT
77188 }
77189 };
77190
77191 @@ -1149,10 +1156,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
77192 continue;
77193 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
77194 continue;
77195 - if (sym->st_value == addr)
77196 - return sym;
77197 /* Find a symbol nearby - addr are maybe negative */
77198 d = sym->st_value - addr;
77199 + if (d == 0)
77200 + return sym;
77201 if (d < 0)
77202 d = addr - sym->st_value;
77203 if (d < distance) {
77204 @@ -1431,6 +1438,14 @@ static void report_sec_mismatch(const char *modname,
77205 tosym, prl_to, prl_to, tosym);
77206 free(prl_to);
77207 break;
77208 + case DATA_TO_TEXT:
77209 +/*
77210 + fprintf(stderr,
77211 + "The variable %s references\n"
77212 + "the %s %s%s%s\n",
77213 + fromsym, to, sec2annotation(tosec), tosym, to_p);
77214 +*/
77215 + break;
77216 }
77217 fprintf(stderr, "\n");
77218 }
77219 @@ -1665,7 +1680,7 @@ static void section_rel(const char *modname, struct elf_info *elf,
77220 static void check_sec_ref(struct module *mod, const char *modname,
77221 struct elf_info *elf)
77222 {
77223 - int i;
77224 + unsigned int i;
77225 Elf_Shdr *sechdrs = elf->sechdrs;
77226
77227 /* Walk through all sections */
77228 @@ -1763,7 +1778,7 @@ void __attribute__((format(printf, 2, 3))) buf_printf(struct buffer *buf,
77229 va_end(ap);
77230 }
77231
77232 -void buf_write(struct buffer *buf, const char *s, int len)
77233 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
77234 {
77235 if (buf->size - buf->pos < len) {
77236 buf->size += len + SZ;
77237 @@ -1981,7 +1996,7 @@ static void write_if_changed(struct buffer *b, const char *fname)
77238 if (fstat(fileno(file), &st) < 0)
77239 goto close_write;
77240
77241 - if (st.st_size != b->pos)
77242 + if (st.st_size != (off_t)b->pos)
77243 goto close_write;
77244
77245 tmp = NOFAIL(malloc(b->pos));
77246 diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
77247 index 2031119..b5433af 100644
77248 --- a/scripts/mod/modpost.h
77249 +++ b/scripts/mod/modpost.h
77250 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *expr);
77251
77252 struct buffer {
77253 char *p;
77254 - int pos;
77255 - int size;
77256 + unsigned int pos;
77257 + unsigned int size;
77258 };
77259
77260 void __attribute__((format(printf, 2, 3)))
77261 buf_printf(struct buffer *buf, const char *fmt, ...);
77262
77263 void
77264 -buf_write(struct buffer *buf, const char *s, int len);
77265 +buf_write(struct buffer *buf, const char *s, unsigned int len);
77266
77267 struct module {
77268 struct module *next;
77269 diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c
77270 index 9dfcd6d..099068e 100644
77271 --- a/scripts/mod/sumversion.c
77272 +++ b/scripts/mod/sumversion.c
77273 @@ -470,7 +470,7 @@ static void write_version(const char *filename, const char *sum,
77274 goto out;
77275 }
77276
77277 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
77278 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
77279 warn("writing sum in %s failed: %s\n",
77280 filename, strerror(errno));
77281 goto out;
77282 diff --git a/scripts/pnmtologo.c b/scripts/pnmtologo.c
77283 index 5c11312..72742b5 100644
77284 --- a/scripts/pnmtologo.c
77285 +++ b/scripts/pnmtologo.c
77286 @@ -237,14 +237,14 @@ static void write_header(void)
77287 fprintf(out, " * Linux logo %s\n", logoname);
77288 fputs(" */\n\n", out);
77289 fputs("#include <linux/linux_logo.h>\n\n", out);
77290 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
77291 + fprintf(out, "static unsigned char %s_data[] = {\n",
77292 logoname);
77293 }
77294
77295 static void write_footer(void)
77296 {
77297 fputs("\n};\n\n", out);
77298 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
77299 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
77300 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
77301 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
77302 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
77303 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
77304 fputs("\n};\n\n", out);
77305
77306 /* write logo clut */
77307 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
77308 + fprintf(out, "static unsigned char %s_clut[] = {\n",
77309 logoname);
77310 write_hex_cnt = 0;
77311 for (i = 0; i < logo_clutsize; i++) {
77312 diff --git a/security/Kconfig b/security/Kconfig
77313 index 51bd5a0..3a4ebd0 100644
77314 --- a/security/Kconfig
77315 +++ b/security/Kconfig
77316 @@ -4,6 +4,627 @@
77317
77318 menu "Security options"
77319
77320 +source grsecurity/Kconfig
77321 +
77322 +menu "PaX"
77323 +
77324 + config ARCH_TRACK_EXEC_LIMIT
77325 + bool
77326 +
77327 + config PAX_KERNEXEC_PLUGIN
77328 + bool
77329 +
77330 + config PAX_PER_CPU_PGD
77331 + bool
77332 +
77333 + config TASK_SIZE_MAX_SHIFT
77334 + int
77335 + depends on X86_64
77336 + default 47 if !PAX_PER_CPU_PGD
77337 + default 42 if PAX_PER_CPU_PGD
77338 +
77339 + config PAX_ENABLE_PAE
77340 + bool
77341 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
77342 +
77343 +config PAX
77344 + bool "Enable various PaX features"
77345 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
77346 + help
77347 + This allows you to enable various PaX features. PaX adds
77348 + intrusion prevention mechanisms to the kernel that reduce
77349 + the risks posed by exploitable memory corruption bugs.
77350 +
77351 +menu "PaX Control"
77352 + depends on PAX
77353 +
77354 +config PAX_SOFTMODE
77355 + bool 'Support soft mode'
77356 + help
77357 + Enabling this option will allow you to run PaX in soft mode, that
77358 + is, PaX features will not be enforced by default, only on executables
77359 + marked explicitly. You must also enable PT_PAX_FLAGS or XATTR_PAX_FLAGS
77360 + support as they are the only way to mark executables for soft mode use.
77361 +
77362 + Soft mode can be activated by using the "pax_softmode=1" kernel command
77363 + line option on boot. Furthermore you can control various PaX features
77364 + at runtime via the entries in /proc/sys/kernel/pax.
77365 +
77366 +config PAX_EI_PAX
77367 + bool 'Use legacy ELF header marking'
77368 + help
77369 + Enabling this option will allow you to control PaX features on
77370 + a per executable basis via the 'chpax' utility available at
77371 + http://pax.grsecurity.net/. The control flags will be read from
77372 + an otherwise reserved part of the ELF header. This marking has
77373 + numerous drawbacks (no support for soft-mode, toolchain does not
77374 + know about the non-standard use of the ELF header) therefore it
77375 + has been deprecated in favour of PT_PAX_FLAGS and XATTR_PAX_FLAGS
77376 + support.
77377 +
77378 + If you have applications not marked by the PT_PAX_FLAGS ELF program
77379 + header and you cannot use XATTR_PAX_FLAGS then you MUST enable this
77380 + option otherwise they will not get any protection.
77381 +
77382 + Note that if you enable PT_PAX_FLAGS or XATTR_PAX_FLAGS marking
77383 + support as well, they will override the legacy EI_PAX marks.
77384 +
77385 +config PAX_PT_PAX_FLAGS
77386 + bool 'Use ELF program header marking'
77387 + help
77388 + Enabling this option will allow you to control PaX features on
77389 + a per executable basis via the 'paxctl' utility available at
77390 + http://pax.grsecurity.net/. The control flags will be read from
77391 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
77392 + has the benefits of supporting both soft mode and being fully
77393 + integrated into the toolchain (the binutils patch is available
77394 + from http://pax.grsecurity.net).
77395 +
77396 + If you have applications not marked by the PT_PAX_FLAGS ELF program
77397 + header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
77398 + support otherwise they will not get any protection.
77399 +
77400 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
77401 + must make sure that the marks are the same if a binary has both marks.
77402 +
77403 + Note that if you enable the legacy EI_PAX marking support as well,
77404 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
77405 +
77406 +config PAX_XATTR_PAX_FLAGS
77407 + bool 'Use filesystem extended attributes marking'
77408 + depends on EXPERT
77409 + select CIFS_XATTR if CIFS
77410 + select EXT2_FS_XATTR if EXT2_FS
77411 + select EXT3_FS_XATTR if EXT3_FS
77412 + select EXT4_FS_XATTR if EXT4_FS
77413 + select JFFS2_FS_XATTR if JFFS2_FS
77414 + select REISERFS_FS_XATTR if REISERFS_FS
77415 + select SQUASHFS_XATTR if SQUASHFS
77416 + select TMPFS_XATTR if TMPFS
77417 + select UBIFS_FS_XATTR if UBIFS_FS
77418 + help
77419 + Enabling this option will allow you to control PaX features on
77420 + a per executable basis via the 'setfattr' utility. The control
77421 + flags will be read from the user.pax.flags extended attribute of
77422 + the file. This marking has the benefit of supporting binary-only
77423 + applications that self-check themselves (e.g., skype) and would
77424 + not tolerate chpax/paxctl changes. The main drawback is that
77425 + extended attributes are not supported by some filesystems (e.g.,
77426 + isofs, udf, vfat) so copying files through such filesystems will
77427 + lose the extended attributes and these PaX markings.
77428 +
77429 + If you have applications not marked by the PT_PAX_FLAGS ELF program
77430 + header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
77431 + support otherwise they will not get any protection.
77432 +
77433 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
77434 + must make sure that the marks are the same if a binary has both marks.
77435 +
77436 + Note that if you enable the legacy EI_PAX marking support as well,
77437 + the EI_PAX marks will be overridden by the XATTR_PAX_FLAGS marks.
77438 +
77439 +choice
77440 + prompt 'MAC system integration'
77441 + default PAX_HAVE_ACL_FLAGS
77442 + help
77443 + Mandatory Access Control systems have the option of controlling
77444 + PaX flags on a per executable basis, choose the method supported
77445 + by your particular system.
77446 +
77447 + - "none": if your MAC system does not interact with PaX,
77448 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
77449 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
77450 +
77451 + NOTE: this option is for developers/integrators only.
77452 +
77453 + config PAX_NO_ACL_FLAGS
77454 + bool 'none'
77455 +
77456 + config PAX_HAVE_ACL_FLAGS
77457 + bool 'direct'
77458 +
77459 + config PAX_HOOK_ACL_FLAGS
77460 + bool 'hook'
77461 +endchoice
77462 +
77463 +endmenu
77464 +
77465 +menu "Non-executable pages"
77466 + depends on PAX
77467 +
77468 +config PAX_NOEXEC
77469 + bool "Enforce non-executable pages"
77470 + depends on ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86
77471 + help
77472 + By design some architectures do not allow for protecting memory
77473 + pages against execution or even if they do, Linux does not make
77474 + use of this feature. In practice this means that if a page is
77475 + readable (such as the stack or heap) it is also executable.
77476 +
77477 + There is a well known exploit technique that makes use of this
77478 + fact and a common programming mistake where an attacker can
77479 + introduce code of his choice somewhere in the attacked program's
77480 + memory (typically the stack or the heap) and then execute it.
77481 +
77482 + If the attacked program was running with different (typically
77483 + higher) privileges than that of the attacker, then he can elevate
77484 + his own privilege level (e.g. get a root shell, write to files for
77485 + which he does not have write access to, etc).
77486 +
77487 + Enabling this option will let you choose from various features
77488 + that prevent the injection and execution of 'foreign' code in
77489 + a program.
77490 +
77491 + This will also break programs that rely on the old behaviour and
77492 + expect that dynamically allocated memory via the malloc() family
77493 + of functions is executable (which it is not). Notable examples
77494 + are the XFree86 4.x server, the java runtime and wine.
77495 +
77496 +config PAX_PAGEEXEC
77497 + bool "Paging based non-executable pages"
77498 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
77499 + select S390_SWITCH_AMODE if S390
77500 + select S390_EXEC_PROTECT if S390
77501 + select ARCH_TRACK_EXEC_LIMIT if X86_32
77502 + help
77503 + This implementation is based on the paging feature of the CPU.
77504 + On i386 without hardware non-executable bit support there is a
77505 + variable but usually low performance impact, however on Intel's
77506 + P4 core based CPUs it is very high so you should not enable this
77507 + for kernels meant to be used on such CPUs.
77508 +
77509 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
77510 + with hardware non-executable bit support there is no performance
77511 + impact, on ppc the impact is negligible.
77512 +
77513 + Note that several architectures require various emulations due to
77514 + badly designed userland ABIs, this will cause a performance impact
77515 + but will disappear as soon as userland is fixed. For example, ppc
77516 + userland MUST have been built with secure-plt by a recent toolchain.
77517 +
77518 +config PAX_SEGMEXEC
77519 + bool "Segmentation based non-executable pages"
77520 + depends on PAX_NOEXEC && X86_32
77521 + help
77522 + This implementation is based on the segmentation feature of the
77523 + CPU and has a very small performance impact, however applications
77524 + will be limited to a 1.5 GB address space instead of the normal
77525 + 3 GB.
77526 +
77527 +config PAX_EMUTRAMP
77528 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
77529 + default y if PARISC
77530 + help
77531 + There are some programs and libraries that for one reason or
77532 + another attempt to execute special small code snippets from
77533 + non-executable memory pages. Most notable examples are the
77534 + signal handler return code generated by the kernel itself and
77535 + the GCC trampolines.
77536 +
77537 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
77538 + such programs will no longer work under your kernel.
77539 +
77540 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
77541 + utilities to enable trampoline emulation for the affected programs
77542 + yet still have the protection provided by the non-executable pages.
77543 +
77544 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
77545 + your system will not even boot.
77546 +
77547 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
77548 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
77549 + for the affected files.
77550 +
77551 + NOTE: enabling this feature *may* open up a loophole in the
77552 + protection provided by non-executable pages that an attacker
77553 + could abuse. Therefore the best solution is to not have any
77554 + files on your system that would require this option. This can
77555 + be achieved by not using libc5 (which relies on the kernel
77556 + signal handler return code) and not using or rewriting programs
77557 + that make use of the nested function implementation of GCC.
77558 + Skilled users can just fix GCC itself so that it implements
77559 + nested function calls in a way that does not interfere with PaX.
77560 +
77561 +config PAX_EMUSIGRT
77562 + bool "Automatically emulate sigreturn trampolines"
77563 + depends on PAX_EMUTRAMP && PARISC
77564 + default y
77565 + help
77566 + Enabling this option will have the kernel automatically detect
77567 + and emulate signal return trampolines executing on the stack
77568 + that would otherwise lead to task termination.
77569 +
77570 + This solution is intended as a temporary one for users with
77571 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
77572 + Modula-3 runtime, etc) or executables linked to such, basically
77573 + everything that does not specify its own SA_RESTORER function in
77574 + normal executable memory like glibc 2.1+ does.
77575 +
77576 + On parisc you MUST enable this option, otherwise your system will
77577 + not even boot.
77578 +
77579 + NOTE: this feature cannot be disabled on a per executable basis
77580 + and since it *does* open up a loophole in the protection provided
77581 + by non-executable pages, the best solution is to not have any
77582 + files on your system that would require this option.
77583 +
77584 +config PAX_MPROTECT
77585 + bool "Restrict mprotect()"
77586 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
77587 + help
77588 + Enabling this option will prevent programs from
77589 + - changing the executable status of memory pages that were
77590 + not originally created as executable,
77591 + - making read-only executable pages writable again,
77592 + - creating executable pages from anonymous memory,
77593 + - making read-only-after-relocations (RELRO) data pages writable again.
77594 +
77595 + You should say Y here to complete the protection provided by
77596 + the enforcement of non-executable pages.
77597 +
77598 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
77599 + this feature on a per file basis.
77600 +
77601 +config PAX_MPROTECT_COMPAT
77602 + bool "Use legacy/compat protection demoting (read help)"
77603 + depends on PAX_MPROTECT
77604 + default n
77605 + help
77606 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
77607 + by sending the proper error code to the application. For some broken
77608 + userland, this can cause problems with Python or other applications. The
77609 + current implementation however allows for applications like clamav to
77610 + detect if JIT compilation/execution is allowed and to fall back gracefully
77611 + to an interpreter-based mode if it does not. While we encourage everyone
77612 + to use the current implementation as-is and push upstream to fix broken
77613 + userland (note that the RWX logging option can assist with this), in some
77614 + environments this may not be possible. Having to disable MPROTECT
77615 + completely on certain binaries reduces the security benefit of PaX,
77616 + so this option is provided for those environments to revert to the old
77617 + behavior.
77618 +
77619 +config PAX_ELFRELOCS
77620 + bool "Allow ELF text relocations (read help)"
77621 + depends on PAX_MPROTECT
77622 + default n
77623 + help
77624 + Non-executable pages and mprotect() restrictions are effective
77625 + in preventing the introduction of new executable code into an
77626 + attacked task's address space. There remain only two venues
77627 + for this kind of attack: if the attacker can execute already
77628 + existing code in the attacked task then he can either have it
77629 + create and mmap() a file containing his code or have it mmap()
77630 + an already existing ELF library that does not have position
77631 + independent code in it and use mprotect() on it to make it
77632 + writable and copy his code there. While protecting against
77633 + the former approach is beyond PaX, the latter can be prevented
77634 + by having only PIC ELF libraries on one's system (which do not
77635 + need to relocate their code). If you are sure this is your case,
77636 + as is the case with all modern Linux distributions, then leave
77637 + this option disabled. You should say 'n' here.
77638 +
77639 +config PAX_ETEXECRELOCS
77640 + bool "Allow ELF ET_EXEC text relocations"
77641 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
77642 + select PAX_ELFRELOCS
77643 + default y
77644 + help
77645 + On some architectures there are incorrectly created applications
77646 + that require text relocations and would not work without enabling
77647 + this option. If you are an alpha, ia64 or parisc user, you should
77648 + enable this option and disable it once you have made sure that
77649 + none of your applications need it.
77650 +
77651 +config PAX_EMUPLT
77652 + bool "Automatically emulate ELF PLT"
77653 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
77654 + default y
77655 + help
77656 + Enabling this option will have the kernel automatically detect
77657 + and emulate the Procedure Linkage Table entries in ELF files.
77658 + On some architectures such entries are in writable memory, and
77659 + become non-executable leading to task termination. Therefore
77660 + it is mandatory that you enable this option on alpha, parisc,
77661 + sparc and sparc64, otherwise your system would not even boot.
77662 +
77663 + NOTE: this feature *does* open up a loophole in the protection
77664 + provided by the non-executable pages, therefore the proper
77665 + solution is to modify the toolchain to produce a PLT that does
77666 + not need to be writable.
77667 +
77668 +config PAX_DLRESOLVE
77669 + bool 'Emulate old glibc resolver stub'
77670 + depends on PAX_EMUPLT && SPARC
77671 + default n
77672 + help
77673 + This option is needed if userland has an old glibc (before 2.4)
77674 + that puts a 'save' instruction into the runtime generated resolver
77675 + stub that needs special emulation.
77676 +
77677 +config PAX_KERNEXEC
77678 + bool "Enforce non-executable kernel pages"
77679 + depends on (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
77680 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
77681 + select PAX_KERNEXEC_PLUGIN if X86_64
77682 + help
77683 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
77684 + that is, enabling this option will make it harder to inject
77685 + and execute 'foreign' code in kernel memory itself.
77686 +
77687 + Note that on x86_64 kernels there is a known regression when
77688 + this feature and KVM/VMX are both enabled in the host kernel.
77689 +
77690 +choice
77691 + prompt "Return Address Instrumentation Method"
77692 + default PAX_KERNEXEC_PLUGIN_METHOD_BTS
77693 + depends on PAX_KERNEXEC_PLUGIN
77694 + help
77695 + Select the method used to instrument function pointer dereferences.
77696 + Note that binary modules cannot be instrumented by this approach.
77697 +
77698 + config PAX_KERNEXEC_PLUGIN_METHOD_BTS
77699 + bool "bts"
77700 + help
77701 + This method is compatible with binary only modules but has
77702 + a higher runtime overhead.
77703 +
77704 + config PAX_KERNEXEC_PLUGIN_METHOD_OR
77705 + bool "or"
77706 + depends on !PARAVIRT
77707 + help
77708 + This method is incompatible with binary only modules but has
77709 + a lower runtime overhead.
77710 +endchoice
77711 +
77712 +config PAX_KERNEXEC_PLUGIN_METHOD
77713 + string
77714 + default "bts" if PAX_KERNEXEC_PLUGIN_METHOD_BTS
77715 + default "or" if PAX_KERNEXEC_PLUGIN_METHOD_OR
77716 + default ""
77717 +
77718 +config PAX_KERNEXEC_MODULE_TEXT
77719 + int "Minimum amount of memory reserved for module code"
77720 + default "4"
77721 + depends on PAX_KERNEXEC && X86_32 && MODULES
77722 + help
77723 + Due to implementation details the kernel must reserve a fixed
77724 + amount of memory for module code at compile time that cannot be
77725 + changed at runtime. Here you can specify the minimum amount
77726 + in MB that will be reserved. Due to the same implementation
77727 + details this size will always be rounded up to the next 2/4 MB
77728 + boundary (depends on PAE) so the actually available memory for
77729 + module code will usually be more than this minimum.
77730 +
77731 + The default 4 MB should be enough for most users but if you have
77732 + an excessive number of modules (e.g., most distribution configs
77733 + compile many drivers as modules) or use huge modules such as
77734 + nvidia's kernel driver, you will need to adjust this amount.
77735 + A good rule of thumb is to look at your currently loaded kernel
77736 + modules and add up their sizes.
77737 +
77738 +endmenu
77739 +
77740 +menu "Address Space Layout Randomization"
77741 + depends on PAX
77742 +
77743 +config PAX_ASLR
77744 + bool "Address Space Layout Randomization"
77745 + help
77746 + Many if not most exploit techniques rely on the knowledge of
77747 + certain addresses in the attacked program. The following options
77748 + will allow the kernel to apply a certain amount of randomization
77749 + to specific parts of the program thereby forcing an attacker to
77750 + guess them in most cases. Any failed guess will most likely crash
77751 + the attacked program which allows the kernel to detect such attempts
77752 + and react on them. PaX itself provides no reaction mechanisms,
77753 + instead it is strongly encouraged that you make use of Nergal's
77754 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
77755 + (http://www.grsecurity.net/) built-in crash detection features or
77756 + develop one yourself.
77757 +
77758 + By saying Y here you can choose to randomize the following areas:
77759 + - top of the task's kernel stack
77760 + - top of the task's userland stack
77761 + - base address for mmap() requests that do not specify one
77762 + (this includes all libraries)
77763 + - base address of the main executable
77764 +
77765 + It is strongly recommended to say Y here as address space layout
77766 + randomization has negligible impact on performance yet it provides
77767 + a very effective protection.
77768 +
77769 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
77770 + this feature on a per file basis.
77771 +
77772 +config PAX_RANDKSTACK
77773 + bool "Randomize kernel stack base"
77774 + depends on X86_TSC && X86
77775 + help
77776 + By saying Y here the kernel will randomize every task's kernel
77777 + stack on every system call. This will not only force an attacker
77778 + to guess it but also prevent him from making use of possible
77779 + leaked information about it.
77780 +
77781 + Since the kernel stack is a rather scarce resource, randomization
77782 + may cause unexpected stack overflows, therefore you should very
77783 + carefully test your system. Note that once enabled in the kernel
77784 + configuration, this feature cannot be disabled on a per file basis.
77785 +
77786 +config PAX_RANDUSTACK
77787 + bool "Randomize user stack base"
77788 + depends on PAX_ASLR
77789 + help
77790 + By saying Y here the kernel will randomize every task's userland
77791 + stack. The randomization is done in two steps where the second
77792 + one may apply a big amount of shift to the top of the stack and
77793 + cause problems for programs that want to use lots of memory (more
77794 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
77795 + For this reason the second step can be controlled by 'chpax' or
77796 + 'paxctl' on a per file basis.
77797 +
77798 +config PAX_RANDMMAP
77799 + bool "Randomize mmap() base"
77800 + depends on PAX_ASLR
77801 + help
77802 + By saying Y here the kernel will use a randomized base address for
77803 + mmap() requests that do not specify one themselves. As a result
77804 + all dynamically loaded libraries will appear at random addresses
77805 + and therefore be harder to exploit by a technique where an attacker
77806 + attempts to execute library code for his purposes (e.g. spawn a
77807 + shell from an exploited program that is running at an elevated
77808 + privilege level).
77809 +
77810 + Furthermore, if a program is relinked as a dynamic ELF file, its
77811 + base address will be randomized as well, completing the full
77812 + randomization of the address space layout. Attacking such programs
77813 + becomes a guess game. You can find an example of doing this at
77814 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
77815 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
77816 +
77817 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
77818 + feature on a per file basis.
77819 +
77820 +endmenu
77821 +
77822 +menu "Miscellaneous hardening features"
77823 +
77824 +config PAX_MEMORY_SANITIZE
77825 + bool "Sanitize all freed memory"
77826 + depends on !HIBERNATION
77827 + help
77828 + By saying Y here the kernel will erase memory pages as soon as they
77829 + are freed. This in turn reduces the lifetime of data stored in the
77830 + pages, making it less likely that sensitive information such as
77831 + passwords, cryptographic secrets, etc stay in memory for too long.
77832 +
77833 + This is especially useful for programs whose runtime is short, long
77834 + lived processes and the kernel itself benefit from this as long as
77835 + they operate on whole memory pages and ensure timely freeing of pages
77836 + that may hold sensitive information.
77837 +
77838 + The tradeoff is performance impact, on a single CPU system kernel
77839 + compilation sees a 3% slowdown, other systems and workloads may vary
77840 + and you are advised to test this feature on your expected workload
77841 + before deploying it.
77842 +
77843 + Note that this feature does not protect data stored in live pages,
77844 + e.g., process memory swapped to disk may stay there for a long time.
77845 +
77846 +config PAX_MEMORY_STACKLEAK
77847 + bool "Sanitize kernel stack"
77848 + depends on X86
77849 + help
77850 + By saying Y here the kernel will erase the kernel stack before it
77851 + returns from a system call. This in turn reduces the information
77852 + that a kernel stack leak bug can reveal.
77853 +
77854 + Note that such a bug can still leak information that was put on
77855 + the stack by the current system call (the one eventually triggering
77856 + the bug) but traces of earlier system calls on the kernel stack
77857 + cannot leak anymore.
77858 +
77859 + The tradeoff is performance impact: on a single CPU system kernel
77860 + compilation sees a 1% slowdown, other systems and workloads may vary
77861 + and you are advised to test this feature on your expected workload
77862 + before deploying it.
77863 +
77864 + Note: full support for this feature requires gcc with plugin support
77865 + so make sure your compiler is at least gcc 4.5.0. Using older gcc
77866 + versions means that functions with large enough stack frames may
77867 + leave uninitialized memory behind that may be exposed to a later
77868 + syscall leaking the stack.
77869 +
77870 +config PAX_MEMORY_UDEREF
77871 + bool "Prevent invalid userland pointer dereference"
77872 + depends on X86 && !UML_X86 && !XEN
77873 + select PAX_PER_CPU_PGD if X86_64
77874 + help
77875 + By saying Y here the kernel will be prevented from dereferencing
77876 + userland pointers in contexts where the kernel expects only kernel
77877 + pointers. This is both a useful runtime debugging feature and a
77878 + security measure that prevents exploiting a class of kernel bugs.
77879 +
77880 + The tradeoff is that some virtualization solutions may experience
77881 + a huge slowdown and therefore you should not enable this feature
77882 + for kernels meant to run in such environments. Whether a given VM
77883 + solution is affected or not is best determined by simply trying it
77884 + out, the performance impact will be obvious right on boot as this
77885 + mechanism engages from very early on. A good rule of thumb is that
77886 + VMs running on CPUs without hardware virtualization support (i.e.,
77887 + the majority of IA-32 CPUs) will likely experience the slowdown.
77888 +
77889 +config PAX_REFCOUNT
77890 + bool "Prevent various kernel object reference counter overflows"
77891 + depends on GRKERNSEC && ((ARM && (CPU_32v6 || CPU_32v6K || CPU_32v7)) || SPARC64 || X86)
77892 + help
77893 + By saying Y here the kernel will detect and prevent overflowing
77894 + various (but not all) kinds of object reference counters. Such
77895 + overflows can normally occur due to bugs only and are often, if
77896 + not always, exploitable.
77897 +
77898 + The tradeoff is that data structures protected by an overflowed
77899 + refcount will never be freed and therefore will leak memory. Note
77900 + that this leak also happens even without this protection but in
77901 + that case the overflow can eventually trigger the freeing of the
77902 + data structure while it is still being used elsewhere, resulting
77903 + in the exploitable situation that this feature prevents.
77904 +
77905 + Since this has a negligible performance impact, you should enable
77906 + this feature.
77907 +
77908 +config PAX_USERCOPY
77909 + bool "Harden heap object copies between kernel and userland"
77910 + depends on X86 || PPC || SPARC || ARM
77911 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
77912 + help
77913 + By saying Y here the kernel will enforce the size of heap objects
77914 + when they are copied in either direction between the kernel and
77915 + userland, even if only a part of the heap object is copied.
77916 +
77917 + Specifically, this checking prevents information leaking from the
77918 + kernel heap during kernel to userland copies (if the kernel heap
77919 + object is otherwise fully initialized) and prevents kernel heap
77920 + overflows during userland to kernel copies.
77921 +
77922 + Note that the current implementation provides the strictest bounds
77923 + checks for the SLUB allocator.
77924 +
77925 + Enabling this option also enables per-slab cache protection against
77926 + data in a given cache being copied into/out of via userland
77927 + accessors. Though the whitelist of regions will be reduced over
77928 + time, it notably protects important data structures like task structs.
77929 +
77930 + If frame pointers are enabled on x86, this option will also restrict
77931 + copies into and out of the kernel stack to local variables within a
77932 + single frame.
77933 +
77934 + Since this has a negligible performance impact, you should enable
77935 + this feature.
77936 +
77937 +endmenu
77938 +
77939 +endmenu
77940 +
77941 config KEYS
77942 bool "Enable access key retention support"
77943 help
77944 @@ -169,7 +790,7 @@ config INTEL_TXT
77945 config LSM_MMAP_MIN_ADDR
77946 int "Low address space for LSM to protect from user allocation"
77947 depends on SECURITY && SECURITY_SELINUX
77948 - default 32768 if ARM
77949 + default 32768 if ALPHA || ARM || PARISC || SPARC32
77950 default 65536
77951 help
77952 This is the portion of low virtual memory which should be protected
77953 diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
77954 index 97ce8fa..23dad96 100644
77955 --- a/security/apparmor/lsm.c
77956 +++ b/security/apparmor/lsm.c
77957 @@ -620,7 +620,7 @@ static int apparmor_task_setrlimit(struct task_struct *task,
77958 return error;
77959 }
77960
77961 -static struct security_operations apparmor_ops = {
77962 +static struct security_operations apparmor_ops __read_only = {
77963 .name = "apparmor",
77964
77965 .ptrace_access_check = apparmor_ptrace_access_check,
77966 diff --git a/security/commoncap.c b/security/commoncap.c
77967 index 7ce191e..6c29c34 100644
77968 --- a/security/commoncap.c
77969 +++ b/security/commoncap.c
77970 @@ -28,6 +28,7 @@
77971 #include <linux/prctl.h>
77972 #include <linux/securebits.h>
77973 #include <linux/user_namespace.h>
77974 +#include <net/sock.h>
77975
77976 /*
77977 * If a non-root user executes a setuid-root binary in
77978 @@ -569,6 +570,9 @@ int cap_bprm_secureexec(struct linux_binprm *bprm)
77979 {
77980 const struct cred *cred = current_cred();
77981
77982 + if (gr_acl_enable_at_secure())
77983 + return 1;
77984 +
77985 if (cred->uid != 0) {
77986 if (bprm->cap_effective)
77987 return 1;
77988 diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
77989 index 3ccf7ac..d73ad64 100644
77990 --- a/security/integrity/ima/ima.h
77991 +++ b/security/integrity/ima/ima.h
77992 @@ -86,8 +86,8 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
77993 extern spinlock_t ima_queue_lock;
77994
77995 struct ima_h_table {
77996 - atomic_long_t len; /* number of stored measurements in the list */
77997 - atomic_long_t violations;
77998 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
77999 + atomic_long_unchecked_t violations;
78000 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
78001 };
78002 extern struct ima_h_table ima_htable;
78003 diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
78004 index 88a2788..581ab92 100644
78005 --- a/security/integrity/ima/ima_api.c
78006 +++ b/security/integrity/ima/ima_api.c
78007 @@ -75,7 +75,7 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
78008 int result;
78009
78010 /* can overflow, only indicator */
78011 - atomic_long_inc(&ima_htable.violations);
78012 + atomic_long_inc_unchecked(&ima_htable.violations);
78013
78014 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
78015 if (!entry) {
78016 diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
78017 index e1aa2b4..52027bf 100644
78018 --- a/security/integrity/ima/ima_fs.c
78019 +++ b/security/integrity/ima/ima_fs.c
78020 @@ -28,12 +28,12 @@
78021 static int valid_policy = 1;
78022 #define TMPBUFLEN 12
78023 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
78024 - loff_t *ppos, atomic_long_t *val)
78025 + loff_t *ppos, atomic_long_unchecked_t *val)
78026 {
78027 char tmpbuf[TMPBUFLEN];
78028 ssize_t len;
78029
78030 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
78031 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
78032 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
78033 }
78034
78035 diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
78036 index 55a6271..ad829c3 100644
78037 --- a/security/integrity/ima/ima_queue.c
78038 +++ b/security/integrity/ima/ima_queue.c
78039 @@ -81,7 +81,7 @@ static int ima_add_digest_entry(struct ima_template_entry *entry)
78040 INIT_LIST_HEAD(&qe->later);
78041 list_add_tail_rcu(&qe->later, &ima_measurements);
78042
78043 - atomic_long_inc(&ima_htable.len);
78044 + atomic_long_inc_unchecked(&ima_htable.len);
78045 key = ima_hash_key(entry->digest);
78046 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
78047 return 0;
78048 diff --git a/security/keys/compat.c b/security/keys/compat.c
78049 index 4c48e13..7abdac9 100644
78050 --- a/security/keys/compat.c
78051 +++ b/security/keys/compat.c
78052 @@ -44,7 +44,7 @@ long compat_keyctl_instantiate_key_iov(
78053 if (ret == 0)
78054 goto no_payload_free;
78055
78056 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
78057 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
78058
78059 if (iov != iovstack)
78060 kfree(iov);
78061 diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
78062 index 0b3f5d7..892c8a6 100644
78063 --- a/security/keys/keyctl.c
78064 +++ b/security/keys/keyctl.c
78065 @@ -921,7 +921,7 @@ static int keyctl_change_reqkey_auth(struct key *key)
78066 /*
78067 * Copy the iovec data from userspace
78068 */
78069 -static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
78070 +static long copy_from_user_iovec(void *buffer, const struct iovec __user *iov,
78071 unsigned ioc)
78072 {
78073 for (; ioc > 0; ioc--) {
78074 @@ -943,7 +943,7 @@ static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
78075 * If successful, 0 will be returned.
78076 */
78077 long keyctl_instantiate_key_common(key_serial_t id,
78078 - const struct iovec *payload_iov,
78079 + const struct iovec __user *payload_iov,
78080 unsigned ioc,
78081 size_t plen,
78082 key_serial_t ringid)
78083 @@ -1038,7 +1038,7 @@ long keyctl_instantiate_key(key_serial_t id,
78084 [0].iov_len = plen
78085 };
78086
78087 - return keyctl_instantiate_key_common(id, iov, 1, plen, ringid);
78088 + return keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, 1, plen, ringid);
78089 }
78090
78091 return keyctl_instantiate_key_common(id, NULL, 0, 0, ringid);
78092 @@ -1071,7 +1071,7 @@ long keyctl_instantiate_key_iov(key_serial_t id,
78093 if (ret == 0)
78094 goto no_payload_free;
78095
78096 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
78097 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
78098
78099 if (iov != iovstack)
78100 kfree(iov);
78101 diff --git a/security/keys/keyring.c b/security/keys/keyring.c
78102 index d605f75..2bc6be9 100644
78103 --- a/security/keys/keyring.c
78104 +++ b/security/keys/keyring.c
78105 @@ -214,15 +214,15 @@ static long keyring_read(const struct key *keyring,
78106 ret = -EFAULT;
78107
78108 for (loop = 0; loop < klist->nkeys; loop++) {
78109 + key_serial_t serial;
78110 key = klist->keys[loop];
78111 + serial = key->serial;
78112
78113 tmp = sizeof(key_serial_t);
78114 if (tmp > buflen)
78115 tmp = buflen;
78116
78117 - if (copy_to_user(buffer,
78118 - &key->serial,
78119 - tmp) != 0)
78120 + if (copy_to_user(buffer, &serial, tmp))
78121 goto error;
78122
78123 buflen -= tmp;
78124 diff --git a/security/min_addr.c b/security/min_addr.c
78125 index f728728..6457a0c 100644
78126 --- a/security/min_addr.c
78127 +++ b/security/min_addr.c
78128 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
78129 */
78130 static void update_mmap_min_addr(void)
78131 {
78132 +#ifndef SPARC
78133 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
78134 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
78135 mmap_min_addr = dac_mmap_min_addr;
78136 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
78137 #else
78138 mmap_min_addr = dac_mmap_min_addr;
78139 #endif
78140 +#endif
78141 }
78142
78143 /*
78144 diff --git a/security/security.c b/security/security.c
78145 index d754249..8bf426e 100644
78146 --- a/security/security.c
78147 +++ b/security/security.c
78148 @@ -26,8 +26,8 @@
78149 static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1] =
78150 CONFIG_DEFAULT_SECURITY;
78151
78152 -static struct security_operations *security_ops;
78153 -static struct security_operations default_security_ops = {
78154 +static struct security_operations *security_ops __read_only;
78155 +static struct security_operations default_security_ops __read_only = {
78156 .name = "default",
78157 };
78158
78159 @@ -68,7 +68,9 @@ int __init security_init(void)
78160
78161 void reset_security_ops(void)
78162 {
78163 + pax_open_kernel();
78164 security_ops = &default_security_ops;
78165 + pax_close_kernel();
78166 }
78167
78168 /* Save user chosen LSM */
78169 diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
78170 index 6a3683e..f52f4c0 100644
78171 --- a/security/selinux/hooks.c
78172 +++ b/security/selinux/hooks.c
78173 @@ -94,8 +94,6 @@
78174
78175 #define NUM_SEL_MNT_OPTS 5
78176
78177 -extern struct security_operations *security_ops;
78178 -
78179 /* SECMARK reference count */
78180 static atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
78181
78182 @@ -5429,7 +5427,7 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer)
78183
78184 #endif
78185
78186 -static struct security_operations selinux_ops = {
78187 +static struct security_operations selinux_ops __read_only = {
78188 .name = "selinux",
78189
78190 .ptrace_access_check = selinux_ptrace_access_check,
78191 diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
78192 index b43813c..74be837 100644
78193 --- a/security/selinux/include/xfrm.h
78194 +++ b/security/selinux/include/xfrm.h
78195 @@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall);
78196
78197 static inline void selinux_xfrm_notify_policyload(void)
78198 {
78199 - atomic_inc(&flow_cache_genid);
78200 + atomic_inc_unchecked(&flow_cache_genid);
78201 }
78202 #else
78203 static inline int selinux_xfrm_enabled(void)
78204 diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
78205 index e8af5b0b..78527ef 100644
78206 --- a/security/smack/smack_lsm.c
78207 +++ b/security/smack/smack_lsm.c
78208 @@ -3481,7 +3481,7 @@ static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
78209 return 0;
78210 }
78211
78212 -struct security_operations smack_ops = {
78213 +struct security_operations smack_ops __read_only = {
78214 .name = "smack",
78215
78216 .ptrace_access_check = smack_ptrace_access_check,
78217 diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
78218 index 620d37c..e2ad89b 100644
78219 --- a/security/tomoyo/tomoyo.c
78220 +++ b/security/tomoyo/tomoyo.c
78221 @@ -501,7 +501,7 @@ static int tomoyo_socket_sendmsg(struct socket *sock, struct msghdr *msg,
78222 * tomoyo_security_ops is a "struct security_operations" which is used for
78223 * registering TOMOYO.
78224 */
78225 -static struct security_operations tomoyo_security_ops = {
78226 +static struct security_operations tomoyo_security_ops __read_only = {
78227 .name = "tomoyo",
78228 .cred_alloc_blank = tomoyo_cred_alloc_blank,
78229 .cred_prepare = tomoyo_cred_prepare,
78230 diff --git a/sound/aoa/codecs/onyx.c b/sound/aoa/codecs/onyx.c
78231 index 762af68..7103453 100644
78232 --- a/sound/aoa/codecs/onyx.c
78233 +++ b/sound/aoa/codecs/onyx.c
78234 @@ -54,7 +54,7 @@ struct onyx {
78235 spdif_locked:1,
78236 analog_locked:1,
78237 original_mute:2;
78238 - int open_count;
78239 + local_t open_count;
78240 struct codec_info *codec_info;
78241
78242 /* mutex serializes concurrent access to the device
78243 @@ -753,7 +753,7 @@ static int onyx_open(struct codec_info_item *cii,
78244 struct onyx *onyx = cii->codec_data;
78245
78246 mutex_lock(&onyx->mutex);
78247 - onyx->open_count++;
78248 + local_inc(&onyx->open_count);
78249 mutex_unlock(&onyx->mutex);
78250
78251 return 0;
78252 @@ -765,8 +765,7 @@ static int onyx_close(struct codec_info_item *cii,
78253 struct onyx *onyx = cii->codec_data;
78254
78255 mutex_lock(&onyx->mutex);
78256 - onyx->open_count--;
78257 - if (!onyx->open_count)
78258 + if (local_dec_and_test(&onyx->open_count))
78259 onyx->spdif_locked = onyx->analog_locked = 0;
78260 mutex_unlock(&onyx->mutex);
78261
78262 diff --git a/sound/aoa/codecs/onyx.h b/sound/aoa/codecs/onyx.h
78263 index ffd2025..df062c9 100644
78264 --- a/sound/aoa/codecs/onyx.h
78265 +++ b/sound/aoa/codecs/onyx.h
78266 @@ -11,6 +11,7 @@
78267 #include <linux/i2c.h>
78268 #include <asm/pmac_low_i2c.h>
78269 #include <asm/prom.h>
78270 +#include <asm/local.h>
78271
78272 /* PCM3052 register definitions */
78273
78274 diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
78275 index 08fde00..0bf641a 100644
78276 --- a/sound/core/oss/pcm_oss.c
78277 +++ b/sound/core/oss/pcm_oss.c
78278 @@ -1189,10 +1189,10 @@ snd_pcm_sframes_t snd_pcm_oss_write3(struct snd_pcm_substream *substream, const
78279 if (in_kernel) {
78280 mm_segment_t fs;
78281 fs = snd_enter_user();
78282 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
78283 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
78284 snd_leave_user(fs);
78285 } else {
78286 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
78287 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
78288 }
78289 if (ret != -EPIPE && ret != -ESTRPIPE)
78290 break;
78291 @@ -1234,10 +1234,10 @@ snd_pcm_sframes_t snd_pcm_oss_read3(struct snd_pcm_substream *substream, char *p
78292 if (in_kernel) {
78293 mm_segment_t fs;
78294 fs = snd_enter_user();
78295 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
78296 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
78297 snd_leave_user(fs);
78298 } else {
78299 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
78300 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
78301 }
78302 if (ret == -EPIPE) {
78303 if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
78304 @@ -1337,7 +1337,7 @@ static ssize_t snd_pcm_oss_write2(struct snd_pcm_substream *substream, const cha
78305 struct snd_pcm_plugin_channel *channels;
78306 size_t oss_frame_bytes = (runtime->oss.plugin_first->src_width * runtime->oss.plugin_first->src_format.channels) / 8;
78307 if (!in_kernel) {
78308 - if (copy_from_user(runtime->oss.buffer, (const char __force __user *)buf, bytes))
78309 + if (copy_from_user(runtime->oss.buffer, (const char __force_user *)buf, bytes))
78310 return -EFAULT;
78311 buf = runtime->oss.buffer;
78312 }
78313 @@ -1407,7 +1407,7 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
78314 }
78315 } else {
78316 tmp = snd_pcm_oss_write2(substream,
78317 - (const char __force *)buf,
78318 + (const char __force_kernel *)buf,
78319 runtime->oss.period_bytes, 0);
78320 if (tmp <= 0)
78321 goto err;
78322 @@ -1433,7 +1433,7 @@ static ssize_t snd_pcm_oss_read2(struct snd_pcm_substream *substream, char *buf,
78323 struct snd_pcm_runtime *runtime = substream->runtime;
78324 snd_pcm_sframes_t frames, frames1;
78325 #ifdef CONFIG_SND_PCM_OSS_PLUGINS
78326 - char __user *final_dst = (char __force __user *)buf;
78327 + char __user *final_dst = (char __force_user *)buf;
78328 if (runtime->oss.plugin_first) {
78329 struct snd_pcm_plugin_channel *channels;
78330 size_t oss_frame_bytes = (runtime->oss.plugin_last->dst_width * runtime->oss.plugin_last->dst_format.channels) / 8;
78331 @@ -1495,7 +1495,7 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
78332 xfer += tmp;
78333 runtime->oss.buffer_used -= tmp;
78334 } else {
78335 - tmp = snd_pcm_oss_read2(substream, (char __force *)buf,
78336 + tmp = snd_pcm_oss_read2(substream, (char __force_kernel *)buf,
78337 runtime->oss.period_bytes, 0);
78338 if (tmp <= 0)
78339 goto err;
78340 @@ -1663,7 +1663,7 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
78341 size1);
78342 size1 /= runtime->channels; /* frames */
78343 fs = snd_enter_user();
78344 - snd_pcm_lib_write(substream, (void __force __user *)runtime->oss.buffer, size1);
78345 + snd_pcm_lib_write(substream, (void __force_user *)runtime->oss.buffer, size1);
78346 snd_leave_user(fs);
78347 }
78348 } else if (runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) {
78349 diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
78350 index 91cdf94..4085161 100644
78351 --- a/sound/core/pcm_compat.c
78352 +++ b/sound/core/pcm_compat.c
78353 @@ -31,7 +31,7 @@ static int snd_pcm_ioctl_delay_compat(struct snd_pcm_substream *substream,
78354 int err;
78355
78356 fs = snd_enter_user();
78357 - err = snd_pcm_delay(substream, &delay);
78358 + err = snd_pcm_delay(substream, (snd_pcm_sframes_t __force_user *)&delay);
78359 snd_leave_user(fs);
78360 if (err < 0)
78361 return err;
78362 diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
78363 index 25ed9fe..24c46e9 100644
78364 --- a/sound/core/pcm_native.c
78365 +++ b/sound/core/pcm_native.c
78366 @@ -2765,11 +2765,11 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
78367 switch (substream->stream) {
78368 case SNDRV_PCM_STREAM_PLAYBACK:
78369 result = snd_pcm_playback_ioctl1(NULL, substream, cmd,
78370 - (void __user *)arg);
78371 + (void __force_user *)arg);
78372 break;
78373 case SNDRV_PCM_STREAM_CAPTURE:
78374 result = snd_pcm_capture_ioctl1(NULL, substream, cmd,
78375 - (void __user *)arg);
78376 + (void __force_user *)arg);
78377 break;
78378 default:
78379 result = -EINVAL;
78380 diff --git a/sound/core/seq/seq_device.c b/sound/core/seq/seq_device.c
78381 index 5cf8d65..912a79c 100644
78382 --- a/sound/core/seq/seq_device.c
78383 +++ b/sound/core/seq/seq_device.c
78384 @@ -64,7 +64,7 @@ struct ops_list {
78385 int argsize; /* argument size */
78386
78387 /* operators */
78388 - struct snd_seq_dev_ops ops;
78389 + struct snd_seq_dev_ops *ops;
78390
78391 /* registred devices */
78392 struct list_head dev_list; /* list of devices */
78393 @@ -333,7 +333,7 @@ int snd_seq_device_register_driver(char *id, struct snd_seq_dev_ops *entry,
78394
78395 mutex_lock(&ops->reg_mutex);
78396 /* copy driver operators */
78397 - ops->ops = *entry;
78398 + ops->ops = entry;
78399 ops->driver |= DRIVER_LOADED;
78400 ops->argsize = argsize;
78401
78402 @@ -463,7 +463,7 @@ static int init_device(struct snd_seq_device *dev, struct ops_list *ops)
78403 dev->name, ops->id, ops->argsize, dev->argsize);
78404 return -EINVAL;
78405 }
78406 - if (ops->ops.init_device(dev) >= 0) {
78407 + if (ops->ops->init_device(dev) >= 0) {
78408 dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
78409 ops->num_init_devices++;
78410 } else {
78411 @@ -490,7 +490,7 @@ static int free_device(struct snd_seq_device *dev, struct ops_list *ops)
78412 dev->name, ops->id, ops->argsize, dev->argsize);
78413 return -EINVAL;
78414 }
78415 - if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
78416 + if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
78417 dev->status = SNDRV_SEQ_DEVICE_FREE;
78418 dev->driver_data = NULL;
78419 ops->num_init_devices--;
78420 diff --git a/sound/drivers/mts64.c b/sound/drivers/mts64.c
78421 index 621e60e..f4543f5 100644
78422 --- a/sound/drivers/mts64.c
78423 +++ b/sound/drivers/mts64.c
78424 @@ -29,6 +29,7 @@
78425 #include <sound/initval.h>
78426 #include <sound/rawmidi.h>
78427 #include <sound/control.h>
78428 +#include <asm/local.h>
78429
78430 #define CARD_NAME "Miditerminal 4140"
78431 #define DRIVER_NAME "MTS64"
78432 @@ -67,7 +68,7 @@ struct mts64 {
78433 struct pardevice *pardev;
78434 int pardev_claimed;
78435
78436 - int open_count;
78437 + local_t open_count;
78438 int current_midi_output_port;
78439 int current_midi_input_port;
78440 u8 mode[MTS64_NUM_INPUT_PORTS];
78441 @@ -697,7 +698,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
78442 {
78443 struct mts64 *mts = substream->rmidi->private_data;
78444
78445 - if (mts->open_count == 0) {
78446 + if (local_read(&mts->open_count) == 0) {
78447 /* We don't need a spinlock here, because this is just called
78448 if the device has not been opened before.
78449 So there aren't any IRQs from the device */
78450 @@ -705,7 +706,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
78451
78452 msleep(50);
78453 }
78454 - ++(mts->open_count);
78455 + local_inc(&mts->open_count);
78456
78457 return 0;
78458 }
78459 @@ -715,8 +716,7 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
78460 struct mts64 *mts = substream->rmidi->private_data;
78461 unsigned long flags;
78462
78463 - --(mts->open_count);
78464 - if (mts->open_count == 0) {
78465 + if (local_dec_return(&mts->open_count) == 0) {
78466 /* We need the spinlock_irqsave here because we can still
78467 have IRQs at this point */
78468 spin_lock_irqsave(&mts->lock, flags);
78469 @@ -725,8 +725,8 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
78470
78471 msleep(500);
78472
78473 - } else if (mts->open_count < 0)
78474 - mts->open_count = 0;
78475 + } else if (local_read(&mts->open_count) < 0)
78476 + local_set(&mts->open_count, 0);
78477
78478 return 0;
78479 }
78480 diff --git a/sound/drivers/opl4/opl4_lib.c b/sound/drivers/opl4/opl4_lib.c
78481 index b953fb4..1999c01 100644
78482 --- a/sound/drivers/opl4/opl4_lib.c
78483 +++ b/sound/drivers/opl4/opl4_lib.c
78484 @@ -29,7 +29,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
78485 MODULE_DESCRIPTION("OPL4 driver");
78486 MODULE_LICENSE("GPL");
78487
78488 -static void inline snd_opl4_wait(struct snd_opl4 *opl4)
78489 +static inline void snd_opl4_wait(struct snd_opl4 *opl4)
78490 {
78491 int timeout = 10;
78492 while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
78493 diff --git a/sound/drivers/portman2x4.c b/sound/drivers/portman2x4.c
78494 index 3e32bd3..46fc152 100644
78495 --- a/sound/drivers/portman2x4.c
78496 +++ b/sound/drivers/portman2x4.c
78497 @@ -48,6 +48,7 @@
78498 #include <sound/initval.h>
78499 #include <sound/rawmidi.h>
78500 #include <sound/control.h>
78501 +#include <asm/local.h>
78502
78503 #define CARD_NAME "Portman 2x4"
78504 #define DRIVER_NAME "portman"
78505 @@ -85,7 +86,7 @@ struct portman {
78506 struct pardevice *pardev;
78507 int pardev_claimed;
78508
78509 - int open_count;
78510 + local_t open_count;
78511 int mode[PORTMAN_NUM_INPUT_PORTS];
78512 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
78513 };
78514 diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c
78515 index 87657dd..a8268d4 100644
78516 --- a/sound/firewire/amdtp.c
78517 +++ b/sound/firewire/amdtp.c
78518 @@ -371,7 +371,7 @@ static void queue_out_packet(struct amdtp_out_stream *s, unsigned int cycle)
78519 ptr = s->pcm_buffer_pointer + data_blocks;
78520 if (ptr >= pcm->runtime->buffer_size)
78521 ptr -= pcm->runtime->buffer_size;
78522 - ACCESS_ONCE(s->pcm_buffer_pointer) = ptr;
78523 + ACCESS_ONCE_RW(s->pcm_buffer_pointer) = ptr;
78524
78525 s->pcm_period_pointer += data_blocks;
78526 if (s->pcm_period_pointer >= pcm->runtime->period_size) {
78527 @@ -511,7 +511,7 @@ EXPORT_SYMBOL(amdtp_out_stream_start);
78528 */
78529 void amdtp_out_stream_update(struct amdtp_out_stream *s)
78530 {
78531 - ACCESS_ONCE(s->source_node_id_field) =
78532 + ACCESS_ONCE_RW(s->source_node_id_field) =
78533 (fw_parent_device(s->unit)->card->node_id & 0x3f) << 24;
78534 }
78535 EXPORT_SYMBOL(amdtp_out_stream_update);
78536 diff --git a/sound/firewire/amdtp.h b/sound/firewire/amdtp.h
78537 index 537a9cb..8e8c8e9 100644
78538 --- a/sound/firewire/amdtp.h
78539 +++ b/sound/firewire/amdtp.h
78540 @@ -146,7 +146,7 @@ static inline void amdtp_out_stream_pcm_prepare(struct amdtp_out_stream *s)
78541 static inline void amdtp_out_stream_pcm_trigger(struct amdtp_out_stream *s,
78542 struct snd_pcm_substream *pcm)
78543 {
78544 - ACCESS_ONCE(s->pcm) = pcm;
78545 + ACCESS_ONCE_RW(s->pcm) = pcm;
78546 }
78547
78548 /**
78549 diff --git a/sound/firewire/isight.c b/sound/firewire/isight.c
78550 index cd094ec..eca1277 100644
78551 --- a/sound/firewire/isight.c
78552 +++ b/sound/firewire/isight.c
78553 @@ -96,7 +96,7 @@ static void isight_update_pointers(struct isight *isight, unsigned int count)
78554 ptr += count;
78555 if (ptr >= runtime->buffer_size)
78556 ptr -= runtime->buffer_size;
78557 - ACCESS_ONCE(isight->buffer_pointer) = ptr;
78558 + ACCESS_ONCE_RW(isight->buffer_pointer) = ptr;
78559
78560 isight->period_counter += count;
78561 if (isight->period_counter >= runtime->period_size) {
78562 @@ -307,7 +307,7 @@ static int isight_hw_params(struct snd_pcm_substream *substream,
78563 if (err < 0)
78564 return err;
78565
78566 - ACCESS_ONCE(isight->pcm_active) = true;
78567 + ACCESS_ONCE_RW(isight->pcm_active) = true;
78568
78569 return 0;
78570 }
78571 @@ -340,7 +340,7 @@ static int isight_hw_free(struct snd_pcm_substream *substream)
78572 {
78573 struct isight *isight = substream->private_data;
78574
78575 - ACCESS_ONCE(isight->pcm_active) = false;
78576 + ACCESS_ONCE_RW(isight->pcm_active) = false;
78577
78578 mutex_lock(&isight->mutex);
78579 isight_stop_streaming(isight);
78580 @@ -433,10 +433,10 @@ static int isight_trigger(struct snd_pcm_substream *substream, int cmd)
78581
78582 switch (cmd) {
78583 case SNDRV_PCM_TRIGGER_START:
78584 - ACCESS_ONCE(isight->pcm_running) = true;
78585 + ACCESS_ONCE_RW(isight->pcm_running) = true;
78586 break;
78587 case SNDRV_PCM_TRIGGER_STOP:
78588 - ACCESS_ONCE(isight->pcm_running) = false;
78589 + ACCESS_ONCE_RW(isight->pcm_running) = false;
78590 break;
78591 default:
78592 return -EINVAL;
78593 diff --git a/sound/isa/cmi8330.c b/sound/isa/cmi8330.c
78594 index 7bd5e33..1fcab12 100644
78595 --- a/sound/isa/cmi8330.c
78596 +++ b/sound/isa/cmi8330.c
78597 @@ -172,7 +172,7 @@ struct snd_cmi8330 {
78598
78599 struct snd_pcm *pcm;
78600 struct snd_cmi8330_stream {
78601 - struct snd_pcm_ops ops;
78602 + snd_pcm_ops_no_const ops;
78603 snd_pcm_open_callback_t open;
78604 void *private_data; /* sb or wss */
78605 } streams[2];
78606 diff --git a/sound/oss/sb_audio.c b/sound/oss/sb_audio.c
78607 index 733b014..56ce96f 100644
78608 --- a/sound/oss/sb_audio.c
78609 +++ b/sound/oss/sb_audio.c
78610 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
78611 buf16 = (signed short *)(localbuf + localoffs);
78612 while (c)
78613 {
78614 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
78615 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
78616 if (copy_from_user(lbuf8,
78617 userbuf+useroffs + p,
78618 locallen))
78619 diff --git a/sound/oss/swarm_cs4297a.c b/sound/oss/swarm_cs4297a.c
78620 index 09d4648..cf234c7 100644
78621 --- a/sound/oss/swarm_cs4297a.c
78622 +++ b/sound/oss/swarm_cs4297a.c
78623 @@ -2606,7 +2606,6 @@ static int __init cs4297a_init(void)
78624 {
78625 struct cs4297a_state *s;
78626 u32 pwr, id;
78627 - mm_segment_t fs;
78628 int rval;
78629 #ifndef CONFIG_BCM_CS4297A_CSWARM
78630 u64 cfg;
78631 @@ -2696,22 +2695,23 @@ static int __init cs4297a_init(void)
78632 if (!rval) {
78633 char *sb1250_duart_present;
78634
78635 +#if 0
78636 + mm_segment_t fs;
78637 fs = get_fs();
78638 set_fs(KERNEL_DS);
78639 -#if 0
78640 val = SOUND_MASK_LINE;
78641 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
78642 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
78643 val = initvol[i].vol;
78644 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
78645 }
78646 + set_fs(fs);
78647 // cs4297a_write_ac97(s, 0x18, 0x0808);
78648 #else
78649 // cs4297a_write_ac97(s, 0x5e, 0x180);
78650 cs4297a_write_ac97(s, 0x02, 0x0808);
78651 cs4297a_write_ac97(s, 0x18, 0x0808);
78652 #endif
78653 - set_fs(fs);
78654
78655 list_add(&s->list, &cs4297a_devs);
78656
78657 diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
78658 index f0f1943..8e1f96c 100644
78659 --- a/sound/pci/hda/hda_codec.h
78660 +++ b/sound/pci/hda/hda_codec.h
78661 @@ -611,7 +611,7 @@ struct hda_bus_ops {
78662 /* notify power-up/down from codec to controller */
78663 void (*pm_notify)(struct hda_bus *bus);
78664 #endif
78665 -};
78666 +} __no_const;
78667
78668 /* template to pass to the bus constructor */
78669 struct hda_bus_template {
78670 @@ -713,6 +713,7 @@ struct hda_codec_ops {
78671 #endif
78672 void (*reboot_notify)(struct hda_codec *codec);
78673 };
78674 +typedef struct hda_codec_ops __no_const hda_codec_ops_no_const;
78675
78676 /* record for amp information cache */
78677 struct hda_cache_head {
78678 @@ -743,7 +744,7 @@ struct hda_pcm_ops {
78679 struct snd_pcm_substream *substream);
78680 int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec,
78681 struct snd_pcm_substream *substream);
78682 -};
78683 +} __no_const;
78684
78685 /* PCM information for each substream */
78686 struct hda_pcm_stream {
78687 @@ -801,7 +802,7 @@ struct hda_codec {
78688 const char *modelname; /* model name for preset */
78689
78690 /* set by patch */
78691 - struct hda_codec_ops patch_ops;
78692 + hda_codec_ops_no_const patch_ops;
78693
78694 /* PCM to create, set by patch_ops.build_pcms callback */
78695 unsigned int num_pcms;
78696 diff --git a/sound/pci/ice1712/ice1712.h b/sound/pci/ice1712/ice1712.h
78697 index 0da778a..bc38b84 100644
78698 --- a/sound/pci/ice1712/ice1712.h
78699 +++ b/sound/pci/ice1712/ice1712.h
78700 @@ -269,7 +269,7 @@ struct snd_ak4xxx_private {
78701 unsigned int mask_flags; /* total mask bits */
78702 struct snd_akm4xxx_ops {
78703 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
78704 - } ops;
78705 + } __no_const ops;
78706 };
78707
78708 struct snd_ice1712_spdif {
78709 @@ -285,7 +285,7 @@ struct snd_ice1712_spdif {
78710 int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
78711 void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
78712 int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
78713 - } ops;
78714 + } __no_const ops;
78715 };
78716
78717
78718 diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
78719 index 12a9a2b..2b6138f 100644
78720 --- a/sound/pci/ymfpci/ymfpci_main.c
78721 +++ b/sound/pci/ymfpci/ymfpci_main.c
78722 @@ -203,8 +203,8 @@ static void snd_ymfpci_hw_stop(struct snd_ymfpci *chip)
78723 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
78724 break;
78725 }
78726 - if (atomic_read(&chip->interrupt_sleep_count)) {
78727 - atomic_set(&chip->interrupt_sleep_count, 0);
78728 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
78729 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
78730 wake_up(&chip->interrupt_sleep);
78731 }
78732 __end:
78733 @@ -788,7 +788,7 @@ static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip)
78734 continue;
78735 init_waitqueue_entry(&wait, current);
78736 add_wait_queue(&chip->interrupt_sleep, &wait);
78737 - atomic_inc(&chip->interrupt_sleep_count);
78738 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
78739 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
78740 remove_wait_queue(&chip->interrupt_sleep, &wait);
78741 }
78742 @@ -826,8 +826,8 @@ static irqreturn_t snd_ymfpci_interrupt(int irq, void *dev_id)
78743 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
78744 spin_unlock(&chip->reg_lock);
78745
78746 - if (atomic_read(&chip->interrupt_sleep_count)) {
78747 - atomic_set(&chip->interrupt_sleep_count, 0);
78748 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
78749 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
78750 wake_up(&chip->interrupt_sleep);
78751 }
78752 }
78753 @@ -2389,7 +2389,7 @@ int __devinit snd_ymfpci_create(struct snd_card *card,
78754 spin_lock_init(&chip->reg_lock);
78755 spin_lock_init(&chip->voice_lock);
78756 init_waitqueue_head(&chip->interrupt_sleep);
78757 - atomic_set(&chip->interrupt_sleep_count, 0);
78758 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
78759 chip->card = card;
78760 chip->pci = pci;
78761 chip->irq = -1;
78762 diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
78763 index cdc860a..db34a93 100644
78764 --- a/sound/soc/soc-pcm.c
78765 +++ b/sound/soc/soc-pcm.c
78766 @@ -605,7 +605,7 @@ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num)
78767 struct snd_soc_platform *platform = rtd->platform;
78768 struct snd_soc_dai *codec_dai = rtd->codec_dai;
78769 struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
78770 - struct snd_pcm_ops *soc_pcm_ops = &rtd->ops;
78771 + snd_pcm_ops_no_const *soc_pcm_ops = &rtd->ops;
78772 struct snd_pcm *pcm;
78773 char new_name[64];
78774 int ret = 0, playback = 0, capture = 0;
78775 diff --git a/sound/usb/card.h b/sound/usb/card.h
78776 index da5fa1a..113cd02 100644
78777 --- a/sound/usb/card.h
78778 +++ b/sound/usb/card.h
78779 @@ -45,6 +45,7 @@ struct snd_urb_ops {
78780 int (*prepare_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
78781 int (*retire_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
78782 };
78783 +typedef struct snd_urb_ops __no_const snd_urb_ops_no_const;
78784
78785 struct snd_usb_substream {
78786 struct snd_usb_stream *stream;
78787 @@ -94,7 +95,7 @@ struct snd_usb_substream {
78788 struct snd_pcm_hw_constraint_list rate_list; /* limited rates */
78789 spinlock_t lock;
78790
78791 - struct snd_urb_ops ops; /* callbacks (must be filled at init) */
78792 + snd_urb_ops_no_const ops; /* callbacks (must be filled at init) */
78793 int last_frame_number; /* stored frame number */
78794 int last_delay; /* stored delay */
78795 };
78796 diff --git a/tools/gcc/Makefile b/tools/gcc/Makefile
78797 new file mode 100644
78798 index 0000000..894c8bf
78799 --- /dev/null
78800 +++ b/tools/gcc/Makefile
78801 @@ -0,0 +1,23 @@
78802 +#CC := gcc
78803 +#PLUGIN_SOURCE_FILES := pax_plugin.c
78804 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
78805 +GCCPLUGINS_DIR := $(shell $(CC) -print-file-name=plugin)
78806 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W -std=gnu99
78807 +
78808 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include -I$(GCCPLUGINS_DIR)/include/c-family -std=gnu99 -ggdb
78809 +
78810 +hostlibs-y := constify_plugin.so
78811 +hostlibs-$(CONFIG_PAX_MEMORY_STACKLEAK) += stackleak_plugin.so
78812 +hostlibs-$(CONFIG_KALLOCSTAT_PLUGIN) += kallocstat_plugin.so
78813 +hostlibs-$(CONFIG_PAX_KERNEXEC_PLUGIN) += kernexec_plugin.so
78814 +hostlibs-$(CONFIG_CHECKER_PLUGIN) += checker_plugin.so
78815 +hostlibs-y += colorize_plugin.so
78816 +
78817 +always := $(hostlibs-y)
78818 +
78819 +constify_plugin-objs := constify_plugin.o
78820 +stackleak_plugin-objs := stackleak_plugin.o
78821 +kallocstat_plugin-objs := kallocstat_plugin.o
78822 +kernexec_plugin-objs := kernexec_plugin.o
78823 +checker_plugin-objs := checker_plugin.o
78824 +colorize_plugin-objs := colorize_plugin.o
78825 diff --git a/tools/gcc/checker_plugin.c b/tools/gcc/checker_plugin.c
78826 new file mode 100644
78827 index 0000000..d41b5af
78828 --- /dev/null
78829 +++ b/tools/gcc/checker_plugin.c
78830 @@ -0,0 +1,171 @@
78831 +/*
78832 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
78833 + * Licensed under the GPL v2
78834 + *
78835 + * Note: the choice of the license means that the compilation process is
78836 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
78837 + * but for the kernel it doesn't matter since it doesn't link against
78838 + * any of the gcc libraries
78839 + *
78840 + * gcc plugin to implement various sparse (source code checker) features
78841 + *
78842 + * TODO:
78843 + * - define separate __iomem, __percpu and __rcu address spaces (lots of code to patch)
78844 + *
78845 + * BUGS:
78846 + * - none known
78847 + */
78848 +#include "gcc-plugin.h"
78849 +#include "config.h"
78850 +#include "system.h"
78851 +#include "coretypes.h"
78852 +#include "tree.h"
78853 +#include "tree-pass.h"
78854 +#include "flags.h"
78855 +#include "intl.h"
78856 +#include "toplev.h"
78857 +#include "plugin.h"
78858 +//#include "expr.h" where are you...
78859 +#include "diagnostic.h"
78860 +#include "plugin-version.h"
78861 +#include "tm.h"
78862 +#include "function.h"
78863 +#include "basic-block.h"
78864 +#include "gimple.h"
78865 +#include "rtl.h"
78866 +#include "emit-rtl.h"
78867 +#include "tree-flow.h"
78868 +#include "target.h"
78869 +
78870 +extern void c_register_addr_space (const char *str, addr_space_t as);
78871 +extern enum machine_mode default_addr_space_pointer_mode (addr_space_t);
78872 +extern enum machine_mode default_addr_space_address_mode (addr_space_t);
78873 +extern bool default_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as);
78874 +extern bool default_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as);
78875 +extern rtx default_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as);
78876 +
78877 +extern void print_gimple_stmt(FILE *, gimple, int, int);
78878 +extern rtx emit_move_insn(rtx x, rtx y);
78879 +
78880 +int plugin_is_GPL_compatible;
78881 +
78882 +static struct plugin_info checker_plugin_info = {
78883 + .version = "201111150100",
78884 +};
78885 +
78886 +#define ADDR_SPACE_KERNEL 0
78887 +#define ADDR_SPACE_FORCE_KERNEL 1
78888 +#define ADDR_SPACE_USER 2
78889 +#define ADDR_SPACE_FORCE_USER 3
78890 +#define ADDR_SPACE_IOMEM 0
78891 +#define ADDR_SPACE_FORCE_IOMEM 0
78892 +#define ADDR_SPACE_PERCPU 0
78893 +#define ADDR_SPACE_FORCE_PERCPU 0
78894 +#define ADDR_SPACE_RCU 0
78895 +#define ADDR_SPACE_FORCE_RCU 0
78896 +
78897 +static enum machine_mode checker_addr_space_pointer_mode(addr_space_t addrspace)
78898 +{
78899 + return default_addr_space_pointer_mode(ADDR_SPACE_GENERIC);
78900 +}
78901 +
78902 +static enum machine_mode checker_addr_space_address_mode(addr_space_t addrspace)
78903 +{
78904 + return default_addr_space_address_mode(ADDR_SPACE_GENERIC);
78905 +}
78906 +
78907 +static bool checker_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as)
78908 +{
78909 + return default_addr_space_valid_pointer_mode(mode, as);
78910 +}
78911 +
78912 +static bool checker_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as)
78913 +{
78914 + return default_addr_space_legitimate_address_p(mode, mem, strict, ADDR_SPACE_GENERIC);
78915 +}
78916 +
78917 +static rtx checker_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as)
78918 +{
78919 + return default_addr_space_legitimize_address(x, oldx, mode, as);
78920 +}
78921 +
78922 +static bool checker_addr_space_subset_p(addr_space_t subset, addr_space_t superset)
78923 +{
78924 + if (subset == ADDR_SPACE_FORCE_KERNEL && superset == ADDR_SPACE_KERNEL)
78925 + return true;
78926 +
78927 + if (subset == ADDR_SPACE_FORCE_USER && superset == ADDR_SPACE_USER)
78928 + return true;
78929 +
78930 + if (subset == ADDR_SPACE_FORCE_IOMEM && superset == ADDR_SPACE_IOMEM)
78931 + return true;
78932 +
78933 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_USER)
78934 + return true;
78935 +
78936 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_IOMEM)
78937 + return true;
78938 +
78939 + if (subset == ADDR_SPACE_USER && superset == ADDR_SPACE_FORCE_KERNEL)
78940 + return true;
78941 +
78942 + if (subset == ADDR_SPACE_IOMEM && superset == ADDR_SPACE_FORCE_KERNEL)
78943 + return true;
78944 +
78945 + return subset == superset;
78946 +}
78947 +
78948 +static rtx checker_addr_space_convert(rtx op, tree from_type, tree to_type)
78949 +{
78950 +// addr_space_t from_as = TYPE_ADDR_SPACE(TREE_TYPE(from_type));
78951 +// addr_space_t to_as = TYPE_ADDR_SPACE(TREE_TYPE(to_type));
78952 +
78953 + return op;
78954 +}
78955 +
78956 +static void register_checker_address_spaces(void *event_data, void *data)
78957 +{
78958 + c_register_addr_space("__kernel", ADDR_SPACE_KERNEL);
78959 + c_register_addr_space("__force_kernel", ADDR_SPACE_FORCE_KERNEL);
78960 + c_register_addr_space("__user", ADDR_SPACE_USER);
78961 + c_register_addr_space("__force_user", ADDR_SPACE_FORCE_USER);
78962 +// c_register_addr_space("__iomem", ADDR_SPACE_IOMEM);
78963 +// c_register_addr_space("__force_iomem", ADDR_SPACE_FORCE_IOMEM);
78964 +// c_register_addr_space("__percpu", ADDR_SPACE_PERCPU);
78965 +// c_register_addr_space("__force_percpu", ADDR_SPACE_FORCE_PERCPU);
78966 +// c_register_addr_space("__rcu", ADDR_SPACE_RCU);
78967 +// c_register_addr_space("__force_rcu", ADDR_SPACE_FORCE_RCU);
78968 +
78969 + targetm.addr_space.pointer_mode = checker_addr_space_pointer_mode;
78970 + targetm.addr_space.address_mode = checker_addr_space_address_mode;
78971 + targetm.addr_space.valid_pointer_mode = checker_addr_space_valid_pointer_mode;
78972 + targetm.addr_space.legitimate_address_p = checker_addr_space_legitimate_address_p;
78973 +// targetm.addr_space.legitimize_address = checker_addr_space_legitimize_address;
78974 + targetm.addr_space.subset_p = checker_addr_space_subset_p;
78975 + targetm.addr_space.convert = checker_addr_space_convert;
78976 +}
78977 +
78978 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
78979 +{
78980 + const char * const plugin_name = plugin_info->base_name;
78981 + const int argc = plugin_info->argc;
78982 + const struct plugin_argument * const argv = plugin_info->argv;
78983 + int i;
78984 +
78985 + if (!plugin_default_version_check(version, &gcc_version)) {
78986 + error(G_("incompatible gcc/plugin versions"));
78987 + return 1;
78988 + }
78989 +
78990 + register_callback(plugin_name, PLUGIN_INFO, NULL, &checker_plugin_info);
78991 +
78992 + for (i = 0; i < argc; ++i)
78993 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
78994 +
78995 + if (TARGET_64BIT == 0)
78996 + return 0;
78997 +
78998 + register_callback(plugin_name, PLUGIN_PRAGMAS, register_checker_address_spaces, NULL);
78999 +
79000 + return 0;
79001 +}
79002 diff --git a/tools/gcc/colorize_plugin.c b/tools/gcc/colorize_plugin.c
79003 new file mode 100644
79004 index 0000000..ee950d0
79005 --- /dev/null
79006 +++ b/tools/gcc/colorize_plugin.c
79007 @@ -0,0 +1,147 @@
79008 +/*
79009 + * Copyright 2012 by PaX Team <pageexec@freemail.hu>
79010 + * Licensed under the GPL v2
79011 + *
79012 + * Note: the choice of the license means that the compilation process is
79013 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
79014 + * but for the kernel it doesn't matter since it doesn't link against
79015 + * any of the gcc libraries
79016 + *
79017 + * gcc plugin to colorize diagnostic output
79018 + *
79019 + */
79020 +
79021 +#include "gcc-plugin.h"
79022 +#include "config.h"
79023 +#include "system.h"
79024 +#include "coretypes.h"
79025 +#include "tree.h"
79026 +#include "tree-pass.h"
79027 +#include "flags.h"
79028 +#include "intl.h"
79029 +#include "toplev.h"
79030 +#include "plugin.h"
79031 +#include "diagnostic.h"
79032 +#include "plugin-version.h"
79033 +#include "tm.h"
79034 +
79035 +int plugin_is_GPL_compatible;
79036 +
79037 +static struct plugin_info colorize_plugin_info = {
79038 + .version = "201203092200",
79039 +};
79040 +
79041 +#define GREEN "\033[32m\033[2m"
79042 +#define LIGHTGREEN "\033[32m\033[1m"
79043 +#define YELLOW "\033[33m\033[2m"
79044 +#define LIGHTYELLOW "\033[33m\033[1m"
79045 +#define RED "\033[31m\033[2m"
79046 +#define LIGHTRED "\033[31m\033[1m"
79047 +#define BLUE "\033[34m\033[2m"
79048 +#define LIGHTBLUE "\033[34m\033[1m"
79049 +#define BRIGHT "\033[m\033[1m"
79050 +#define NORMAL "\033[m"
79051 +
79052 +static diagnostic_starter_fn old_starter;
79053 +static diagnostic_finalizer_fn old_finalizer;
79054 +
79055 +static void start_colorize(diagnostic_context *context, diagnostic_info *diagnostic)
79056 +{
79057 + const char *color;
79058 + char *newprefix;
79059 +
79060 + switch (diagnostic->kind) {
79061 + case DK_NOTE:
79062 + color = LIGHTBLUE;
79063 + break;
79064 +
79065 + case DK_PEDWARN:
79066 + case DK_WARNING:
79067 + color = LIGHTYELLOW;
79068 + break;
79069 +
79070 + case DK_ERROR:
79071 + case DK_FATAL:
79072 + case DK_ICE:
79073 + case DK_PERMERROR:
79074 + case DK_SORRY:
79075 + color = LIGHTRED;
79076 + break;
79077 +
79078 + default:
79079 + color = NORMAL;
79080 + }
79081 +
79082 + old_starter(context, diagnostic);
79083 + if (-1 == asprintf(&newprefix, "%s%s" NORMAL, color, context->printer->prefix))
79084 + return;
79085 + pp_destroy_prefix(context->printer);
79086 + pp_set_prefix(context->printer, newprefix);
79087 +}
79088 +
79089 +static void finalize_colorize(diagnostic_context *context, diagnostic_info *diagnostic)
79090 +{
79091 + old_finalizer(context, diagnostic);
79092 +}
79093 +
79094 +static void colorize_arm(void)
79095 +{
79096 + old_starter = diagnostic_starter(global_dc);
79097 + old_finalizer = diagnostic_finalizer(global_dc);
79098 +
79099 + diagnostic_starter(global_dc) = start_colorize;
79100 + diagnostic_finalizer(global_dc) = finalize_colorize;
79101 +}
79102 +
79103 +static unsigned int execute_colorize_rearm(void)
79104 +{
79105 + if (diagnostic_starter(global_dc) == start_colorize)
79106 + return 0;
79107 +
79108 + colorize_arm();
79109 + return 0;
79110 +}
79111 +
79112 +struct simple_ipa_opt_pass pass_ipa_colorize_rearm = {
79113 + .pass = {
79114 + .type = SIMPLE_IPA_PASS,
79115 + .name = "colorize_rearm",
79116 + .gate = NULL,
79117 + .execute = execute_colorize_rearm,
79118 + .sub = NULL,
79119 + .next = NULL,
79120 + .static_pass_number = 0,
79121 + .tv_id = TV_NONE,
79122 + .properties_required = 0,
79123 + .properties_provided = 0,
79124 + .properties_destroyed = 0,
79125 + .todo_flags_start = 0,
79126 + .todo_flags_finish = 0
79127 + }
79128 +};
79129 +
79130 +static void colorize_start_unit(void *gcc_data, void *user_data)
79131 +{
79132 + colorize_arm();
79133 +}
79134 +
79135 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
79136 +{
79137 + const char * const plugin_name = plugin_info->base_name;
79138 + struct register_pass_info colorize_rearm_pass_info = {
79139 + .pass = &pass_ipa_colorize_rearm.pass,
79140 + .reference_pass_name = "*free_lang_data",
79141 + .ref_pass_instance_number = 0,
79142 + .pos_op = PASS_POS_INSERT_AFTER
79143 + };
79144 +
79145 + if (!plugin_default_version_check(version, &gcc_version)) {
79146 + error(G_("incompatible gcc/plugin versions"));
79147 + return 1;
79148 + }
79149 +
79150 + register_callback(plugin_name, PLUGIN_INFO, NULL, &colorize_plugin_info);
79151 + register_callback(plugin_name, PLUGIN_START_UNIT, &colorize_start_unit, NULL);
79152 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &colorize_rearm_pass_info);
79153 + return 0;
79154 +}
79155 diff --git a/tools/gcc/constify_plugin.c b/tools/gcc/constify_plugin.c
79156 new file mode 100644
79157 index 0000000..704a564
79158 --- /dev/null
79159 +++ b/tools/gcc/constify_plugin.c
79160 @@ -0,0 +1,303 @@
79161 +/*
79162 + * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
79163 + * Copyright 2011 by PaX Team <pageexec@freemail.hu>
79164 + * Licensed under the GPL v2, or (at your option) v3
79165 + *
79166 + * This gcc plugin constifies all structures which contain only function pointers or are explicitly marked for constification.
79167 + *
79168 + * Homepage:
79169 + * http://www.grsecurity.net/~ephox/const_plugin/
79170 + *
79171 + * Usage:
79172 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
79173 + * $ gcc -fplugin=constify_plugin.so test.c -O2
79174 + */
79175 +
79176 +#include "gcc-plugin.h"
79177 +#include "config.h"
79178 +#include "system.h"
79179 +#include "coretypes.h"
79180 +#include "tree.h"
79181 +#include "tree-pass.h"
79182 +#include "flags.h"
79183 +#include "intl.h"
79184 +#include "toplev.h"
79185 +#include "plugin.h"
79186 +#include "diagnostic.h"
79187 +#include "plugin-version.h"
79188 +#include "tm.h"
79189 +#include "function.h"
79190 +#include "basic-block.h"
79191 +#include "gimple.h"
79192 +#include "rtl.h"
79193 +#include "emit-rtl.h"
79194 +#include "tree-flow.h"
79195 +
79196 +#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE)
79197 +
79198 +int plugin_is_GPL_compatible;
79199 +
79200 +static struct plugin_info const_plugin_info = {
79201 + .version = "201111150100",
79202 + .help = "no-constify\tturn off constification\n",
79203 +};
79204 +
79205 +static void constify_type(tree type);
79206 +static bool walk_struct(tree node);
79207 +
79208 +static tree deconstify_type(tree old_type)
79209 +{
79210 + tree new_type, field;
79211 +
79212 + new_type = build_qualified_type(old_type, TYPE_QUALS(old_type) & ~TYPE_QUAL_CONST);
79213 + TYPE_FIELDS(new_type) = copy_list(TYPE_FIELDS(new_type));
79214 + for (field = TYPE_FIELDS(new_type); field; field = TREE_CHAIN(field))
79215 + DECL_FIELD_CONTEXT(field) = new_type;
79216 + TYPE_READONLY(new_type) = 0;
79217 + C_TYPE_FIELDS_READONLY(new_type) = 0;
79218 + return new_type;
79219 +}
79220 +
79221 +static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
79222 +{
79223 + tree type;
79224 +
79225 + *no_add_attrs = true;
79226 + if (TREE_CODE(*node) == FUNCTION_DECL) {
79227 + error("%qE attribute does not apply to functions", name);
79228 + return NULL_TREE;
79229 + }
79230 +
79231 + if (TREE_CODE(*node) == VAR_DECL) {
79232 + error("%qE attribute does not apply to variables", name);
79233 + return NULL_TREE;
79234 + }
79235 +
79236 + if (TYPE_P(*node)) {
79237 + if (TREE_CODE(*node) == RECORD_TYPE || TREE_CODE(*node) == UNION_TYPE)
79238 + *no_add_attrs = false;
79239 + else
79240 + error("%qE attribute applies to struct and union types only", name);
79241 + return NULL_TREE;
79242 + }
79243 +
79244 + type = TREE_TYPE(*node);
79245 +
79246 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) {
79247 + error("%qE attribute applies to struct and union types only", name);
79248 + return NULL_TREE;
79249 + }
79250 +
79251 + if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) {
79252 + error("%qE attribute is already applied to the type", name);
79253 + return NULL_TREE;
79254 + }
79255 +
79256 + if (TREE_CODE(*node) == TYPE_DECL && !TYPE_READONLY(type)) {
79257 + error("%qE attribute used on type that is not constified", name);
79258 + return NULL_TREE;
79259 + }
79260 +
79261 + if (TREE_CODE(*node) == TYPE_DECL) {
79262 + TREE_TYPE(*node) = deconstify_type(type);
79263 + TREE_READONLY(*node) = 0;
79264 + return NULL_TREE;
79265 + }
79266 +
79267 + return NULL_TREE;
79268 +}
79269 +
79270 +static tree handle_do_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
79271 +{
79272 + *no_add_attrs = true;
79273 + if (!TYPE_P(*node)) {
79274 + error("%qE attribute applies to types only", name);
79275 + return NULL_TREE;
79276 + }
79277 +
79278 + if (TREE_CODE(*node) != RECORD_TYPE && TREE_CODE(*node) != UNION_TYPE) {
79279 + error("%qE attribute applies to struct and union types only", name);
79280 + return NULL_TREE;
79281 + }
79282 +
79283 + *no_add_attrs = false;
79284 + constify_type(*node);
79285 + return NULL_TREE;
79286 +}
79287 +
79288 +static struct attribute_spec no_const_attr = {
79289 + .name = "no_const",
79290 + .min_length = 0,
79291 + .max_length = 0,
79292 + .decl_required = false,
79293 + .type_required = false,
79294 + .function_type_required = false,
79295 + .handler = handle_no_const_attribute,
79296 +#if BUILDING_GCC_VERSION >= 4007
79297 + .affects_type_identity = true
79298 +#endif
79299 +};
79300 +
79301 +static struct attribute_spec do_const_attr = {
79302 + .name = "do_const",
79303 + .min_length = 0,
79304 + .max_length = 0,
79305 + .decl_required = false,
79306 + .type_required = false,
79307 + .function_type_required = false,
79308 + .handler = handle_do_const_attribute,
79309 +#if BUILDING_GCC_VERSION >= 4007
79310 + .affects_type_identity = true
79311 +#endif
79312 +};
79313 +
79314 +static void register_attributes(void *event_data, void *data)
79315 +{
79316 + register_attribute(&no_const_attr);
79317 + register_attribute(&do_const_attr);
79318 +}
79319 +
79320 +static void constify_type(tree type)
79321 +{
79322 + TYPE_READONLY(type) = 1;
79323 + C_TYPE_FIELDS_READONLY(type) = 1;
79324 +}
79325 +
79326 +static bool is_fptr(tree field)
79327 +{
79328 + tree ptr = TREE_TYPE(field);
79329 +
79330 + if (TREE_CODE(ptr) != POINTER_TYPE)
79331 + return false;
79332 +
79333 + return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
79334 +}
79335 +
79336 +static bool walk_struct(tree node)
79337 +{
79338 + tree field;
79339 +
79340 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node)))
79341 + return false;
79342 +
79343 + if (TYPE_FIELDS(node) == NULL_TREE)
79344 + return false;
79345 +
79346 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
79347 + tree type = TREE_TYPE(field);
79348 + enum tree_code code = TREE_CODE(type);
79349 + if (code == RECORD_TYPE || code == UNION_TYPE) {
79350 + if (!(walk_struct(type)))
79351 + return false;
79352 + } else if (!is_fptr(field) && !TREE_READONLY(field))
79353 + return false;
79354 + }
79355 + return true;
79356 +}
79357 +
79358 +static void finish_type(void *event_data, void *data)
79359 +{
79360 + tree type = (tree)event_data;
79361 +
79362 + if (type == NULL_TREE)
79363 + return;
79364 +
79365 + if (TYPE_READONLY(type))
79366 + return;
79367 +
79368 + if (walk_struct(type))
79369 + constify_type(type);
79370 +}
79371 +
79372 +static unsigned int check_local_variables(void);
79373 +
79374 +struct gimple_opt_pass pass_local_variable = {
79375 + {
79376 + .type = GIMPLE_PASS,
79377 + .name = "check_local_variables",
79378 + .gate = NULL,
79379 + .execute = check_local_variables,
79380 + .sub = NULL,
79381 + .next = NULL,
79382 + .static_pass_number = 0,
79383 + .tv_id = TV_NONE,
79384 + .properties_required = 0,
79385 + .properties_provided = 0,
79386 + .properties_destroyed = 0,
79387 + .todo_flags_start = 0,
79388 + .todo_flags_finish = 0
79389 + }
79390 +};
79391 +
79392 +static unsigned int check_local_variables(void)
79393 +{
79394 + tree var;
79395 + referenced_var_iterator rvi;
79396 +
79397 +#if BUILDING_GCC_VERSION == 4005
79398 + FOR_EACH_REFERENCED_VAR(var, rvi) {
79399 +#else
79400 + FOR_EACH_REFERENCED_VAR(cfun, var, rvi) {
79401 +#endif
79402 + tree type = TREE_TYPE(var);
79403 +
79404 + if (!DECL_P(var) || TREE_STATIC(var) || DECL_EXTERNAL(var))
79405 + continue;
79406 +
79407 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
79408 + continue;
79409 +
79410 + if (!TYPE_READONLY(type))
79411 + continue;
79412 +
79413 +// if (lookup_attribute("no_const", DECL_ATTRIBUTES(var)))
79414 +// continue;
79415 +
79416 +// if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type)))
79417 +// continue;
79418 +
79419 + if (walk_struct(type)) {
79420 + error("constified variable %qE cannot be local", var);
79421 + return 1;
79422 + }
79423 + }
79424 + return 0;
79425 +}
79426 +
79427 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
79428 +{
79429 + const char * const plugin_name = plugin_info->base_name;
79430 + const int argc = plugin_info->argc;
79431 + const struct plugin_argument * const argv = plugin_info->argv;
79432 + int i;
79433 + bool constify = true;
79434 +
79435 + struct register_pass_info local_variable_pass_info = {
79436 + .pass = &pass_local_variable.pass,
79437 + .reference_pass_name = "*referenced_vars",
79438 + .ref_pass_instance_number = 0,
79439 + .pos_op = PASS_POS_INSERT_AFTER
79440 + };
79441 +
79442 + if (!plugin_default_version_check(version, &gcc_version)) {
79443 + error(G_("incompatible gcc/plugin versions"));
79444 + return 1;
79445 + }
79446 +
79447 + for (i = 0; i < argc; ++i) {
79448 + if (!(strcmp(argv[i].key, "no-constify"))) {
79449 + constify = false;
79450 + continue;
79451 + }
79452 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
79453 + }
79454 +
79455 + register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
79456 + if (constify) {
79457 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
79458 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &local_variable_pass_info);
79459 + }
79460 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
79461 +
79462 + return 0;
79463 +}
79464 diff --git a/tools/gcc/kallocstat_plugin.c b/tools/gcc/kallocstat_plugin.c
79465 new file mode 100644
79466 index 0000000..a5eabce
79467 --- /dev/null
79468 +++ b/tools/gcc/kallocstat_plugin.c
79469 @@ -0,0 +1,167 @@
79470 +/*
79471 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
79472 + * Licensed under the GPL v2
79473 + *
79474 + * Note: the choice of the license means that the compilation process is
79475 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
79476 + * but for the kernel it doesn't matter since it doesn't link against
79477 + * any of the gcc libraries
79478 + *
79479 + * gcc plugin to find the distribution of k*alloc sizes
79480 + *
79481 + * TODO:
79482 + *
79483 + * BUGS:
79484 + * - none known
79485 + */
79486 +#include "gcc-plugin.h"
79487 +#include "config.h"
79488 +#include "system.h"
79489 +#include "coretypes.h"
79490 +#include "tree.h"
79491 +#include "tree-pass.h"
79492 +#include "flags.h"
79493 +#include "intl.h"
79494 +#include "toplev.h"
79495 +#include "plugin.h"
79496 +//#include "expr.h" where are you...
79497 +#include "diagnostic.h"
79498 +#include "plugin-version.h"
79499 +#include "tm.h"
79500 +#include "function.h"
79501 +#include "basic-block.h"
79502 +#include "gimple.h"
79503 +#include "rtl.h"
79504 +#include "emit-rtl.h"
79505 +
79506 +extern void print_gimple_stmt(FILE *, gimple, int, int);
79507 +
79508 +int plugin_is_GPL_compatible;
79509 +
79510 +static const char * const kalloc_functions[] = {
79511 + "__kmalloc",
79512 + "kmalloc",
79513 + "kmalloc_large",
79514 + "kmalloc_node",
79515 + "kmalloc_order",
79516 + "kmalloc_order_trace",
79517 + "kmalloc_slab",
79518 + "kzalloc",
79519 + "kzalloc_node",
79520 +};
79521 +
79522 +static struct plugin_info kallocstat_plugin_info = {
79523 + .version = "201111150100",
79524 +};
79525 +
79526 +static unsigned int execute_kallocstat(void);
79527 +
79528 +static struct gimple_opt_pass kallocstat_pass = {
79529 + .pass = {
79530 + .type = GIMPLE_PASS,
79531 + .name = "kallocstat",
79532 + .gate = NULL,
79533 + .execute = execute_kallocstat,
79534 + .sub = NULL,
79535 + .next = NULL,
79536 + .static_pass_number = 0,
79537 + .tv_id = TV_NONE,
79538 + .properties_required = 0,
79539 + .properties_provided = 0,
79540 + .properties_destroyed = 0,
79541 + .todo_flags_start = 0,
79542 + .todo_flags_finish = 0
79543 + }
79544 +};
79545 +
79546 +static bool is_kalloc(const char *fnname)
79547 +{
79548 + size_t i;
79549 +
79550 + for (i = 0; i < ARRAY_SIZE(kalloc_functions); i++)
79551 + if (!strcmp(fnname, kalloc_functions[i]))
79552 + return true;
79553 + return false;
79554 +}
79555 +
79556 +static unsigned int execute_kallocstat(void)
79557 +{
79558 + basic_block bb;
79559 +
79560 + // 1. loop through BBs and GIMPLE statements
79561 + FOR_EACH_BB(bb) {
79562 + gimple_stmt_iterator gsi;
79563 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
79564 + // gimple match:
79565 + tree fndecl, size;
79566 + gimple call_stmt;
79567 + const char *fnname;
79568 +
79569 + // is it a call
79570 + call_stmt = gsi_stmt(gsi);
79571 + if (!is_gimple_call(call_stmt))
79572 + continue;
79573 + fndecl = gimple_call_fndecl(call_stmt);
79574 + if (fndecl == NULL_TREE)
79575 + continue;
79576 + if (TREE_CODE(fndecl) != FUNCTION_DECL)
79577 + continue;
79578 +
79579 + // is it a call to k*alloc
79580 + fnname = IDENTIFIER_POINTER(DECL_NAME(fndecl));
79581 + if (!is_kalloc(fnname))
79582 + continue;
79583 +
79584 + // is the size arg the result of a simple const assignment
79585 + size = gimple_call_arg(call_stmt, 0);
79586 + while (true) {
79587 + gimple def_stmt;
79588 + expanded_location xloc;
79589 + size_t size_val;
79590 +
79591 + if (TREE_CODE(size) != SSA_NAME)
79592 + break;
79593 + def_stmt = SSA_NAME_DEF_STMT(size);
79594 + if (!def_stmt || !is_gimple_assign(def_stmt))
79595 + break;
79596 + if (gimple_num_ops(def_stmt) != 2)
79597 + break;
79598 + size = gimple_assign_rhs1(def_stmt);
79599 + if (!TREE_CONSTANT(size))
79600 + continue;
79601 + xloc = expand_location(gimple_location(def_stmt));
79602 + if (!xloc.file)
79603 + xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
79604 + size_val = TREE_INT_CST_LOW(size);
79605 + fprintf(stderr, "kallocsize: %8zu %8zx %s %s:%u\n", size_val, size_val, fnname, xloc.file, xloc.line);
79606 + break;
79607 + }
79608 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
79609 +//debug_tree(gimple_call_fn(call_stmt));
79610 +//print_node(stderr, "pax", fndecl, 4);
79611 + }
79612 + }
79613 +
79614 + return 0;
79615 +}
79616 +
79617 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
79618 +{
79619 + const char * const plugin_name = plugin_info->base_name;
79620 + struct register_pass_info kallocstat_pass_info = {
79621 + .pass = &kallocstat_pass.pass,
79622 + .reference_pass_name = "ssa",
79623 + .ref_pass_instance_number = 0,
79624 + .pos_op = PASS_POS_INSERT_AFTER
79625 + };
79626 +
79627 + if (!plugin_default_version_check(version, &gcc_version)) {
79628 + error(G_("incompatible gcc/plugin versions"));
79629 + return 1;
79630 + }
79631 +
79632 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kallocstat_plugin_info);
79633 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kallocstat_pass_info);
79634 +
79635 + return 0;
79636 +}
79637 diff --git a/tools/gcc/kernexec_plugin.c b/tools/gcc/kernexec_plugin.c
79638 new file mode 100644
79639 index 0000000..008f159
79640 --- /dev/null
79641 +++ b/tools/gcc/kernexec_plugin.c
79642 @@ -0,0 +1,427 @@
79643 +/*
79644 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
79645 + * Licensed under the GPL v2
79646 + *
79647 + * Note: the choice of the license means that the compilation process is
79648 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
79649 + * but for the kernel it doesn't matter since it doesn't link against
79650 + * any of the gcc libraries
79651 + *
79652 + * gcc plugin to make KERNEXEC/amd64 almost as good as it is on i386
79653 + *
79654 + * TODO:
79655 + *
79656 + * BUGS:
79657 + * - none known
79658 + */
79659 +#include "gcc-plugin.h"
79660 +#include "config.h"
79661 +#include "system.h"
79662 +#include "coretypes.h"
79663 +#include "tree.h"
79664 +#include "tree-pass.h"
79665 +#include "flags.h"
79666 +#include "intl.h"
79667 +#include "toplev.h"
79668 +#include "plugin.h"
79669 +//#include "expr.h" where are you...
79670 +#include "diagnostic.h"
79671 +#include "plugin-version.h"
79672 +#include "tm.h"
79673 +#include "function.h"
79674 +#include "basic-block.h"
79675 +#include "gimple.h"
79676 +#include "rtl.h"
79677 +#include "emit-rtl.h"
79678 +#include "tree-flow.h"
79679 +
79680 +extern void print_gimple_stmt(FILE *, gimple, int, int);
79681 +extern rtx emit_move_insn(rtx x, rtx y);
79682 +
79683 +int plugin_is_GPL_compatible;
79684 +
79685 +static struct plugin_info kernexec_plugin_info = {
79686 + .version = "201111291120",
79687 + .help = "method=[bts|or]\tinstrumentation method\n"
79688 +};
79689 +
79690 +static unsigned int execute_kernexec_reload(void);
79691 +static unsigned int execute_kernexec_fptr(void);
79692 +static unsigned int execute_kernexec_retaddr(void);
79693 +static bool kernexec_cmodel_check(void);
79694 +
79695 +static void (*kernexec_instrument_fptr)(gimple_stmt_iterator *);
79696 +static void (*kernexec_instrument_retaddr)(rtx);
79697 +
79698 +static struct gimple_opt_pass kernexec_reload_pass = {
79699 + .pass = {
79700 + .type = GIMPLE_PASS,
79701 + .name = "kernexec_reload",
79702 + .gate = kernexec_cmodel_check,
79703 + .execute = execute_kernexec_reload,
79704 + .sub = NULL,
79705 + .next = NULL,
79706 + .static_pass_number = 0,
79707 + .tv_id = TV_NONE,
79708 + .properties_required = 0,
79709 + .properties_provided = 0,
79710 + .properties_destroyed = 0,
79711 + .todo_flags_start = 0,
79712 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
79713 + }
79714 +};
79715 +
79716 +static struct gimple_opt_pass kernexec_fptr_pass = {
79717 + .pass = {
79718 + .type = GIMPLE_PASS,
79719 + .name = "kernexec_fptr",
79720 + .gate = kernexec_cmodel_check,
79721 + .execute = execute_kernexec_fptr,
79722 + .sub = NULL,
79723 + .next = NULL,
79724 + .static_pass_number = 0,
79725 + .tv_id = TV_NONE,
79726 + .properties_required = 0,
79727 + .properties_provided = 0,
79728 + .properties_destroyed = 0,
79729 + .todo_flags_start = 0,
79730 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
79731 + }
79732 +};
79733 +
79734 +static struct rtl_opt_pass kernexec_retaddr_pass = {
79735 + .pass = {
79736 + .type = RTL_PASS,
79737 + .name = "kernexec_retaddr",
79738 + .gate = kernexec_cmodel_check,
79739 + .execute = execute_kernexec_retaddr,
79740 + .sub = NULL,
79741 + .next = NULL,
79742 + .static_pass_number = 0,
79743 + .tv_id = TV_NONE,
79744 + .properties_required = 0,
79745 + .properties_provided = 0,
79746 + .properties_destroyed = 0,
79747 + .todo_flags_start = 0,
79748 + .todo_flags_finish = TODO_dump_func | TODO_ggc_collect
79749 + }
79750 +};
79751 +
79752 +static bool kernexec_cmodel_check(void)
79753 +{
79754 + tree section;
79755 +
79756 + if (ix86_cmodel != CM_KERNEL)
79757 + return false;
79758 +
79759 + section = lookup_attribute("section", DECL_ATTRIBUTES(current_function_decl));
79760 + if (!section || !TREE_VALUE(section))
79761 + return true;
79762 +
79763 + section = TREE_VALUE(TREE_VALUE(section));
79764 + if (strncmp(TREE_STRING_POINTER(section), ".vsyscall_", 10))
79765 + return true;
79766 +
79767 + return false;
79768 +}
79769 +
79770 +/*
79771 + * add special KERNEXEC instrumentation: reload %r10 after it has been clobbered
79772 + */
79773 +static void kernexec_reload_fptr_mask(gimple_stmt_iterator *gsi)
79774 +{
79775 + gimple asm_movabs_stmt;
79776 +
79777 + // build asm volatile("movabs $0x8000000000000000, %%r10\n\t" : : : );
79778 + asm_movabs_stmt = gimple_build_asm_vec("movabs $0x8000000000000000, %%r10\n\t", NULL, NULL, NULL, NULL);
79779 + gimple_asm_set_volatile(asm_movabs_stmt, true);
79780 + gsi_insert_after(gsi, asm_movabs_stmt, GSI_CONTINUE_LINKING);
79781 + update_stmt(asm_movabs_stmt);
79782 +}
79783 +
79784 +/*
79785 + * find all asm() stmts that clobber r10 and add a reload of r10
79786 + */
79787 +static unsigned int execute_kernexec_reload(void)
79788 +{
79789 + basic_block bb;
79790 +
79791 + // 1. loop through BBs and GIMPLE statements
79792 + FOR_EACH_BB(bb) {
79793 + gimple_stmt_iterator gsi;
79794 +
79795 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
79796 + // gimple match: __asm__ ("" : : : "r10");
79797 + gimple asm_stmt;
79798 + size_t nclobbers;
79799 +
79800 + // is it an asm ...
79801 + asm_stmt = gsi_stmt(gsi);
79802 + if (gimple_code(asm_stmt) != GIMPLE_ASM)
79803 + continue;
79804 +
79805 + // ... clobbering r10
79806 + nclobbers = gimple_asm_nclobbers(asm_stmt);
79807 + while (nclobbers--) {
79808 + tree op = gimple_asm_clobber_op(asm_stmt, nclobbers);
79809 + if (strcmp(TREE_STRING_POINTER(TREE_VALUE(op)), "r10"))
79810 + continue;
79811 + kernexec_reload_fptr_mask(&gsi);
79812 +//print_gimple_stmt(stderr, asm_stmt, 0, TDF_LINENO);
79813 + break;
79814 + }
79815 + }
79816 + }
79817 +
79818 + return 0;
79819 +}
79820 +
79821 +/*
79822 + * add special KERNEXEC instrumentation: force MSB of fptr to 1, which will produce
79823 + * a non-canonical address from a userland ptr and will just trigger a GPF on dereference
79824 + */
79825 +static void kernexec_instrument_fptr_bts(gimple_stmt_iterator *gsi)
79826 +{
79827 + gimple assign_intptr, assign_new_fptr, call_stmt;
79828 + tree intptr, old_fptr, new_fptr, kernexec_mask;
79829 +
79830 + call_stmt = gsi_stmt(*gsi);
79831 + old_fptr = gimple_call_fn(call_stmt);
79832 +
79833 + // create temporary unsigned long variable used for bitops and cast fptr to it
79834 + intptr = create_tmp_var(long_unsigned_type_node, "kernexec_bts");
79835 + add_referenced_var(intptr);
79836 + mark_sym_for_renaming(intptr);
79837 + assign_intptr = gimple_build_assign(intptr, fold_convert(long_unsigned_type_node, old_fptr));
79838 + gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
79839 + update_stmt(assign_intptr);
79840 +
79841 + // apply logical or to temporary unsigned long and bitmask
79842 + kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0x8000000000000000LL);
79843 +// kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0xffffffff80000000LL);
79844 + assign_intptr = gimple_build_assign(intptr, fold_build2(BIT_IOR_EXPR, long_long_unsigned_type_node, intptr, kernexec_mask));
79845 + gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
79846 + update_stmt(assign_intptr);
79847 +
79848 + // cast temporary unsigned long back to a temporary fptr variable
79849 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec");
79850 + add_referenced_var(new_fptr);
79851 + mark_sym_for_renaming(new_fptr);
79852 + assign_new_fptr = gimple_build_assign(new_fptr, fold_convert(TREE_TYPE(old_fptr), intptr));
79853 + gsi_insert_before(gsi, assign_new_fptr, GSI_SAME_STMT);
79854 + update_stmt(assign_new_fptr);
79855 +
79856 + // replace call stmt fn with the new fptr
79857 + gimple_call_set_fn(call_stmt, new_fptr);
79858 + update_stmt(call_stmt);
79859 +}
79860 +
79861 +static void kernexec_instrument_fptr_or(gimple_stmt_iterator *gsi)
79862 +{
79863 + gimple asm_or_stmt, call_stmt;
79864 + tree old_fptr, new_fptr, input, output;
79865 + VEC(tree, gc) *inputs = NULL;
79866 + VEC(tree, gc) *outputs = NULL;
79867 +
79868 + call_stmt = gsi_stmt(*gsi);
79869 + old_fptr = gimple_call_fn(call_stmt);
79870 +
79871 + // create temporary fptr variable
79872 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_or");
79873 + add_referenced_var(new_fptr);
79874 + mark_sym_for_renaming(new_fptr);
79875 +
79876 + // build asm volatile("orq %%r10, %0\n\t" : "=r"(new_fptr) : "0"(old_fptr));
79877 + input = build_tree_list(NULL_TREE, build_string(2, "0"));
79878 + input = chainon(NULL_TREE, build_tree_list(input, old_fptr));
79879 + output = build_tree_list(NULL_TREE, build_string(3, "=r"));
79880 + output = chainon(NULL_TREE, build_tree_list(output, new_fptr));
79881 + VEC_safe_push(tree, gc, inputs, input);
79882 + VEC_safe_push(tree, gc, outputs, output);
79883 + asm_or_stmt = gimple_build_asm_vec("orq %%r10, %0\n\t", inputs, outputs, NULL, NULL);
79884 + gimple_asm_set_volatile(asm_or_stmt, true);
79885 + gsi_insert_before(gsi, asm_or_stmt, GSI_SAME_STMT);
79886 + update_stmt(asm_or_stmt);
79887 +
79888 + // replace call stmt fn with the new fptr
79889 + gimple_call_set_fn(call_stmt, new_fptr);
79890 + update_stmt(call_stmt);
79891 +}
79892 +
79893 +/*
79894 + * find all C level function pointer dereferences and forcibly set the highest bit of the pointer
79895 + */
79896 +static unsigned int execute_kernexec_fptr(void)
79897 +{
79898 + basic_block bb;
79899 +
79900 + // 1. loop through BBs and GIMPLE statements
79901 + FOR_EACH_BB(bb) {
79902 + gimple_stmt_iterator gsi;
79903 +
79904 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
79905 + // gimple match: h_1 = get_fptr (); D.2709_3 = h_1 (x_2(D));
79906 + tree fn;
79907 + gimple call_stmt;
79908 +
79909 + // is it a call ...
79910 + call_stmt = gsi_stmt(gsi);
79911 + if (!is_gimple_call(call_stmt))
79912 + continue;
79913 + fn = gimple_call_fn(call_stmt);
79914 + if (TREE_CODE(fn) == ADDR_EXPR)
79915 + continue;
79916 + if (TREE_CODE(fn) != SSA_NAME)
79917 + gcc_unreachable();
79918 +
79919 + // ... through a function pointer
79920 + fn = SSA_NAME_VAR(fn);
79921 + if (TREE_CODE(fn) != VAR_DECL && TREE_CODE(fn) != PARM_DECL)
79922 + continue;
79923 + fn = TREE_TYPE(fn);
79924 + if (TREE_CODE(fn) != POINTER_TYPE)
79925 + continue;
79926 + fn = TREE_TYPE(fn);
79927 + if (TREE_CODE(fn) != FUNCTION_TYPE)
79928 + continue;
79929 +
79930 + kernexec_instrument_fptr(&gsi);
79931 +
79932 +//debug_tree(gimple_call_fn(call_stmt));
79933 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
79934 + }
79935 + }
79936 +
79937 + return 0;
79938 +}
79939 +
79940 +// add special KERNEXEC instrumentation: btsq $63,(%rsp) just before retn
79941 +static void kernexec_instrument_retaddr_bts(rtx insn)
79942 +{
79943 + rtx btsq;
79944 + rtvec argvec, constraintvec, labelvec;
79945 + int line;
79946 +
79947 + // create asm volatile("btsq $63,(%%rsp)":::)
79948 + argvec = rtvec_alloc(0);
79949 + constraintvec = rtvec_alloc(0);
79950 + labelvec = rtvec_alloc(0);
79951 + line = expand_location(RTL_LOCATION(insn)).line;
79952 + btsq = gen_rtx_ASM_OPERANDS(VOIDmode, "btsq $63,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
79953 + MEM_VOLATILE_P(btsq) = 1;
79954 +// RTX_FRAME_RELATED_P(btsq) = 1; // not for ASM_OPERANDS
79955 + emit_insn_before(btsq, insn);
79956 +}
79957 +
79958 +// add special KERNEXEC instrumentation: orq %r10,(%rsp) just before retn
79959 +static void kernexec_instrument_retaddr_or(rtx insn)
79960 +{
79961 + rtx orq;
79962 + rtvec argvec, constraintvec, labelvec;
79963 + int line;
79964 +
79965 + // create asm volatile("orq %%r10,(%%rsp)":::)
79966 + argvec = rtvec_alloc(0);
79967 + constraintvec = rtvec_alloc(0);
79968 + labelvec = rtvec_alloc(0);
79969 + line = expand_location(RTL_LOCATION(insn)).line;
79970 + orq = gen_rtx_ASM_OPERANDS(VOIDmode, "orq %%r10,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
79971 + MEM_VOLATILE_P(orq) = 1;
79972 +// RTX_FRAME_RELATED_P(orq) = 1; // not for ASM_OPERANDS
79973 + emit_insn_before(orq, insn);
79974 +}
79975 +
79976 +/*
79977 + * find all asm level function returns and forcibly set the highest bit of the return address
79978 + */
79979 +static unsigned int execute_kernexec_retaddr(void)
79980 +{
79981 + rtx insn;
79982 +
79983 + // 1. find function returns
79984 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
79985 + // rtl match: (jump_insn 41 40 42 2 (return) fptr.c:42 634 {return_internal} (nil))
79986 + // (jump_insn 12 9 11 2 (parallel [ (return) (unspec [ (0) ] UNSPEC_REP) ]) fptr.c:46 635 {return_internal_long} (nil))
79987 + rtx body;
79988 +
79989 + // is it a retn
79990 + if (!JUMP_P(insn))
79991 + continue;
79992 + body = PATTERN(insn);
79993 + if (GET_CODE(body) == PARALLEL)
79994 + body = XVECEXP(body, 0, 0);
79995 + if (GET_CODE(body) != RETURN)
79996 + continue;
79997 + kernexec_instrument_retaddr(insn);
79998 + }
79999 +
80000 +// print_simple_rtl(stderr, get_insns());
80001 +// print_rtl(stderr, get_insns());
80002 +
80003 + return 0;
80004 +}
80005 +
80006 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
80007 +{
80008 + const char * const plugin_name = plugin_info->base_name;
80009 + const int argc = plugin_info->argc;
80010 + const struct plugin_argument * const argv = plugin_info->argv;
80011 + int i;
80012 + struct register_pass_info kernexec_reload_pass_info = {
80013 + .pass = &kernexec_reload_pass.pass,
80014 + .reference_pass_name = "ssa",
80015 + .ref_pass_instance_number = 0,
80016 + .pos_op = PASS_POS_INSERT_AFTER
80017 + };
80018 + struct register_pass_info kernexec_fptr_pass_info = {
80019 + .pass = &kernexec_fptr_pass.pass,
80020 + .reference_pass_name = "ssa",
80021 + .ref_pass_instance_number = 0,
80022 + .pos_op = PASS_POS_INSERT_AFTER
80023 + };
80024 + struct register_pass_info kernexec_retaddr_pass_info = {
80025 + .pass = &kernexec_retaddr_pass.pass,
80026 + .reference_pass_name = "pro_and_epilogue",
80027 + .ref_pass_instance_number = 0,
80028 + .pos_op = PASS_POS_INSERT_AFTER
80029 + };
80030 +
80031 + if (!plugin_default_version_check(version, &gcc_version)) {
80032 + error(G_("incompatible gcc/plugin versions"));
80033 + return 1;
80034 + }
80035 +
80036 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kernexec_plugin_info);
80037 +
80038 + if (TARGET_64BIT == 0)
80039 + return 0;
80040 +
80041 + for (i = 0; i < argc; ++i) {
80042 + if (!strcmp(argv[i].key, "method")) {
80043 + if (!argv[i].value) {
80044 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
80045 + continue;
80046 + }
80047 + if (!strcmp(argv[i].value, "bts")) {
80048 + kernexec_instrument_fptr = kernexec_instrument_fptr_bts;
80049 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_bts;
80050 + } else if (!strcmp(argv[i].value, "or")) {
80051 + kernexec_instrument_fptr = kernexec_instrument_fptr_or;
80052 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_or;
80053 + fix_register("r10", 1, 1);
80054 + } else
80055 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
80056 + continue;
80057 + }
80058 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
80059 + }
80060 + if (!kernexec_instrument_fptr || !kernexec_instrument_retaddr)
80061 + error(G_("no instrumentation method was selected via '-fplugin-arg-%s-method'"), plugin_name);
80062 +
80063 + if (kernexec_instrument_fptr == kernexec_instrument_fptr_or)
80064 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_reload_pass_info);
80065 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_fptr_pass_info);
80066 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_retaddr_pass_info);
80067 +
80068 + return 0;
80069 +}
80070 diff --git a/tools/gcc/stackleak_plugin.c b/tools/gcc/stackleak_plugin.c
80071 new file mode 100644
80072 index 0000000..b87ec9d
80073 --- /dev/null
80074 +++ b/tools/gcc/stackleak_plugin.c
80075 @@ -0,0 +1,313 @@
80076 +/*
80077 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
80078 + * Licensed under the GPL v2
80079 + *
80080 + * Note: the choice of the license means that the compilation process is
80081 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
80082 + * but for the kernel it doesn't matter since it doesn't link against
80083 + * any of the gcc libraries
80084 + *
80085 + * gcc plugin to help implement various PaX features
80086 + *
80087 + * - track lowest stack pointer
80088 + *
80089 + * TODO:
80090 + * - initialize all local variables
80091 + *
80092 + * BUGS:
80093 + * - none known
80094 + */
80095 +#include "gcc-plugin.h"
80096 +#include "config.h"
80097 +#include "system.h"
80098 +#include "coretypes.h"
80099 +#include "tree.h"
80100 +#include "tree-pass.h"
80101 +#include "flags.h"
80102 +#include "intl.h"
80103 +#include "toplev.h"
80104 +#include "plugin.h"
80105 +//#include "expr.h" where are you...
80106 +#include "diagnostic.h"
80107 +#include "plugin-version.h"
80108 +#include "tm.h"
80109 +#include "function.h"
80110 +#include "basic-block.h"
80111 +#include "gimple.h"
80112 +#include "rtl.h"
80113 +#include "emit-rtl.h"
80114 +
80115 +extern void print_gimple_stmt(FILE *, gimple, int, int);
80116 +
80117 +int plugin_is_GPL_compatible;
80118 +
80119 +static int track_frame_size = -1;
80120 +static const char track_function[] = "pax_track_stack";
80121 +static const char check_function[] = "pax_check_alloca";
80122 +static bool init_locals;
80123 +
80124 +static struct plugin_info stackleak_plugin_info = {
80125 + .version = "201203140940",
80126 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
80127 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
80128 +};
80129 +
80130 +static bool gate_stackleak_track_stack(void);
80131 +static unsigned int execute_stackleak_tree_instrument(void);
80132 +static unsigned int execute_stackleak_final(void);
80133 +
80134 +static struct gimple_opt_pass stackleak_tree_instrument_pass = {
80135 + .pass = {
80136 + .type = GIMPLE_PASS,
80137 + .name = "stackleak_tree_instrument",
80138 + .gate = gate_stackleak_track_stack,
80139 + .execute = execute_stackleak_tree_instrument,
80140 + .sub = NULL,
80141 + .next = NULL,
80142 + .static_pass_number = 0,
80143 + .tv_id = TV_NONE,
80144 + .properties_required = PROP_gimple_leh | PROP_cfg,
80145 + .properties_provided = 0,
80146 + .properties_destroyed = 0,
80147 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
80148 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
80149 + }
80150 +};
80151 +
80152 +static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
80153 + .pass = {
80154 + .type = RTL_PASS,
80155 + .name = "stackleak_final",
80156 + .gate = gate_stackleak_track_stack,
80157 + .execute = execute_stackleak_final,
80158 + .sub = NULL,
80159 + .next = NULL,
80160 + .static_pass_number = 0,
80161 + .tv_id = TV_NONE,
80162 + .properties_required = 0,
80163 + .properties_provided = 0,
80164 + .properties_destroyed = 0,
80165 + .todo_flags_start = 0,
80166 + .todo_flags_finish = TODO_dump_func
80167 + }
80168 +};
80169 +
80170 +static bool gate_stackleak_track_stack(void)
80171 +{
80172 + return track_frame_size >= 0;
80173 +}
80174 +
80175 +static void stackleak_check_alloca(gimple_stmt_iterator *gsi)
80176 +{
80177 + gimple check_alloca;
80178 + tree fntype, fndecl, alloca_size;
80179 +
80180 + fntype = build_function_type_list(void_type_node, long_unsigned_type_node, NULL_TREE);
80181 + fndecl = build_fn_decl(check_function, fntype);
80182 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
80183 +
80184 + // insert call to void pax_check_alloca(unsigned long size)
80185 + alloca_size = gimple_call_arg(gsi_stmt(*gsi), 0);
80186 + check_alloca = gimple_build_call(fndecl, 1, alloca_size);
80187 + gsi_insert_before(gsi, check_alloca, GSI_SAME_STMT);
80188 +}
80189 +
80190 +static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi)
80191 +{
80192 + gimple track_stack;
80193 + tree fntype, fndecl;
80194 +
80195 + fntype = build_function_type_list(void_type_node, NULL_TREE);
80196 + fndecl = build_fn_decl(track_function, fntype);
80197 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
80198 +
80199 + // insert call to void pax_track_stack(void)
80200 + track_stack = gimple_build_call(fndecl, 0);
80201 + gsi_insert_after(gsi, track_stack, GSI_CONTINUE_LINKING);
80202 +}
80203 +
80204 +#if BUILDING_GCC_VERSION == 4005
80205 +static bool gimple_call_builtin_p(gimple stmt, enum built_in_function code)
80206 +{
80207 + tree fndecl;
80208 +
80209 + if (!is_gimple_call(stmt))
80210 + return false;
80211 + fndecl = gimple_call_fndecl(stmt);
80212 + if (!fndecl)
80213 + return false;
80214 + if (DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_NORMAL)
80215 + return false;
80216 +// print_node(stderr, "pax", fndecl, 4);
80217 + return DECL_FUNCTION_CODE(fndecl) == code;
80218 +}
80219 +#endif
80220 +
80221 +static bool is_alloca(gimple stmt)
80222 +{
80223 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA))
80224 + return true;
80225 +
80226 +#if BUILDING_GCC_VERSION >= 4007
80227 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
80228 + return true;
80229 +#endif
80230 +
80231 + return false;
80232 +}
80233 +
80234 +static unsigned int execute_stackleak_tree_instrument(void)
80235 +{
80236 + basic_block bb, entry_bb;
80237 + bool prologue_instrumented = false, is_leaf = true;
80238 +
80239 + entry_bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
80240 +
80241 + // 1. loop through BBs and GIMPLE statements
80242 + FOR_EACH_BB(bb) {
80243 + gimple_stmt_iterator gsi;
80244 +
80245 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
80246 + gimple stmt;
80247 +
80248 + stmt = gsi_stmt(gsi);
80249 +
80250 + if (is_gimple_call(stmt))
80251 + is_leaf = false;
80252 +
80253 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
80254 + if (!is_alloca(stmt))
80255 + continue;
80256 +
80257 + // 2. insert stack overflow check before each __builtin_alloca call
80258 + stackleak_check_alloca(&gsi);
80259 +
80260 + // 3. insert track call after each __builtin_alloca call
80261 + stackleak_add_instrumentation(&gsi);
80262 + if (bb == entry_bb)
80263 + prologue_instrumented = true;
80264 + }
80265 + }
80266 +
80267 + // special cases for some bad linux code: taking the address of static inline functions will materialize them
80268 + // but we mustn't instrument some of them as the resulting stack alignment required by the function call ABI
80269 + // will break other assumptions regarding the expected (but not otherwise enforced) register clobbering ABI.
80270 + // case in point: native_save_fl on amd64 when optimized for size clobbers rdx if it were instrumented here.
80271 + if (is_leaf && !TREE_PUBLIC(current_function_decl) && DECL_DECLARED_INLINE_P(current_function_decl))
80272 + return 0;
80273 + if (is_leaf && !strncmp(IDENTIFIER_POINTER(DECL_NAME(current_function_decl)), "_paravirt_", 10))
80274 + return 0;
80275 +
80276 + // 4. insert track call at the beginning
80277 + if (!prologue_instrumented) {
80278 + gimple_stmt_iterator gsi;
80279 +
80280 + bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
80281 + if (dom_info_available_p(CDI_DOMINATORS))
80282 + set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
80283 + gsi = gsi_start_bb(bb);
80284 + stackleak_add_instrumentation(&gsi);
80285 + }
80286 +
80287 + return 0;
80288 +}
80289 +
80290 +static unsigned int execute_stackleak_final(void)
80291 +{
80292 + rtx insn;
80293 +
80294 + if (cfun->calls_alloca)
80295 + return 0;
80296 +
80297 + // keep calls only if function frame is big enough
80298 + if (get_frame_size() >= track_frame_size)
80299 + return 0;
80300 +
80301 + // 1. find pax_track_stack calls
80302 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
80303 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
80304 + rtx body;
80305 +
80306 + if (!CALL_P(insn))
80307 + continue;
80308 + body = PATTERN(insn);
80309 + if (GET_CODE(body) != CALL)
80310 + continue;
80311 + body = XEXP(body, 0);
80312 + if (GET_CODE(body) != MEM)
80313 + continue;
80314 + body = XEXP(body, 0);
80315 + if (GET_CODE(body) != SYMBOL_REF)
80316 + continue;
80317 + if (strcmp(XSTR(body, 0), track_function))
80318 + continue;
80319 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
80320 + // 2. delete call
80321 + insn = delete_insn_and_edges(insn);
80322 +#if BUILDING_GCC_VERSION >= 4007
80323 + if (GET_CODE(insn) == NOTE && NOTE_KIND(insn) == NOTE_INSN_CALL_ARG_LOCATION)
80324 + insn = delete_insn_and_edges(insn);
80325 +#endif
80326 + }
80327 +
80328 +// print_simple_rtl(stderr, get_insns());
80329 +// print_rtl(stderr, get_insns());
80330 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
80331 +
80332 + return 0;
80333 +}
80334 +
80335 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
80336 +{
80337 + const char * const plugin_name = plugin_info->base_name;
80338 + const int argc = plugin_info->argc;
80339 + const struct plugin_argument * const argv = plugin_info->argv;
80340 + int i;
80341 + struct register_pass_info stackleak_tree_instrument_pass_info = {
80342 + .pass = &stackleak_tree_instrument_pass.pass,
80343 +// .reference_pass_name = "tree_profile",
80344 + .reference_pass_name = "optimized",
80345 + .ref_pass_instance_number = 0,
80346 + .pos_op = PASS_POS_INSERT_BEFORE
80347 + };
80348 + struct register_pass_info stackleak_final_pass_info = {
80349 + .pass = &stackleak_final_rtl_opt_pass.pass,
80350 + .reference_pass_name = "final",
80351 + .ref_pass_instance_number = 0,
80352 + .pos_op = PASS_POS_INSERT_BEFORE
80353 + };
80354 +
80355 + if (!plugin_default_version_check(version, &gcc_version)) {
80356 + error(G_("incompatible gcc/plugin versions"));
80357 + return 1;
80358 + }
80359 +
80360 + register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
80361 +
80362 + for (i = 0; i < argc; ++i) {
80363 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
80364 + if (!argv[i].value) {
80365 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
80366 + continue;
80367 + }
80368 + track_frame_size = atoi(argv[i].value);
80369 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
80370 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
80371 + continue;
80372 + }
80373 + if (!strcmp(argv[i].key, "initialize-locals")) {
80374 + if (argv[i].value) {
80375 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
80376 + continue;
80377 + }
80378 + init_locals = true;
80379 + continue;
80380 + }
80381 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
80382 + }
80383 +
80384 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
80385 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
80386 +
80387 + return 0;
80388 +}
80389 diff --git a/tools/perf/util/include/asm/alternative-asm.h b/tools/perf/util/include/asm/alternative-asm.h
80390 index 6789d78..4afd019 100644
80391 --- a/tools/perf/util/include/asm/alternative-asm.h
80392 +++ b/tools/perf/util/include/asm/alternative-asm.h
80393 @@ -5,4 +5,7 @@
80394
80395 #define altinstruction_entry #
80396
80397 + .macro pax_force_retaddr rip=0, reload=0
80398 + .endm
80399 +
80400 #endif
80401 diff --git a/usr/gen_init_cpio.c b/usr/gen_init_cpio.c
80402 index af0f22f..9a7d479 100644
80403 --- a/usr/gen_init_cpio.c
80404 +++ b/usr/gen_init_cpio.c
80405 @@ -303,7 +303,7 @@ static int cpio_mkfile(const char *name, const char *location,
80406 int retval;
80407 int rc = -1;
80408 int namesize;
80409 - int i;
80410 + unsigned int i;
80411
80412 mode |= S_IFREG;
80413
80414 @@ -392,9 +392,10 @@ static char *cpio_replace_env(char *new_location)
80415 *env_var = *expanded = '\0';
80416 strncat(env_var, start + 2, end - start - 2);
80417 strncat(expanded, new_location, start - new_location);
80418 - strncat(expanded, getenv(env_var), PATH_MAX);
80419 - strncat(expanded, end + 1, PATH_MAX);
80420 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
80421 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
80422 strncpy(new_location, expanded, PATH_MAX);
80423 + new_location[PATH_MAX] = 0;
80424 } else
80425 break;
80426 }
80427 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
80428 index a91f980..a58d32c 100644
80429 --- a/virt/kvm/kvm_main.c
80430 +++ b/virt/kvm/kvm_main.c
80431 @@ -75,7 +75,7 @@ LIST_HEAD(vm_list);
80432
80433 static cpumask_var_t cpus_hardware_enabled;
80434 static int kvm_usage_count = 0;
80435 -static atomic_t hardware_enable_failed;
80436 +static atomic_unchecked_t hardware_enable_failed;
80437
80438 struct kmem_cache *kvm_vcpu_cache;
80439 EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
80440 @@ -2312,7 +2312,7 @@ static void hardware_enable_nolock(void *junk)
80441
80442 if (r) {
80443 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
80444 - atomic_inc(&hardware_enable_failed);
80445 + atomic_inc_unchecked(&hardware_enable_failed);
80446 printk(KERN_INFO "kvm: enabling virtualization on "
80447 "CPU%d failed\n", cpu);
80448 }
80449 @@ -2366,10 +2366,10 @@ static int hardware_enable_all(void)
80450
80451 kvm_usage_count++;
80452 if (kvm_usage_count == 1) {
80453 - atomic_set(&hardware_enable_failed, 0);
80454 + atomic_set_unchecked(&hardware_enable_failed, 0);
80455 on_each_cpu(hardware_enable_nolock, NULL, 1);
80456
80457 - if (atomic_read(&hardware_enable_failed)) {
80458 + if (atomic_read_unchecked(&hardware_enable_failed)) {
80459 hardware_disable_all_nolock();
80460 r = -EBUSY;
80461 }
80462 @@ -2732,7 +2732,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
80463 kvm_arch_vcpu_put(vcpu);
80464 }
80465
80466 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
80467 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
80468 struct module *module)
80469 {
80470 int r;
80471 @@ -2795,7 +2795,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
80472 if (!vcpu_align)
80473 vcpu_align = __alignof__(struct kvm_vcpu);
80474 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
80475 - 0, NULL);
80476 + SLAB_USERCOPY, NULL);
80477 if (!kvm_vcpu_cache) {
80478 r = -ENOMEM;
80479 goto out_free_3;
80480 @@ -2805,9 +2805,11 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
80481 if (r)
80482 goto out_free;
80483
80484 - kvm_chardev_ops.owner = module;
80485 - kvm_vm_fops.owner = module;
80486 - kvm_vcpu_fops.owner = module;
80487 + pax_open_kernel();
80488 + *(void **)&kvm_chardev_ops.owner = module;
80489 + *(void **)&kvm_vm_fops.owner = module;
80490 + *(void **)&kvm_vcpu_fops.owner = module;
80491 + pax_close_kernel();
80492
80493 r = misc_register(&kvm_dev);
80494 if (r) {