]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blob - test/grsecurity-2.9-3.3.0-201203251922.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.9-3.3.0-201203251922.patch
1 diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2 index 0c083c5..9c2512a 100644
3 --- a/Documentation/dontdiff
4 +++ b/Documentation/dontdiff
5 @@ -5,6 +5,7 @@
6 *.cis
7 *.cpio
8 *.csp
9 +*.dbg
10 *.dsp
11 *.dvi
12 *.elf
13 @@ -14,6 +15,7 @@
14 *.gcov
15 *.gen.S
16 *.gif
17 +*.gmo
18 *.grep
19 *.grp
20 *.gz
21 @@ -48,9 +50,11 @@
22 *.tab.h
23 *.tex
24 *.ver
25 +*.vim
26 *.xml
27 *.xz
28 *_MODULES
29 +*_reg_safe.h
30 *_vga16.c
31 *~
32 \#*#
33 @@ -69,6 +73,7 @@ Image
34 Module.markers
35 Module.symvers
36 PENDING
37 +PERF*
38 SCCS
39 System.map*
40 TAGS
41 @@ -92,19 +97,24 @@ bounds.h
42 bsetup
43 btfixupprep
44 build
45 +builtin-policy.h
46 bvmlinux
47 bzImage*
48 capability_names.h
49 capflags.c
50 classlist.h*
51 +clut_vga16.c
52 +common-cmds.h
53 comp*.log
54 compile.h*
55 conf
56 config
57 config-*
58 config_data.h*
59 +config.c
60 config.mak
61 config.mak.autogen
62 +config.tmp
63 conmakehash
64 consolemap_deftbl.c*
65 cpustr.h
66 @@ -115,9 +125,11 @@ devlist.h*
67 dnotify_test
68 docproc
69 dslm
70 +dtc-lexer.lex.c
71 elf2ecoff
72 elfconfig.h*
73 evergreen_reg_safe.h
74 +exception_policy.conf
75 fixdep
76 flask.h
77 fore200e_mkfirm
78 @@ -125,12 +137,15 @@ fore200e_pca_fw.c*
79 gconf
80 gconf.glade.h
81 gen-devlist
82 +gen-kdb_cmds.c
83 gen_crc32table
84 gen_init_cpio
85 generated
86 genheaders
87 genksyms
88 *_gray256.c
89 +hash
90 +hid-example
91 hpet_example
92 hugepage-mmap
93 hugepage-shm
94 @@ -145,7 +160,7 @@ int32.c
95 int4.c
96 int8.c
97 kallsyms
98 -kconfig
99 +kern_constants.h
100 keywords.c
101 ksym.c*
102 ksym.h*
103 @@ -153,7 +168,7 @@ kxgettext
104 lkc_defs.h
105 lex.c
106 lex.*.c
107 -linux
108 +lib1funcs.S
109 logo_*.c
110 logo_*_clut224.c
111 logo_*_mono.c
112 @@ -165,14 +180,15 @@ machtypes.h
113 map
114 map_hugetlb
115 maui_boot.h
116 -media
117 mconf
118 +mdp
119 miboot*
120 mk_elfconfig
121 mkboot
122 mkbugboot
123 mkcpustr
124 mkdep
125 +mkpiggy
126 mkprep
127 mkregtable
128 mktables
129 @@ -208,6 +224,7 @@ r300_reg_safe.h
130 r420_reg_safe.h
131 r600_reg_safe.h
132 recordmcount
133 +regdb.c
134 relocs
135 rlim_names.h
136 rn50_reg_safe.h
137 @@ -218,6 +235,7 @@ setup
138 setup.bin
139 setup.elf
140 sImage
141 +slabinfo
142 sm_tbl*
143 split-include
144 syscalltab.h
145 @@ -228,6 +246,7 @@ tftpboot.img
146 timeconst.h
147 times.h*
148 trix_boot.h
149 +user_constants.h
150 utsrelease.h*
151 vdso-syms.lds
152 vdso.lds
153 @@ -245,7 +264,9 @@ vmlinux
154 vmlinux-*
155 vmlinux.aout
156 vmlinux.bin.all
157 +vmlinux.bin.bz2
158 vmlinux.lds
159 +vmlinux.relocs
160 vmlinuz
161 voffset.h
162 vsyscall.lds
163 @@ -253,9 +274,11 @@ vsyscall_32.lds
164 wanxlfw.inc
165 uImage
166 unifdef
167 +utsrelease.h
168 wakeup.bin
169 wakeup.elf
170 wakeup.lds
171 zImage*
172 zconf.hash.c
173 +zconf.lex.c
174 zoffset.h
175 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
176 index d99fd9c..8689fef 100644
177 --- a/Documentation/kernel-parameters.txt
178 +++ b/Documentation/kernel-parameters.txt
179 @@ -1977,6 +1977,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
180 the specified number of seconds. This is to be used if
181 your oopses keep scrolling off the screen.
182
183 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
184 + virtualization environments that don't cope well with the
185 + expand down segment used by UDEREF on X86-32 or the frequent
186 + page table updates on X86-64.
187 +
188 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
189 +
190 pcbit= [HW,ISDN]
191
192 pcd. [PARIDE]
193 diff --git a/Makefile b/Makefile
194 index 1932984..0204e68 100644
195 --- a/Makefile
196 +++ b/Makefile
197 @@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
198
199 HOSTCC = gcc
200 HOSTCXX = g++
201 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
202 -HOSTCXXFLAGS = -O2
203 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
204 +HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
205 +HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
206
207 # Decide whether to build built-in, modular, or both.
208 # Normally, just do built-in.
209 @@ -407,8 +408,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
210 # Rules shared between *config targets and build targets
211
212 # Basic helpers built in scripts/
213 -PHONY += scripts_basic
214 -scripts_basic:
215 +PHONY += scripts_basic gcc-plugins
216 +scripts_basic: gcc-plugins
217 $(Q)$(MAKE) $(build)=scripts/basic
218 $(Q)rm -f .tmp_quiet_recordmcount
219
220 @@ -564,6 +565,50 @@ else
221 KBUILD_CFLAGS += -O2
222 endif
223
224 +ifndef DISABLE_PAX_PLUGINS
225 +ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
226 +ifndef DISABLE_PAX_CONSTIFY_PLUGIN
227 +CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
228 +endif
229 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
230 +STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
231 +STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
232 +endif
233 +ifdef CONFIG_KALLOCSTAT_PLUGIN
234 +KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
235 +endif
236 +ifdef CONFIG_PAX_KERNEXEC_PLUGIN
237 +KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
238 +KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
239 +KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
240 +endif
241 +ifdef CONFIG_CHECKER_PLUGIN
242 +ifeq ($(call cc-ifversion, -ge, 0406, y), y)
243 +CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
244 +endif
245 +endif
246 +COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
247 +GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
248 +GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
249 +GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
250 +export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN
251 +ifeq ($(KBUILD_EXTMOD),)
252 +gcc-plugins:
253 + $(Q)$(MAKE) $(build)=tools/gcc
254 +else
255 +gcc-plugins: ;
256 +endif
257 +else
258 +gcc-plugins:
259 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
260 + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
261 +else
262 + $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
263 +endif
264 + $(Q)echo "PAX_MEMORY_STACKLEAK and other features will be less secure"
265 +endif
266 +endif
267 +
268 include $(srctree)/arch/$(SRCARCH)/Makefile
269
270 ifneq ($(CONFIG_FRAME_WARN),0)
271 @@ -708,7 +753,7 @@ export mod_strip_cmd
272
273
274 ifeq ($(KBUILD_EXTMOD),)
275 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
276 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
277
278 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
279 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
280 @@ -932,6 +977,8 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
281
282 # The actual objects are generated when descending,
283 # make sure no implicit rule kicks in
284 +$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
285 +$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
286 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
287
288 # Handle descending into subdirectories listed in $(vmlinux-dirs)
289 @@ -941,7 +988,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
290 # Error messages still appears in the original language
291
292 PHONY += $(vmlinux-dirs)
293 -$(vmlinux-dirs): prepare scripts
294 +$(vmlinux-dirs): gcc-plugins prepare scripts
295 $(Q)$(MAKE) $(build)=$@
296
297 # Store (new) KERNELRELASE string in include/config/kernel.release
298 @@ -985,6 +1032,7 @@ prepare0: archprepare FORCE
299 $(Q)$(MAKE) $(build)=.
300
301 # All the preparing..
302 +prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
303 prepare: prepare0
304
305 # Generate some files
306 @@ -1089,6 +1137,8 @@ all: modules
307 # using awk while concatenating to the final file.
308
309 PHONY += modules
310 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
311 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
312 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
313 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
314 @$(kecho) ' Building modules, stage 2.';
315 @@ -1104,7 +1154,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
316
317 # Target to prepare building external modules
318 PHONY += modules_prepare
319 -modules_prepare: prepare scripts
320 +modules_prepare: gcc-plugins prepare scripts
321
322 # Target to install modules
323 PHONY += modules_install
324 @@ -1201,6 +1251,7 @@ distclean: mrproper
325 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
326 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
327 -o -name '.*.rej' \
328 + -o -name '*.so' \
329 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
330 -type f -print | xargs rm -f
331
332 @@ -1361,6 +1412,8 @@ PHONY += $(module-dirs) modules
333 $(module-dirs): crmodverdir $(objtree)/Module.symvers
334 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
335
336 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
337 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
338 modules: $(module-dirs)
339 @$(kecho) ' Building modules, stage 2.';
340 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
341 @@ -1487,17 +1540,21 @@ else
342 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
343 endif
344
345 -%.s: %.c prepare scripts FORCE
346 +%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
347 +%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
348 +%.s: %.c gcc-plugins prepare scripts FORCE
349 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
350 %.i: %.c prepare scripts FORCE
351 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
352 -%.o: %.c prepare scripts FORCE
353 +%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
354 +%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
355 +%.o: %.c gcc-plugins prepare scripts FORCE
356 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
357 %.lst: %.c prepare scripts FORCE
358 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
359 -%.s: %.S prepare scripts FORCE
360 +%.s: %.S gcc-plugins prepare scripts FORCE
361 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
362 -%.o: %.S prepare scripts FORCE
363 +%.o: %.S gcc-plugins prepare scripts FORCE
364 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
365 %.symtypes: %.c prepare scripts FORCE
366 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
367 @@ -1507,11 +1564,15 @@ endif
368 $(cmd_crmodverdir)
369 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
370 $(build)=$(build-dir)
371 -%/: prepare scripts FORCE
372 +%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
373 +%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
374 +%/: gcc-plugins prepare scripts FORCE
375 $(cmd_crmodverdir)
376 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
377 $(build)=$(build-dir)
378 -%.ko: prepare scripts FORCE
379 +%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
380 +%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
381 +%.ko: gcc-plugins prepare scripts FORCE
382 $(cmd_crmodverdir)
383 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
384 $(build)=$(build-dir) $(@:.ko=.o)
385 diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
386 index 640f909..48b6597 100644
387 --- a/arch/alpha/include/asm/atomic.h
388 +++ b/arch/alpha/include/asm/atomic.h
389 @@ -250,6 +250,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
390 #define atomic_dec(v) atomic_sub(1,(v))
391 #define atomic64_dec(v) atomic64_sub(1,(v))
392
393 +#define atomic64_read_unchecked(v) atomic64_read(v)
394 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
395 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
396 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
397 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
398 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
399 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
400 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
401 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
402 +
403 #define smp_mb__before_atomic_dec() smp_mb()
404 #define smp_mb__after_atomic_dec() smp_mb()
405 #define smp_mb__before_atomic_inc() smp_mb()
406 diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
407 index ad368a9..fbe0f25 100644
408 --- a/arch/alpha/include/asm/cache.h
409 +++ b/arch/alpha/include/asm/cache.h
410 @@ -4,19 +4,19 @@
411 #ifndef __ARCH_ALPHA_CACHE_H
412 #define __ARCH_ALPHA_CACHE_H
413
414 +#include <linux/const.h>
415
416 /* Bytes per L1 (data) cache line. */
417 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
418 -# define L1_CACHE_BYTES 64
419 # define L1_CACHE_SHIFT 6
420 #else
421 /* Both EV4 and EV5 are write-through, read-allocate,
422 direct-mapped, physical.
423 */
424 -# define L1_CACHE_BYTES 32
425 # define L1_CACHE_SHIFT 5
426 #endif
427
428 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
429 #define SMP_CACHE_BYTES L1_CACHE_BYTES
430
431 #endif
432 diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
433 index da5449e..7418343 100644
434 --- a/arch/alpha/include/asm/elf.h
435 +++ b/arch/alpha/include/asm/elf.h
436 @@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
437
438 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
439
440 +#ifdef CONFIG_PAX_ASLR
441 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
442 +
443 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
444 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
445 +#endif
446 +
447 /* $0 is set by ld.so to a pointer to a function which might be
448 registered using atexit. This provides a mean for the dynamic
449 linker to call DT_FINI functions for shared libraries that have
450 diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
451 index de98a73..bd4f1f8 100644
452 --- a/arch/alpha/include/asm/pgtable.h
453 +++ b/arch/alpha/include/asm/pgtable.h
454 @@ -101,6 +101,17 @@ struct vm_area_struct;
455 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
456 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
457 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
458 +
459 +#ifdef CONFIG_PAX_PAGEEXEC
460 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
461 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
462 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
463 +#else
464 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
465 +# define PAGE_COPY_NOEXEC PAGE_COPY
466 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
467 +#endif
468 +
469 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
470
471 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
472 diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
473 index 2fd00b7..cfd5069 100644
474 --- a/arch/alpha/kernel/module.c
475 +++ b/arch/alpha/kernel/module.c
476 @@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
477
478 /* The small sections were sorted to the end of the segment.
479 The following should definitely cover them. */
480 - gp = (u64)me->module_core + me->core_size - 0x8000;
481 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
482 got = sechdrs[me->arch.gotsecindex].sh_addr;
483
484 for (i = 0; i < n; i++) {
485 diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
486 index 01e8715..be0e80f 100644
487 --- a/arch/alpha/kernel/osf_sys.c
488 +++ b/arch/alpha/kernel/osf_sys.c
489 @@ -1147,7 +1147,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
490 /* At this point: (!vma || addr < vma->vm_end). */
491 if (limit - len < addr)
492 return -ENOMEM;
493 - if (!vma || addr + len <= vma->vm_start)
494 + if (check_heap_stack_gap(vma, addr, len))
495 return addr;
496 addr = vma->vm_end;
497 vma = vma->vm_next;
498 @@ -1183,6 +1183,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
499 merely specific addresses, but regions of memory -- perhaps
500 this feature should be incorporated into all ports? */
501
502 +#ifdef CONFIG_PAX_RANDMMAP
503 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
504 +#endif
505 +
506 if (addr) {
507 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
508 if (addr != (unsigned long) -ENOMEM)
509 @@ -1190,8 +1194,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
510 }
511
512 /* Next, try allocating at TASK_UNMAPPED_BASE. */
513 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
514 - len, limit);
515 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
516 +
517 if (addr != (unsigned long) -ENOMEM)
518 return addr;
519
520 diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
521 index fadd5f8..904e73a 100644
522 --- a/arch/alpha/mm/fault.c
523 +++ b/arch/alpha/mm/fault.c
524 @@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
525 __reload_thread(pcb);
526 }
527
528 +#ifdef CONFIG_PAX_PAGEEXEC
529 +/*
530 + * PaX: decide what to do with offenders (regs->pc = fault address)
531 + *
532 + * returns 1 when task should be killed
533 + * 2 when patched PLT trampoline was detected
534 + * 3 when unpatched PLT trampoline was detected
535 + */
536 +static int pax_handle_fetch_fault(struct pt_regs *regs)
537 +{
538 +
539 +#ifdef CONFIG_PAX_EMUPLT
540 + int err;
541 +
542 + do { /* PaX: patched PLT emulation #1 */
543 + unsigned int ldah, ldq, jmp;
544 +
545 + err = get_user(ldah, (unsigned int *)regs->pc);
546 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
547 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
548 +
549 + if (err)
550 + break;
551 +
552 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
553 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
554 + jmp == 0x6BFB0000U)
555 + {
556 + unsigned long r27, addr;
557 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
558 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
559 +
560 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
561 + err = get_user(r27, (unsigned long *)addr);
562 + if (err)
563 + break;
564 +
565 + regs->r27 = r27;
566 + regs->pc = r27;
567 + return 2;
568 + }
569 + } while (0);
570 +
571 + do { /* PaX: patched PLT emulation #2 */
572 + unsigned int ldah, lda, br;
573 +
574 + err = get_user(ldah, (unsigned int *)regs->pc);
575 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
576 + err |= get_user(br, (unsigned int *)(regs->pc+8));
577 +
578 + if (err)
579 + break;
580 +
581 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
582 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
583 + (br & 0xFFE00000U) == 0xC3E00000U)
584 + {
585 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
586 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
587 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
588 +
589 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
590 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
591 + return 2;
592 + }
593 + } while (0);
594 +
595 + do { /* PaX: unpatched PLT emulation */
596 + unsigned int br;
597 +
598 + err = get_user(br, (unsigned int *)regs->pc);
599 +
600 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
601 + unsigned int br2, ldq, nop, jmp;
602 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
603 +
604 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
605 + err = get_user(br2, (unsigned int *)addr);
606 + err |= get_user(ldq, (unsigned int *)(addr+4));
607 + err |= get_user(nop, (unsigned int *)(addr+8));
608 + err |= get_user(jmp, (unsigned int *)(addr+12));
609 + err |= get_user(resolver, (unsigned long *)(addr+16));
610 +
611 + if (err)
612 + break;
613 +
614 + if (br2 == 0xC3600000U &&
615 + ldq == 0xA77B000CU &&
616 + nop == 0x47FF041FU &&
617 + jmp == 0x6B7B0000U)
618 + {
619 + regs->r28 = regs->pc+4;
620 + regs->r27 = addr+16;
621 + regs->pc = resolver;
622 + return 3;
623 + }
624 + }
625 + } while (0);
626 +#endif
627 +
628 + return 1;
629 +}
630 +
631 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
632 +{
633 + unsigned long i;
634 +
635 + printk(KERN_ERR "PAX: bytes at PC: ");
636 + for (i = 0; i < 5; i++) {
637 + unsigned int c;
638 + if (get_user(c, (unsigned int *)pc+i))
639 + printk(KERN_CONT "???????? ");
640 + else
641 + printk(KERN_CONT "%08x ", c);
642 + }
643 + printk("\n");
644 +}
645 +#endif
646
647 /*
648 * This routine handles page faults. It determines the address,
649 @@ -131,8 +249,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
650 good_area:
651 si_code = SEGV_ACCERR;
652 if (cause < 0) {
653 - if (!(vma->vm_flags & VM_EXEC))
654 + if (!(vma->vm_flags & VM_EXEC)) {
655 +
656 +#ifdef CONFIG_PAX_PAGEEXEC
657 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
658 + goto bad_area;
659 +
660 + up_read(&mm->mmap_sem);
661 + switch (pax_handle_fetch_fault(regs)) {
662 +
663 +#ifdef CONFIG_PAX_EMUPLT
664 + case 2:
665 + case 3:
666 + return;
667 +#endif
668 +
669 + }
670 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
671 + do_group_exit(SIGKILL);
672 +#else
673 goto bad_area;
674 +#endif
675 +
676 + }
677 } else if (!cause) {
678 /* Allow reads even for write-only mappings */
679 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
680 diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
681 index 86976d0..8e07f84 100644
682 --- a/arch/arm/include/asm/atomic.h
683 +++ b/arch/arm/include/asm/atomic.h
684 @@ -15,6 +15,10 @@
685 #include <linux/types.h>
686 #include <asm/system.h>
687
688 +#ifdef CONFIG_GENERIC_ATOMIC64
689 +#include <asm-generic/atomic64.h>
690 +#endif
691 +
692 #define ATOMIC_INIT(i) { (i) }
693
694 #ifdef __KERNEL__
695 @@ -25,7 +29,15 @@
696 * atomic_set() is the clrex or dummy strex done on every exception return.
697 */
698 #define atomic_read(v) (*(volatile int *)&(v)->counter)
699 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
700 +{
701 + return v->counter;
702 +}
703 #define atomic_set(v,i) (((v)->counter) = (i))
704 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
705 +{
706 + v->counter = i;
707 +}
708
709 #if __LINUX_ARM_ARCH__ >= 6
710
711 @@ -40,6 +52,35 @@ static inline void atomic_add(int i, atomic_t *v)
712 int result;
713
714 __asm__ __volatile__("@ atomic_add\n"
715 +"1: ldrex %1, [%3]\n"
716 +" adds %0, %1, %4\n"
717 +
718 +#ifdef CONFIG_PAX_REFCOUNT
719 +" bvc 3f\n"
720 +"2: bkpt 0xf103\n"
721 +"3:\n"
722 +#endif
723 +
724 +" strex %1, %0, [%3]\n"
725 +" teq %1, #0\n"
726 +" bne 1b"
727 +
728 +#ifdef CONFIG_PAX_REFCOUNT
729 +"\n4:\n"
730 + _ASM_EXTABLE(2b, 4b)
731 +#endif
732 +
733 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
734 + : "r" (&v->counter), "Ir" (i)
735 + : "cc");
736 +}
737 +
738 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
739 +{
740 + unsigned long tmp;
741 + int result;
742 +
743 + __asm__ __volatile__("@ atomic_add_unchecked\n"
744 "1: ldrex %0, [%3]\n"
745 " add %0, %0, %4\n"
746 " strex %1, %0, [%3]\n"
747 @@ -58,6 +99,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
748 smp_mb();
749
750 __asm__ __volatile__("@ atomic_add_return\n"
751 +"1: ldrex %1, [%3]\n"
752 +" adds %0, %1, %4\n"
753 +
754 +#ifdef CONFIG_PAX_REFCOUNT
755 +" bvc 3f\n"
756 +" mov %0, %1\n"
757 +"2: bkpt 0xf103\n"
758 +"3:\n"
759 +#endif
760 +
761 +" strex %1, %0, [%3]\n"
762 +" teq %1, #0\n"
763 +" bne 1b"
764 +
765 +#ifdef CONFIG_PAX_REFCOUNT
766 +"\n4:\n"
767 + _ASM_EXTABLE(2b, 4b)
768 +#endif
769 +
770 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
771 + : "r" (&v->counter), "Ir" (i)
772 + : "cc");
773 +
774 + smp_mb();
775 +
776 + return result;
777 +}
778 +
779 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
780 +{
781 + unsigned long tmp;
782 + int result;
783 +
784 + smp_mb();
785 +
786 + __asm__ __volatile__("@ atomic_add_return_unchecked\n"
787 "1: ldrex %0, [%3]\n"
788 " add %0, %0, %4\n"
789 " strex %1, %0, [%3]\n"
790 @@ -78,6 +155,35 @@ static inline void atomic_sub(int i, atomic_t *v)
791 int result;
792
793 __asm__ __volatile__("@ atomic_sub\n"
794 +"1: ldrex %1, [%3]\n"
795 +" subs %0, %1, %4\n"
796 +
797 +#ifdef CONFIG_PAX_REFCOUNT
798 +" bvc 3f\n"
799 +"2: bkpt 0xf103\n"
800 +"3:\n"
801 +#endif
802 +
803 +" strex %1, %0, [%3]\n"
804 +" teq %1, #0\n"
805 +" bne 1b"
806 +
807 +#ifdef CONFIG_PAX_REFCOUNT
808 +"\n4:\n"
809 + _ASM_EXTABLE(2b, 4b)
810 +#endif
811 +
812 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
813 + : "r" (&v->counter), "Ir" (i)
814 + : "cc");
815 +}
816 +
817 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
818 +{
819 + unsigned long tmp;
820 + int result;
821 +
822 + __asm__ __volatile__("@ atomic_sub_unchecked\n"
823 "1: ldrex %0, [%3]\n"
824 " sub %0, %0, %4\n"
825 " strex %1, %0, [%3]\n"
826 @@ -96,11 +202,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
827 smp_mb();
828
829 __asm__ __volatile__("@ atomic_sub_return\n"
830 -"1: ldrex %0, [%3]\n"
831 -" sub %0, %0, %4\n"
832 +"1: ldrex %1, [%3]\n"
833 +" sub %0, %1, %4\n"
834 +
835 +#ifdef CONFIG_PAX_REFCOUNT
836 +" bvc 3f\n"
837 +" mov %0, %1\n"
838 +"2: bkpt 0xf103\n"
839 +"3:\n"
840 +#endif
841 +
842 " strex %1, %0, [%3]\n"
843 " teq %1, #0\n"
844 " bne 1b"
845 +
846 +#ifdef CONFIG_PAX_REFCOUNT
847 +"\n4:\n"
848 + _ASM_EXTABLE(2b, 4b)
849 +#endif
850 +
851 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
852 : "r" (&v->counter), "Ir" (i)
853 : "cc");
854 @@ -132,6 +252,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
855 return oldval;
856 }
857
858 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
859 +{
860 + unsigned long oldval, res;
861 +
862 + smp_mb();
863 +
864 + do {
865 + __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
866 + "ldrex %1, [%3]\n"
867 + "mov %0, #0\n"
868 + "teq %1, %4\n"
869 + "strexeq %0, %5, [%3]\n"
870 + : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
871 + : "r" (&ptr->counter), "Ir" (old), "r" (new)
872 + : "cc");
873 + } while (res);
874 +
875 + smp_mb();
876 +
877 + return oldval;
878 +}
879 +
880 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
881 {
882 unsigned long tmp, tmp2;
883 @@ -165,7 +307,9 @@ static inline int atomic_add_return(int i, atomic_t *v)
884
885 return val;
886 }
887 +#define atomic_add_return_unchecked(i, v) atomic_add_return(i, v)
888 #define atomic_add(i, v) (void) atomic_add_return(i, v)
889 +#define atomic_add_unchecked(i, v) (void) atomic_add_return_unchecked(i, v)
890
891 static inline int atomic_sub_return(int i, atomic_t *v)
892 {
893 @@ -179,7 +323,9 @@ static inline int atomic_sub_return(int i, atomic_t *v)
894
895 return val;
896 }
897 +#define atomic_sub_return_unchecked(i, v) atomic_sub_return(i, v)
898 #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
899 +#define atomic_sub_unchecked(i, v) (void) atomic_sub_return_unchecked(i, v)
900
901 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
902 {
903 @@ -194,6 +340,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
904
905 return ret;
906 }
907 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg(v, o, n)
908
909 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
910 {
911 @@ -207,6 +354,10 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
912 #endif /* __LINUX_ARM_ARCH__ */
913
914 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
915 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
916 +{
917 + return xchg(&v->counter, new);
918 +}
919
920 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
921 {
922 @@ -219,11 +370,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
923 }
924
925 #define atomic_inc(v) atomic_add(1, v)
926 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
927 +{
928 + atomic_add_unchecked(1, v);
929 +}
930 #define atomic_dec(v) atomic_sub(1, v)
931 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
932 +{
933 + atomic_sub_unchecked(1, v);
934 +}
935
936 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
937 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
938 +{
939 + return atomic_add_return_unchecked(1, v) == 0;
940 +}
941 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
942 #define atomic_inc_return(v) (atomic_add_return(1, v))
943 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
944 +{
945 + return atomic_add_return_unchecked(1, v);
946 +}
947 #define atomic_dec_return(v) (atomic_sub_return(1, v))
948 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
949
950 @@ -239,6 +406,14 @@ typedef struct {
951 u64 __aligned(8) counter;
952 } atomic64_t;
953
954 +#ifdef CONFIG_PAX_REFCOUNT
955 +typedef struct {
956 + u64 __aligned(8) counter;
957 +} atomic64_unchecked_t;
958 +#else
959 +typedef atomic64_t atomic64_unchecked_t;
960 +#endif
961 +
962 #define ATOMIC64_INIT(i) { (i) }
963
964 static inline u64 atomic64_read(atomic64_t *v)
965 @@ -254,6 +429,19 @@ static inline u64 atomic64_read(atomic64_t *v)
966 return result;
967 }
968
969 +static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *v)
970 +{
971 + u64 result;
972 +
973 + __asm__ __volatile__("@ atomic64_read_unchecked\n"
974 +" ldrexd %0, %H0, [%1]"
975 + : "=&r" (result)
976 + : "r" (&v->counter), "Qo" (v->counter)
977 + );
978 +
979 + return result;
980 +}
981 +
982 static inline void atomic64_set(atomic64_t *v, u64 i)
983 {
984 u64 tmp;
985 @@ -268,6 +456,20 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
986 : "cc");
987 }
988
989 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
990 +{
991 + u64 tmp;
992 +
993 + __asm__ __volatile__("@ atomic64_set_unchecked\n"
994 +"1: ldrexd %0, %H0, [%2]\n"
995 +" strexd %0, %3, %H3, [%2]\n"
996 +" teq %0, #0\n"
997 +" bne 1b"
998 + : "=&r" (tmp), "=Qo" (v->counter)
999 + : "r" (&v->counter), "r" (i)
1000 + : "cc");
1001 +}
1002 +
1003 static inline void atomic64_add(u64 i, atomic64_t *v)
1004 {
1005 u64 result;
1006 @@ -276,6 +478,36 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1007 __asm__ __volatile__("@ atomic64_add\n"
1008 "1: ldrexd %0, %H0, [%3]\n"
1009 " adds %0, %0, %4\n"
1010 +" adcs %H0, %H0, %H4\n"
1011 +
1012 +#ifdef CONFIG_PAX_REFCOUNT
1013 +" bvc 3f\n"
1014 +"2: bkpt 0xf103\n"
1015 +"3:\n"
1016 +#endif
1017 +
1018 +" strexd %1, %0, %H0, [%3]\n"
1019 +" teq %1, #0\n"
1020 +" bne 1b"
1021 +
1022 +#ifdef CONFIG_PAX_REFCOUNT
1023 +"\n4:\n"
1024 + _ASM_EXTABLE(2b, 4b)
1025 +#endif
1026 +
1027 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1028 + : "r" (&v->counter), "r" (i)
1029 + : "cc");
1030 +}
1031 +
1032 +static inline void atomic64_add_unchecked(u64 i, atomic64_unchecked_t *v)
1033 +{
1034 + u64 result;
1035 + unsigned long tmp;
1036 +
1037 + __asm__ __volatile__("@ atomic64_add_unchecked\n"
1038 +"1: ldrexd %0, %H0, [%3]\n"
1039 +" adds %0, %0, %4\n"
1040 " adc %H0, %H0, %H4\n"
1041 " strexd %1, %0, %H0, [%3]\n"
1042 " teq %1, #0\n"
1043 @@ -287,12 +519,49 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1044
1045 static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
1046 {
1047 - u64 result;
1048 - unsigned long tmp;
1049 + u64 result, tmp;
1050
1051 smp_mb();
1052
1053 __asm__ __volatile__("@ atomic64_add_return\n"
1054 +"1: ldrexd %1, %H1, [%3]\n"
1055 +" adds %0, %1, %4\n"
1056 +" adcs %H0, %H1, %H4\n"
1057 +
1058 +#ifdef CONFIG_PAX_REFCOUNT
1059 +" bvc 3f\n"
1060 +" mov %0, %1\n"
1061 +" mov %H0, %H1\n"
1062 +"2: bkpt 0xf103\n"
1063 +"3:\n"
1064 +#endif
1065 +
1066 +" strexd %1, %0, %H0, [%3]\n"
1067 +" teq %1, #0\n"
1068 +" bne 1b"
1069 +
1070 +#ifdef CONFIG_PAX_REFCOUNT
1071 +"\n4:\n"
1072 + _ASM_EXTABLE(2b, 4b)
1073 +#endif
1074 +
1075 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1076 + : "r" (&v->counter), "r" (i)
1077 + : "cc");
1078 +
1079 + smp_mb();
1080 +
1081 + return result;
1082 +}
1083 +
1084 +static inline u64 atomic64_add_return_unchecked(u64 i, atomic64_unchecked_t *v)
1085 +{
1086 + u64 result;
1087 + unsigned long tmp;
1088 +
1089 + smp_mb();
1090 +
1091 + __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
1092 "1: ldrexd %0, %H0, [%3]\n"
1093 " adds %0, %0, %4\n"
1094 " adc %H0, %H0, %H4\n"
1095 @@ -316,6 +585,36 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1096 __asm__ __volatile__("@ atomic64_sub\n"
1097 "1: ldrexd %0, %H0, [%3]\n"
1098 " subs %0, %0, %4\n"
1099 +" sbcs %H0, %H0, %H4\n"
1100 +
1101 +#ifdef CONFIG_PAX_REFCOUNT
1102 +" bvc 3f\n"
1103 +"2: bkpt 0xf103\n"
1104 +"3:\n"
1105 +#endif
1106 +
1107 +" strexd %1, %0, %H0, [%3]\n"
1108 +" teq %1, #0\n"
1109 +" bne 1b"
1110 +
1111 +#ifdef CONFIG_PAX_REFCOUNT
1112 +"\n4:\n"
1113 + _ASM_EXTABLE(2b, 4b)
1114 +#endif
1115 +
1116 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1117 + : "r" (&v->counter), "r" (i)
1118 + : "cc");
1119 +}
1120 +
1121 +static inline void atomic64_sub_unchecked(u64 i, atomic64_unchecked_t *v)
1122 +{
1123 + u64 result;
1124 + unsigned long tmp;
1125 +
1126 + __asm__ __volatile__("@ atomic64_sub_unchecked\n"
1127 +"1: ldrexd %0, %H0, [%3]\n"
1128 +" subs %0, %0, %4\n"
1129 " sbc %H0, %H0, %H4\n"
1130 " strexd %1, %0, %H0, [%3]\n"
1131 " teq %1, #0\n"
1132 @@ -327,18 +626,32 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1133
1134 static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
1135 {
1136 - u64 result;
1137 - unsigned long tmp;
1138 + u64 result, tmp;
1139
1140 smp_mb();
1141
1142 __asm__ __volatile__("@ atomic64_sub_return\n"
1143 -"1: ldrexd %0, %H0, [%3]\n"
1144 -" subs %0, %0, %4\n"
1145 -" sbc %H0, %H0, %H4\n"
1146 +"1: ldrexd %1, %H1, [%3]\n"
1147 +" subs %0, %1, %4\n"
1148 +" sbc %H0, %H1, %H4\n"
1149 +
1150 +#ifdef CONFIG_PAX_REFCOUNT
1151 +" bvc 3f\n"
1152 +" mov %0, %1\n"
1153 +" mov %H0, %H1\n"
1154 +"2: bkpt 0xf103\n"
1155 +"3:\n"
1156 +#endif
1157 +
1158 " strexd %1, %0, %H0, [%3]\n"
1159 " teq %1, #0\n"
1160 " bne 1b"
1161 +
1162 +#ifdef CONFIG_PAX_REFCOUNT
1163 +"\n4:\n"
1164 + _ASM_EXTABLE(2b, 4b)
1165 +#endif
1166 +
1167 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1168 : "r" (&v->counter), "r" (i)
1169 : "cc");
1170 @@ -372,6 +685,30 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
1171 return oldval;
1172 }
1173
1174 +static inline u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old, u64 new)
1175 +{
1176 + u64 oldval;
1177 + unsigned long res;
1178 +
1179 + smp_mb();
1180 +
1181 + do {
1182 + __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1183 + "ldrexd %1, %H1, [%3]\n"
1184 + "mov %0, #0\n"
1185 + "teq %1, %4\n"
1186 + "teqeq %H1, %H4\n"
1187 + "strexdeq %0, %5, %H5, [%3]"
1188 + : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1189 + : "r" (&ptr->counter), "r" (old), "r" (new)
1190 + : "cc");
1191 + } while (res);
1192 +
1193 + smp_mb();
1194 +
1195 + return oldval;
1196 +}
1197 +
1198 static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1199 {
1200 u64 result;
1201 @@ -395,21 +732,34 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1202
1203 static inline u64 atomic64_dec_if_positive(atomic64_t *v)
1204 {
1205 - u64 result;
1206 - unsigned long tmp;
1207 + u64 result, tmp;
1208
1209 smp_mb();
1210
1211 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1212 -"1: ldrexd %0, %H0, [%3]\n"
1213 -" subs %0, %0, #1\n"
1214 -" sbc %H0, %H0, #0\n"
1215 +"1: ldrexd %1, %H1, [%3]\n"
1216 +" subs %0, %1, #1\n"
1217 +" sbc %H0, %H1, #0\n"
1218 +
1219 +#ifdef CONFIG_PAX_REFCOUNT
1220 +" bvc 3f\n"
1221 +" mov %0, %1\n"
1222 +" mov %H0, %H1\n"
1223 +"2: bkpt 0xf103\n"
1224 +"3:\n"
1225 +#endif
1226 +
1227 " teq %H0, #0\n"
1228 -" bmi 2f\n"
1229 +" bmi 4f\n"
1230 " strexd %1, %0, %H0, [%3]\n"
1231 " teq %1, #0\n"
1232 " bne 1b\n"
1233 -"2:"
1234 +"4:\n"
1235 +
1236 +#ifdef CONFIG_PAX_REFCOUNT
1237 + _ASM_EXTABLE(2b, 4b)
1238 +#endif
1239 +
1240 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1241 : "r" (&v->counter)
1242 : "cc");
1243 @@ -432,13 +782,25 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1244 " teq %0, %5\n"
1245 " teqeq %H0, %H5\n"
1246 " moveq %1, #0\n"
1247 -" beq 2f\n"
1248 +" beq 4f\n"
1249 " adds %0, %0, %6\n"
1250 " adc %H0, %H0, %H6\n"
1251 +
1252 +#ifdef CONFIG_PAX_REFCOUNT
1253 +" bvc 3f\n"
1254 +"2: bkpt 0xf103\n"
1255 +"3:\n"
1256 +#endif
1257 +
1258 " strexd %2, %0, %H0, [%4]\n"
1259 " teq %2, #0\n"
1260 " bne 1b\n"
1261 -"2:"
1262 +"4:\n"
1263 +
1264 +#ifdef CONFIG_PAX_REFCOUNT
1265 + _ASM_EXTABLE(2b, 4b)
1266 +#endif
1267 +
1268 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1269 : "r" (&v->counter), "r" (u), "r" (a)
1270 : "cc");
1271 @@ -451,10 +813,13 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1272
1273 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1274 #define atomic64_inc(v) atomic64_add(1LL, (v))
1275 +#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1276 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1277 +#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1278 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1279 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1280 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1281 +#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1282 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1283 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1284 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
1285 diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1286 index 75fe66b..2255c86 100644
1287 --- a/arch/arm/include/asm/cache.h
1288 +++ b/arch/arm/include/asm/cache.h
1289 @@ -4,8 +4,10 @@
1290 #ifndef __ASMARM_CACHE_H
1291 #define __ASMARM_CACHE_H
1292
1293 +#include <linux/const.h>
1294 +
1295 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1296 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1297 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1298
1299 /*
1300 * Memory returned by kmalloc() may be used for DMA, so we must make
1301 diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1302 index d5d8d5c..ad92c96 100644
1303 --- a/arch/arm/include/asm/cacheflush.h
1304 +++ b/arch/arm/include/asm/cacheflush.h
1305 @@ -108,7 +108,7 @@ struct cpu_cache_fns {
1306 void (*dma_unmap_area)(const void *, size_t, int);
1307
1308 void (*dma_flush_range)(const void *, const void *);
1309 -};
1310 +} __no_const;
1311
1312 /*
1313 * Select the calling method
1314 diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1315 index 0e9ce8d..6ef1e03 100644
1316 --- a/arch/arm/include/asm/elf.h
1317 +++ b/arch/arm/include/asm/elf.h
1318 @@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1319 the loader. We need to make sure that it is out of the way of the program
1320 that it will "exec", and that there is sufficient room for the brk. */
1321
1322 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1323 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1324 +
1325 +#ifdef CONFIG_PAX_ASLR
1326 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1327 +
1328 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1329 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1330 +#endif
1331
1332 /* When the program starts, a1 contains a pointer to a function to be
1333 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1334 @@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1335 extern void elf_set_personality(const struct elf32_hdr *);
1336 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1337
1338 -struct mm_struct;
1339 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1340 -#define arch_randomize_brk arch_randomize_brk
1341 -
1342 extern int vectors_user_mapping(void);
1343 #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
1344 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
1345 diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1346 index e51b1e8..32a3113 100644
1347 --- a/arch/arm/include/asm/kmap_types.h
1348 +++ b/arch/arm/include/asm/kmap_types.h
1349 @@ -21,6 +21,7 @@ enum km_type {
1350 KM_L1_CACHE,
1351 KM_L2_CACHE,
1352 KM_KDB,
1353 + KM_CLEARPAGE,
1354 KM_TYPE_NR
1355 };
1356
1357 diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1358 index 53426c6..c7baff3 100644
1359 --- a/arch/arm/include/asm/outercache.h
1360 +++ b/arch/arm/include/asm/outercache.h
1361 @@ -35,7 +35,7 @@ struct outer_cache_fns {
1362 #endif
1363 void (*set_debug)(unsigned long);
1364 void (*resume)(void);
1365 -};
1366 +} __no_const;
1367
1368 #ifdef CONFIG_OUTER_CACHE
1369
1370 diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1371 index 97b440c..b7ff179 100644
1372 --- a/arch/arm/include/asm/page.h
1373 +++ b/arch/arm/include/asm/page.h
1374 @@ -123,7 +123,7 @@ struct cpu_user_fns {
1375 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1376 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1377 unsigned long vaddr, struct vm_area_struct *vma);
1378 -};
1379 +} __no_const;
1380
1381 #ifdef MULTI_USER
1382 extern struct cpu_user_fns cpu_user;
1383 diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
1384 index e4c96cc..1145653 100644
1385 --- a/arch/arm/include/asm/system.h
1386 +++ b/arch/arm/include/asm/system.h
1387 @@ -98,6 +98,8 @@ void hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int,
1388
1389 #define xchg(ptr,x) \
1390 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1391 +#define xchg_unchecked(ptr,x) \
1392 + ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1393
1394 extern asmlinkage void c_backtrace(unsigned long fp, int pmode);
1395
1396 @@ -534,6 +536,13 @@ static inline unsigned long long __cmpxchg64_mb(volatile void *ptr,
1397
1398 #endif /* __LINUX_ARM_ARCH__ >= 6 */
1399
1400 +#define _ASM_EXTABLE(from, to) \
1401 +" .pushsection __ex_table,\"a\"\n"\
1402 +" .align 3\n" \
1403 +" .long " #from ", " #to"\n" \
1404 +" .popsection"
1405 +
1406 +
1407 #endif /* __ASSEMBLY__ */
1408
1409 #define arch_align_stack(x) (x)
1410 diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
1411 index 2958976..12ccac4 100644
1412 --- a/arch/arm/include/asm/uaccess.h
1413 +++ b/arch/arm/include/asm/uaccess.h
1414 @@ -22,6 +22,8 @@
1415 #define VERIFY_READ 0
1416 #define VERIFY_WRITE 1
1417
1418 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
1419 +
1420 /*
1421 * The exception table consists of pairs of addresses: the first is the
1422 * address of an instruction that is allowed to fault, and the second is
1423 @@ -387,8 +389,23 @@ do { \
1424
1425
1426 #ifdef CONFIG_MMU
1427 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
1428 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
1429 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
1430 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
1431 +
1432 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
1433 +{
1434 + if (!__builtin_constant_p(n))
1435 + check_object_size(to, n, false);
1436 + return ___copy_from_user(to, from, n);
1437 +}
1438 +
1439 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
1440 +{
1441 + if (!__builtin_constant_p(n))
1442 + check_object_size(from, n, true);
1443 + return ___copy_to_user(to, from, n);
1444 +}
1445 +
1446 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
1447 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
1448 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
1449 @@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
1450
1451 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1452 {
1453 + if ((long)n < 0)
1454 + return n;
1455 +
1456 if (access_ok(VERIFY_READ, from, n))
1457 n = __copy_from_user(to, from, n);
1458 else /* security hole - plug it */
1459 @@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
1460
1461 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1462 {
1463 + if ((long)n < 0)
1464 + return n;
1465 +
1466 if (access_ok(VERIFY_WRITE, to, n))
1467 n = __copy_to_user(to, from, n);
1468 return n;
1469 diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
1470 index 5b0bce6..becd81c 100644
1471 --- a/arch/arm/kernel/armksyms.c
1472 +++ b/arch/arm/kernel/armksyms.c
1473 @@ -95,8 +95,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
1474 #ifdef CONFIG_MMU
1475 EXPORT_SYMBOL(copy_page);
1476
1477 -EXPORT_SYMBOL(__copy_from_user);
1478 -EXPORT_SYMBOL(__copy_to_user);
1479 +EXPORT_SYMBOL(___copy_from_user);
1480 +EXPORT_SYMBOL(___copy_to_user);
1481 EXPORT_SYMBOL(__clear_user);
1482
1483 EXPORT_SYMBOL(__get_user_1);
1484 diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
1485 index 971d65c..cc936fb 100644
1486 --- a/arch/arm/kernel/process.c
1487 +++ b/arch/arm/kernel/process.c
1488 @@ -28,7 +28,6 @@
1489 #include <linux/tick.h>
1490 #include <linux/utsname.h>
1491 #include <linux/uaccess.h>
1492 -#include <linux/random.h>
1493 #include <linux/hw_breakpoint.h>
1494 #include <linux/cpuidle.h>
1495
1496 @@ -273,9 +272,10 @@ void machine_power_off(void)
1497 machine_shutdown();
1498 if (pm_power_off)
1499 pm_power_off();
1500 + BUG();
1501 }
1502
1503 -void machine_restart(char *cmd)
1504 +__noreturn void machine_restart(char *cmd)
1505 {
1506 machine_shutdown();
1507
1508 @@ -517,12 +517,6 @@ unsigned long get_wchan(struct task_struct *p)
1509 return 0;
1510 }
1511
1512 -unsigned long arch_randomize_brk(struct mm_struct *mm)
1513 -{
1514 - unsigned long range_end = mm->brk + 0x02000000;
1515 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
1516 -}
1517 -
1518 #ifdef CONFIG_MMU
1519 /*
1520 * The vectors page is always readable from user space for the
1521 diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
1522 index a255c39..4a19b25 100644
1523 --- a/arch/arm/kernel/setup.c
1524 +++ b/arch/arm/kernel/setup.c
1525 @@ -109,13 +109,13 @@ struct processor processor __read_mostly;
1526 struct cpu_tlb_fns cpu_tlb __read_mostly;
1527 #endif
1528 #ifdef MULTI_USER
1529 -struct cpu_user_fns cpu_user __read_mostly;
1530 +struct cpu_user_fns cpu_user __read_only;
1531 #endif
1532 #ifdef MULTI_CACHE
1533 -struct cpu_cache_fns cpu_cache __read_mostly;
1534 +struct cpu_cache_fns cpu_cache __read_only;
1535 #endif
1536 #ifdef CONFIG_OUTER_CACHE
1537 -struct outer_cache_fns outer_cache __read_mostly;
1538 +struct outer_cache_fns outer_cache __read_only;
1539 EXPORT_SYMBOL(outer_cache);
1540 #endif
1541
1542 diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
1543 index f84dfe6..13e94f7 100644
1544 --- a/arch/arm/kernel/traps.c
1545 +++ b/arch/arm/kernel/traps.c
1546 @@ -259,6 +259,8 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
1547
1548 static DEFINE_RAW_SPINLOCK(die_lock);
1549
1550 +extern void gr_handle_kernel_exploit(void);
1551 +
1552 /*
1553 * This function is protected against re-entrancy.
1554 */
1555 @@ -291,6 +293,9 @@ void die(const char *str, struct pt_regs *regs, int err)
1556 panic("Fatal exception in interrupt");
1557 if (panic_on_oops)
1558 panic("Fatal exception");
1559 +
1560 + gr_handle_kernel_exploit();
1561 +
1562 if (ret != NOTIFY_STOP)
1563 do_exit(SIGSEGV);
1564 }
1565 diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
1566 index 66a477a..bee61d3 100644
1567 --- a/arch/arm/lib/copy_from_user.S
1568 +++ b/arch/arm/lib/copy_from_user.S
1569 @@ -16,7 +16,7 @@
1570 /*
1571 * Prototype:
1572 *
1573 - * size_t __copy_from_user(void *to, const void *from, size_t n)
1574 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
1575 *
1576 * Purpose:
1577 *
1578 @@ -84,11 +84,11 @@
1579
1580 .text
1581
1582 -ENTRY(__copy_from_user)
1583 +ENTRY(___copy_from_user)
1584
1585 #include "copy_template.S"
1586
1587 -ENDPROC(__copy_from_user)
1588 +ENDPROC(___copy_from_user)
1589
1590 .pushsection .fixup,"ax"
1591 .align 0
1592 diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
1593 index 6ee2f67..d1cce76 100644
1594 --- a/arch/arm/lib/copy_page.S
1595 +++ b/arch/arm/lib/copy_page.S
1596 @@ -10,6 +10,7 @@
1597 * ASM optimised string functions
1598 */
1599 #include <linux/linkage.h>
1600 +#include <linux/const.h>
1601 #include <asm/assembler.h>
1602 #include <asm/asm-offsets.h>
1603 #include <asm/cache.h>
1604 diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
1605 index d066df6..df28194 100644
1606 --- a/arch/arm/lib/copy_to_user.S
1607 +++ b/arch/arm/lib/copy_to_user.S
1608 @@ -16,7 +16,7 @@
1609 /*
1610 * Prototype:
1611 *
1612 - * size_t __copy_to_user(void *to, const void *from, size_t n)
1613 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
1614 *
1615 * Purpose:
1616 *
1617 @@ -88,11 +88,11 @@
1618 .text
1619
1620 ENTRY(__copy_to_user_std)
1621 -WEAK(__copy_to_user)
1622 +WEAK(___copy_to_user)
1623
1624 #include "copy_template.S"
1625
1626 -ENDPROC(__copy_to_user)
1627 +ENDPROC(___copy_to_user)
1628 ENDPROC(__copy_to_user_std)
1629
1630 .pushsection .fixup,"ax"
1631 diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
1632 index 5c908b1..e712687 100644
1633 --- a/arch/arm/lib/uaccess.S
1634 +++ b/arch/arm/lib/uaccess.S
1635 @@ -20,7 +20,7 @@
1636
1637 #define PAGE_SHIFT 12
1638
1639 -/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
1640 +/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
1641 * Purpose : copy a block to user memory from kernel memory
1642 * Params : to - user memory
1643 * : from - kernel memory
1644 @@ -40,7 +40,7 @@ USER( TUSER( strgtb) r3, [r0], #1) @ May fault
1645 sub r2, r2, ip
1646 b .Lc2u_dest_aligned
1647
1648 -ENTRY(__copy_to_user)
1649 +ENTRY(___copy_to_user)
1650 stmfd sp!, {r2, r4 - r7, lr}
1651 cmp r2, #4
1652 blt .Lc2u_not_enough
1653 @@ -278,14 +278,14 @@ USER( TUSER( strgeb) r3, [r0], #1) @ May fault
1654 ldrgtb r3, [r1], #0
1655 USER( TUSER( strgtb) r3, [r0], #1) @ May fault
1656 b .Lc2u_finished
1657 -ENDPROC(__copy_to_user)
1658 +ENDPROC(___copy_to_user)
1659
1660 .pushsection .fixup,"ax"
1661 .align 0
1662 9001: ldmfd sp!, {r0, r4 - r7, pc}
1663 .popsection
1664
1665 -/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
1666 +/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
1667 * Purpose : copy a block from user memory to kernel memory
1668 * Params : to - kernel memory
1669 * : from - user memory
1670 @@ -304,7 +304,7 @@ USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault
1671 sub r2, r2, ip
1672 b .Lcfu_dest_aligned
1673
1674 -ENTRY(__copy_from_user)
1675 +ENTRY(___copy_from_user)
1676 stmfd sp!, {r0, r2, r4 - r7, lr}
1677 cmp r2, #4
1678 blt .Lcfu_not_enough
1679 @@ -544,7 +544,7 @@ USER( TUSER( ldrgeb) r3, [r1], #1) @ May fault
1680 USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault
1681 strgtb r3, [r0], #1
1682 b .Lcfu_finished
1683 -ENDPROC(__copy_from_user)
1684 +ENDPROC(___copy_from_user)
1685
1686 .pushsection .fixup,"ax"
1687 .align 0
1688 diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
1689 index 025f742..8432b08 100644
1690 --- a/arch/arm/lib/uaccess_with_memcpy.c
1691 +++ b/arch/arm/lib/uaccess_with_memcpy.c
1692 @@ -104,7 +104,7 @@ out:
1693 }
1694
1695 unsigned long
1696 -__copy_to_user(void __user *to, const void *from, unsigned long n)
1697 +___copy_to_user(void __user *to, const void *from, unsigned long n)
1698 {
1699 /*
1700 * This test is stubbed out of the main function above to keep
1701 diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
1702 index 6722627..8f97548c 100644
1703 --- a/arch/arm/mach-omap2/board-n8x0.c
1704 +++ b/arch/arm/mach-omap2/board-n8x0.c
1705 @@ -597,7 +597,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
1706 }
1707 #endif
1708
1709 -static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
1710 +static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
1711 .late_init = n8x0_menelaus_late_init,
1712 };
1713
1714 diff --git a/arch/arm/mach-ux500/mbox-db5500.c b/arch/arm/mach-ux500/mbox-db5500.c
1715 index 2b2d51c..0127490 100644
1716 --- a/arch/arm/mach-ux500/mbox-db5500.c
1717 +++ b/arch/arm/mach-ux500/mbox-db5500.c
1718 @@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct device *dev,
1719 return sprintf(buf, "0x%X\n", mbox_value);
1720 }
1721
1722 -static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
1723 +static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
1724
1725 static int mbox_show(struct seq_file *s, void *data)
1726 {
1727 diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
1728 index bb7eac3..3bade16 100644
1729 --- a/arch/arm/mm/fault.c
1730 +++ b/arch/arm/mm/fault.c
1731 @@ -172,6 +172,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
1732 }
1733 #endif
1734
1735 +#ifdef CONFIG_PAX_PAGEEXEC
1736 + if (fsr & FSR_LNX_PF) {
1737 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
1738 + do_group_exit(SIGKILL);
1739 + }
1740 +#endif
1741 +
1742 tsk->thread.address = addr;
1743 tsk->thread.error_code = fsr;
1744 tsk->thread.trap_no = 14;
1745 @@ -393,6 +400,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
1746 }
1747 #endif /* CONFIG_MMU */
1748
1749 +#ifdef CONFIG_PAX_PAGEEXEC
1750 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1751 +{
1752 + long i;
1753 +
1754 + printk(KERN_ERR "PAX: bytes at PC: ");
1755 + for (i = 0; i < 20; i++) {
1756 + unsigned char c;
1757 + if (get_user(c, (__force unsigned char __user *)pc+i))
1758 + printk(KERN_CONT "?? ");
1759 + else
1760 + printk(KERN_CONT "%02x ", c);
1761 + }
1762 + printk("\n");
1763 +
1764 + printk(KERN_ERR "PAX: bytes at SP-4: ");
1765 + for (i = -1; i < 20; i++) {
1766 + unsigned long c;
1767 + if (get_user(c, (__force unsigned long __user *)sp+i))
1768 + printk(KERN_CONT "???????? ");
1769 + else
1770 + printk(KERN_CONT "%08lx ", c);
1771 + }
1772 + printk("\n");
1773 +}
1774 +#endif
1775 +
1776 /*
1777 * First Level Translation Fault Handler
1778 *
1779 @@ -573,6 +607,20 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
1780 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
1781 struct siginfo info;
1782
1783 +#ifdef CONFIG_PAX_REFCOUNT
1784 + if (fsr_fs(ifsr) == 2) {
1785 + unsigned int bkpt;
1786 +
1787 + if (!probe_kernel_address((unsigned int *)addr, bkpt) && bkpt == 0xe12f1073) {
1788 + current->thread.error_code = ifsr;
1789 + current->thread.trap_no = 0;
1790 + pax_report_refcount_overflow(regs);
1791 + fixup_exception(regs);
1792 + return;
1793 + }
1794 + }
1795 +#endif
1796 +
1797 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
1798 return;
1799
1800 diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
1801 index ce8cb19..3ec539d 100644
1802 --- a/arch/arm/mm/mmap.c
1803 +++ b/arch/arm/mm/mmap.c
1804 @@ -93,6 +93,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1805 if (len > TASK_SIZE)
1806 return -ENOMEM;
1807
1808 +#ifdef CONFIG_PAX_RANDMMAP
1809 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
1810 +#endif
1811 +
1812 if (addr) {
1813 if (do_align)
1814 addr = COLOUR_ALIGN(addr, pgoff);
1815 @@ -100,15 +104,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1816 addr = PAGE_ALIGN(addr);
1817
1818 vma = find_vma(mm, addr);
1819 - if (TASK_SIZE - len >= addr &&
1820 - (!vma || addr + len <= vma->vm_start))
1821 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1822 return addr;
1823 }
1824 if (len > mm->cached_hole_size) {
1825 - start_addr = addr = mm->free_area_cache;
1826 + start_addr = addr = mm->free_area_cache;
1827 } else {
1828 - start_addr = addr = mm->mmap_base;
1829 - mm->cached_hole_size = 0;
1830 + start_addr = addr = mm->mmap_base;
1831 + mm->cached_hole_size = 0;
1832 }
1833
1834 full_search:
1835 @@ -124,14 +127,14 @@ full_search:
1836 * Start a new search - just in case we missed
1837 * some holes.
1838 */
1839 - if (start_addr != TASK_UNMAPPED_BASE) {
1840 - start_addr = addr = TASK_UNMAPPED_BASE;
1841 + if (start_addr != mm->mmap_base) {
1842 + start_addr = addr = mm->mmap_base;
1843 mm->cached_hole_size = 0;
1844 goto full_search;
1845 }
1846 return -ENOMEM;
1847 }
1848 - if (!vma || addr + len <= vma->vm_start) {
1849 + if (check_heap_stack_gap(vma, addr, len)) {
1850 /*
1851 * Remember the place where we stopped the search:
1852 */
1853 @@ -266,10 +269,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
1854
1855 if (mmap_is_legacy()) {
1856 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
1857 +
1858 +#ifdef CONFIG_PAX_RANDMMAP
1859 + if (mm->pax_flags & MF_PAX_RANDMMAP)
1860 + mm->mmap_base += mm->delta_mmap;
1861 +#endif
1862 +
1863 mm->get_unmapped_area = arch_get_unmapped_area;
1864 mm->unmap_area = arch_unmap_area;
1865 } else {
1866 mm->mmap_base = mmap_base(random_factor);
1867 +
1868 +#ifdef CONFIG_PAX_RANDMMAP
1869 + if (mm->pax_flags & MF_PAX_RANDMMAP)
1870 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
1871 +#endif
1872 +
1873 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
1874 mm->unmap_area = arch_unmap_area_topdown;
1875 }
1876 diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
1877 index 71a6827..e7fbc23 100644
1878 --- a/arch/arm/plat-samsung/include/plat/dma-ops.h
1879 +++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
1880 @@ -43,7 +43,7 @@ struct samsung_dma_ops {
1881 int (*started)(unsigned ch);
1882 int (*flush)(unsigned ch);
1883 int (*stop)(unsigned ch);
1884 -};
1885 +} __no_const;
1886
1887 extern void *samsung_dmadev_get_ops(void);
1888 extern void *s3c_dma_get_ops(void);
1889 diff --git a/arch/arm/plat-samsung/include/plat/ehci.h b/arch/arm/plat-samsung/include/plat/ehci.h
1890 index 5f28cae..3d23723 100644
1891 --- a/arch/arm/plat-samsung/include/plat/ehci.h
1892 +++ b/arch/arm/plat-samsung/include/plat/ehci.h
1893 @@ -14,7 +14,7 @@
1894 struct s5p_ehci_platdata {
1895 int (*phy_init)(struct platform_device *pdev, int type);
1896 int (*phy_exit)(struct platform_device *pdev, int type);
1897 -};
1898 +} __no_const;
1899
1900 extern void s5p_ehci_set_platdata(struct s5p_ehci_platdata *pd);
1901
1902 diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
1903 index c3a58a1..78fbf54 100644
1904 --- a/arch/avr32/include/asm/cache.h
1905 +++ b/arch/avr32/include/asm/cache.h
1906 @@ -1,8 +1,10 @@
1907 #ifndef __ASM_AVR32_CACHE_H
1908 #define __ASM_AVR32_CACHE_H
1909
1910 +#include <linux/const.h>
1911 +
1912 #define L1_CACHE_SHIFT 5
1913 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1914 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1915
1916 /*
1917 * Memory returned by kmalloc() may be used for DMA, so we must make
1918 diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
1919 index 3b3159b..425ea94 100644
1920 --- a/arch/avr32/include/asm/elf.h
1921 +++ b/arch/avr32/include/asm/elf.h
1922 @@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
1923 the loader. We need to make sure that it is out of the way of the program
1924 that it will "exec", and that there is sufficient room for the brk. */
1925
1926 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1927 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1928
1929 +#ifdef CONFIG_PAX_ASLR
1930 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
1931 +
1932 +#define PAX_DELTA_MMAP_LEN 15
1933 +#define PAX_DELTA_STACK_LEN 15
1934 +#endif
1935
1936 /* This yields a mask that user programs can use to figure out what
1937 instruction set this CPU supports. This could be done in user space,
1938 diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
1939 index b7f5c68..556135c 100644
1940 --- a/arch/avr32/include/asm/kmap_types.h
1941 +++ b/arch/avr32/include/asm/kmap_types.h
1942 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
1943 D(11) KM_IRQ1,
1944 D(12) KM_SOFTIRQ0,
1945 D(13) KM_SOFTIRQ1,
1946 -D(14) KM_TYPE_NR
1947 +D(14) KM_CLEARPAGE,
1948 +D(15) KM_TYPE_NR
1949 };
1950
1951 #undef D
1952 diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
1953 index f7040a1..db9f300 100644
1954 --- a/arch/avr32/mm/fault.c
1955 +++ b/arch/avr32/mm/fault.c
1956 @@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
1957
1958 int exception_trace = 1;
1959
1960 +#ifdef CONFIG_PAX_PAGEEXEC
1961 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1962 +{
1963 + unsigned long i;
1964 +
1965 + printk(KERN_ERR "PAX: bytes at PC: ");
1966 + for (i = 0; i < 20; i++) {
1967 + unsigned char c;
1968 + if (get_user(c, (unsigned char *)pc+i))
1969 + printk(KERN_CONT "???????? ");
1970 + else
1971 + printk(KERN_CONT "%02x ", c);
1972 + }
1973 + printk("\n");
1974 +}
1975 +#endif
1976 +
1977 /*
1978 * This routine handles page faults. It determines the address and the
1979 * problem, and then passes it off to one of the appropriate routines.
1980 @@ -156,6 +173,16 @@ bad_area:
1981 up_read(&mm->mmap_sem);
1982
1983 if (user_mode(regs)) {
1984 +
1985 +#ifdef CONFIG_PAX_PAGEEXEC
1986 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
1987 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
1988 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
1989 + do_group_exit(SIGKILL);
1990 + }
1991 + }
1992 +#endif
1993 +
1994 if (exception_trace && printk_ratelimit())
1995 printk("%s%s[%d]: segfault at %08lx pc %08lx "
1996 "sp %08lx ecr %lu\n",
1997 diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
1998 index 568885a..f8008df 100644
1999 --- a/arch/blackfin/include/asm/cache.h
2000 +++ b/arch/blackfin/include/asm/cache.h
2001 @@ -7,6 +7,7 @@
2002 #ifndef __ARCH_BLACKFIN_CACHE_H
2003 #define __ARCH_BLACKFIN_CACHE_H
2004
2005 +#include <linux/const.h>
2006 #include <linux/linkage.h> /* for asmlinkage */
2007
2008 /*
2009 @@ -14,7 +15,7 @@
2010 * Blackfin loads 32 bytes for cache
2011 */
2012 #define L1_CACHE_SHIFT 5
2013 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2014 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2015 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2016
2017 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
2018 diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
2019 index aea2718..3639a60 100644
2020 --- a/arch/cris/include/arch-v10/arch/cache.h
2021 +++ b/arch/cris/include/arch-v10/arch/cache.h
2022 @@ -1,8 +1,9 @@
2023 #ifndef _ASM_ARCH_CACHE_H
2024 #define _ASM_ARCH_CACHE_H
2025
2026 +#include <linux/const.h>
2027 /* Etrax 100LX have 32-byte cache-lines. */
2028 -#define L1_CACHE_BYTES 32
2029 #define L1_CACHE_SHIFT 5
2030 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2031
2032 #endif /* _ASM_ARCH_CACHE_H */
2033 diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
2034 index 1de779f..336fad3 100644
2035 --- a/arch/cris/include/arch-v32/arch/cache.h
2036 +++ b/arch/cris/include/arch-v32/arch/cache.h
2037 @@ -1,11 +1,12 @@
2038 #ifndef _ASM_CRIS_ARCH_CACHE_H
2039 #define _ASM_CRIS_ARCH_CACHE_H
2040
2041 +#include <linux/const.h>
2042 #include <arch/hwregs/dma.h>
2043
2044 /* A cache-line is 32 bytes. */
2045 -#define L1_CACHE_BYTES 32
2046 #define L1_CACHE_SHIFT 5
2047 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2048
2049 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
2050
2051 diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
2052 index 0d8a7d6..d0c9ff5 100644
2053 --- a/arch/frv/include/asm/atomic.h
2054 +++ b/arch/frv/include/asm/atomic.h
2055 @@ -241,6 +241,16 @@ extern uint32_t __xchg_32(uint32_t i, volatile void *v);
2056 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
2057 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
2058
2059 +#define atomic64_read_unchecked(v) atomic64_read(v)
2060 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2061 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2062 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2063 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2064 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2065 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2066 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2067 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2068 +
2069 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
2070 {
2071 int c, old;
2072 diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
2073 index 2797163..c2a401d 100644
2074 --- a/arch/frv/include/asm/cache.h
2075 +++ b/arch/frv/include/asm/cache.h
2076 @@ -12,10 +12,11 @@
2077 #ifndef __ASM_CACHE_H
2078 #define __ASM_CACHE_H
2079
2080 +#include <linux/const.h>
2081
2082 /* bytes per L1 cache line */
2083 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
2084 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2085 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2086
2087 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
2088 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
2089 diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
2090 index f8e16b2..c73ff79 100644
2091 --- a/arch/frv/include/asm/kmap_types.h
2092 +++ b/arch/frv/include/asm/kmap_types.h
2093 @@ -23,6 +23,7 @@ enum km_type {
2094 KM_IRQ1,
2095 KM_SOFTIRQ0,
2096 KM_SOFTIRQ1,
2097 + KM_CLEARPAGE,
2098 KM_TYPE_NR
2099 };
2100
2101 diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
2102 index 385fd30..6c3d97e 100644
2103 --- a/arch/frv/mm/elf-fdpic.c
2104 +++ b/arch/frv/mm/elf-fdpic.c
2105 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2106 if (addr) {
2107 addr = PAGE_ALIGN(addr);
2108 vma = find_vma(current->mm, addr);
2109 - if (TASK_SIZE - len >= addr &&
2110 - (!vma || addr + len <= vma->vm_start))
2111 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2112 goto success;
2113 }
2114
2115 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2116 for (; vma; vma = vma->vm_next) {
2117 if (addr > limit)
2118 break;
2119 - if (addr + len <= vma->vm_start)
2120 + if (check_heap_stack_gap(vma, addr, len))
2121 goto success;
2122 addr = vma->vm_end;
2123 }
2124 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2125 for (; vma; vma = vma->vm_next) {
2126 if (addr > limit)
2127 break;
2128 - if (addr + len <= vma->vm_start)
2129 + if (check_heap_stack_gap(vma, addr, len))
2130 goto success;
2131 addr = vma->vm_end;
2132 }
2133 diff --git a/arch/h8300/include/asm/cache.h b/arch/h8300/include/asm/cache.h
2134 index c635028..6d9445a 100644
2135 --- a/arch/h8300/include/asm/cache.h
2136 +++ b/arch/h8300/include/asm/cache.h
2137 @@ -1,8 +1,10 @@
2138 #ifndef __ARCH_H8300_CACHE_H
2139 #define __ARCH_H8300_CACHE_H
2140
2141 +#include <linux/const.h>
2142 +
2143 /* bytes per L1 cache line */
2144 -#define L1_CACHE_BYTES 4
2145 +#define L1_CACHE_BYTES _AC(4,UL)
2146
2147 /* m68k-elf-gcc 2.95.2 doesn't like these */
2148
2149 diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
2150 index 0f01de2..d37d309 100644
2151 --- a/arch/hexagon/include/asm/cache.h
2152 +++ b/arch/hexagon/include/asm/cache.h
2153 @@ -21,9 +21,11 @@
2154 #ifndef __ASM_CACHE_H
2155 #define __ASM_CACHE_H
2156
2157 +#include <linux/const.h>
2158 +
2159 /* Bytes per L1 cache line */
2160 -#define L1_CACHE_SHIFT (5)
2161 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2162 +#define L1_CACHE_SHIFT 5
2163 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2164
2165 #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
2166 #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
2167 diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
2168 index 3fad89e..3047da5 100644
2169 --- a/arch/ia64/include/asm/atomic.h
2170 +++ b/arch/ia64/include/asm/atomic.h
2171 @@ -209,6 +209,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
2172 #define atomic64_inc(v) atomic64_add(1, (v))
2173 #define atomic64_dec(v) atomic64_sub(1, (v))
2174
2175 +#define atomic64_read_unchecked(v) atomic64_read(v)
2176 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2177 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2178 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2179 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2180 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2181 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2182 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2183 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2184 +
2185 /* Atomic operations are already serializing */
2186 #define smp_mb__before_atomic_dec() barrier()
2187 #define smp_mb__after_atomic_dec() barrier()
2188 diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
2189 index 988254a..e1ee885 100644
2190 --- a/arch/ia64/include/asm/cache.h
2191 +++ b/arch/ia64/include/asm/cache.h
2192 @@ -1,6 +1,7 @@
2193 #ifndef _ASM_IA64_CACHE_H
2194 #define _ASM_IA64_CACHE_H
2195
2196 +#include <linux/const.h>
2197
2198 /*
2199 * Copyright (C) 1998-2000 Hewlett-Packard Co
2200 @@ -9,7 +10,7 @@
2201
2202 /* Bytes per L1 (data) cache line. */
2203 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
2204 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2205 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2206
2207 #ifdef CONFIG_SMP
2208 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
2209 diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
2210 index b5298eb..67c6e62 100644
2211 --- a/arch/ia64/include/asm/elf.h
2212 +++ b/arch/ia64/include/asm/elf.h
2213 @@ -42,6 +42,13 @@
2214 */
2215 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
2216
2217 +#ifdef CONFIG_PAX_ASLR
2218 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
2219 +
2220 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2221 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2222 +#endif
2223 +
2224 #define PT_IA_64_UNWIND 0x70000001
2225
2226 /* IA-64 relocations: */
2227 diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
2228 index 1a97af3..7529d31 100644
2229 --- a/arch/ia64/include/asm/pgtable.h
2230 +++ b/arch/ia64/include/asm/pgtable.h
2231 @@ -12,7 +12,7 @@
2232 * David Mosberger-Tang <davidm@hpl.hp.com>
2233 */
2234
2235 -
2236 +#include <linux/const.h>
2237 #include <asm/mman.h>
2238 #include <asm/page.h>
2239 #include <asm/processor.h>
2240 @@ -143,6 +143,17 @@
2241 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2242 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2243 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
2244 +
2245 +#ifdef CONFIG_PAX_PAGEEXEC
2246 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
2247 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2248 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2249 +#else
2250 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
2251 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
2252 +# define PAGE_COPY_NOEXEC PAGE_COPY
2253 +#endif
2254 +
2255 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
2256 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
2257 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
2258 diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
2259 index b77768d..e0795eb 100644
2260 --- a/arch/ia64/include/asm/spinlock.h
2261 +++ b/arch/ia64/include/asm/spinlock.h
2262 @@ -72,7 +72,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
2263 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
2264
2265 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
2266 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
2267 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
2268 }
2269
2270 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
2271 diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
2272 index 449c8c0..432a3d2 100644
2273 --- a/arch/ia64/include/asm/uaccess.h
2274 +++ b/arch/ia64/include/asm/uaccess.h
2275 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
2276 const void *__cu_from = (from); \
2277 long __cu_len = (n); \
2278 \
2279 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
2280 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
2281 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
2282 __cu_len; \
2283 })
2284 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
2285 long __cu_len = (n); \
2286 \
2287 __chk_user_ptr(__cu_from); \
2288 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
2289 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
2290 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
2291 __cu_len; \
2292 })
2293 diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
2294 index 24603be..948052d 100644
2295 --- a/arch/ia64/kernel/module.c
2296 +++ b/arch/ia64/kernel/module.c
2297 @@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
2298 void
2299 module_free (struct module *mod, void *module_region)
2300 {
2301 - if (mod && mod->arch.init_unw_table &&
2302 - module_region == mod->module_init) {
2303 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
2304 unw_remove_unwind_table(mod->arch.init_unw_table);
2305 mod->arch.init_unw_table = NULL;
2306 }
2307 @@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
2308 }
2309
2310 static inline int
2311 +in_init_rx (const struct module *mod, uint64_t addr)
2312 +{
2313 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
2314 +}
2315 +
2316 +static inline int
2317 +in_init_rw (const struct module *mod, uint64_t addr)
2318 +{
2319 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
2320 +}
2321 +
2322 +static inline int
2323 in_init (const struct module *mod, uint64_t addr)
2324 {
2325 - return addr - (uint64_t) mod->module_init < mod->init_size;
2326 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
2327 +}
2328 +
2329 +static inline int
2330 +in_core_rx (const struct module *mod, uint64_t addr)
2331 +{
2332 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
2333 +}
2334 +
2335 +static inline int
2336 +in_core_rw (const struct module *mod, uint64_t addr)
2337 +{
2338 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
2339 }
2340
2341 static inline int
2342 in_core (const struct module *mod, uint64_t addr)
2343 {
2344 - return addr - (uint64_t) mod->module_core < mod->core_size;
2345 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
2346 }
2347
2348 static inline int
2349 @@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
2350 break;
2351
2352 case RV_BDREL:
2353 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
2354 + if (in_init_rx(mod, val))
2355 + val -= (uint64_t) mod->module_init_rx;
2356 + else if (in_init_rw(mod, val))
2357 + val -= (uint64_t) mod->module_init_rw;
2358 + else if (in_core_rx(mod, val))
2359 + val -= (uint64_t) mod->module_core_rx;
2360 + else if (in_core_rw(mod, val))
2361 + val -= (uint64_t) mod->module_core_rw;
2362 break;
2363
2364 case RV_LTV:
2365 @@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
2366 * addresses have been selected...
2367 */
2368 uint64_t gp;
2369 - if (mod->core_size > MAX_LTOFF)
2370 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
2371 /*
2372 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
2373 * at the end of the module.
2374 */
2375 - gp = mod->core_size - MAX_LTOFF / 2;
2376 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
2377 else
2378 - gp = mod->core_size / 2;
2379 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
2380 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
2381 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
2382 mod->arch.gp = gp;
2383 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
2384 }
2385 diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
2386 index 609d500..7dde2a8 100644
2387 --- a/arch/ia64/kernel/sys_ia64.c
2388 +++ b/arch/ia64/kernel/sys_ia64.c
2389 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
2390 if (REGION_NUMBER(addr) == RGN_HPAGE)
2391 addr = 0;
2392 #endif
2393 +
2394 +#ifdef CONFIG_PAX_RANDMMAP
2395 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2396 + addr = mm->free_area_cache;
2397 + else
2398 +#endif
2399 +
2400 if (!addr)
2401 addr = mm->free_area_cache;
2402
2403 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
2404 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
2405 /* At this point: (!vma || addr < vma->vm_end). */
2406 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
2407 - if (start_addr != TASK_UNMAPPED_BASE) {
2408 + if (start_addr != mm->mmap_base) {
2409 /* Start a new search --- just in case we missed some holes. */
2410 - addr = TASK_UNMAPPED_BASE;
2411 + addr = mm->mmap_base;
2412 goto full_search;
2413 }
2414 return -ENOMEM;
2415 }
2416 - if (!vma || addr + len <= vma->vm_start) {
2417 + if (check_heap_stack_gap(vma, addr, len)) {
2418 /* Remember the address where we stopped this search: */
2419 mm->free_area_cache = addr + len;
2420 return addr;
2421 diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
2422 index 53c0ba0..2accdde 100644
2423 --- a/arch/ia64/kernel/vmlinux.lds.S
2424 +++ b/arch/ia64/kernel/vmlinux.lds.S
2425 @@ -199,7 +199,7 @@ SECTIONS {
2426 /* Per-cpu data: */
2427 . = ALIGN(PERCPU_PAGE_SIZE);
2428 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
2429 - __phys_per_cpu_start = __per_cpu_load;
2430 + __phys_per_cpu_start = per_cpu_load;
2431 /*
2432 * ensure percpu data fits
2433 * into percpu page size
2434 diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
2435 index 20b3593..1ce77f0 100644
2436 --- a/arch/ia64/mm/fault.c
2437 +++ b/arch/ia64/mm/fault.c
2438 @@ -73,6 +73,23 @@ mapped_kernel_page_is_present (unsigned long address)
2439 return pte_present(pte);
2440 }
2441
2442 +#ifdef CONFIG_PAX_PAGEEXEC
2443 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2444 +{
2445 + unsigned long i;
2446 +
2447 + printk(KERN_ERR "PAX: bytes at PC: ");
2448 + for (i = 0; i < 8; i++) {
2449 + unsigned int c;
2450 + if (get_user(c, (unsigned int *)pc+i))
2451 + printk(KERN_CONT "???????? ");
2452 + else
2453 + printk(KERN_CONT "%08x ", c);
2454 + }
2455 + printk("\n");
2456 +}
2457 +#endif
2458 +
2459 void __kprobes
2460 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
2461 {
2462 @@ -146,9 +163,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
2463 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
2464 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
2465
2466 - if ((vma->vm_flags & mask) != mask)
2467 + if ((vma->vm_flags & mask) != mask) {
2468 +
2469 +#ifdef CONFIG_PAX_PAGEEXEC
2470 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
2471 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
2472 + goto bad_area;
2473 +
2474 + up_read(&mm->mmap_sem);
2475 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
2476 + do_group_exit(SIGKILL);
2477 + }
2478 +#endif
2479 +
2480 goto bad_area;
2481
2482 + }
2483 +
2484 /*
2485 * If for any reason at all we couldn't handle the fault, make
2486 * sure we exit gracefully rather than endlessly redo the
2487 diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
2488 index 5ca674b..e0e1b70 100644
2489 --- a/arch/ia64/mm/hugetlbpage.c
2490 +++ b/arch/ia64/mm/hugetlbpage.c
2491 @@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
2492 /* At this point: (!vmm || addr < vmm->vm_end). */
2493 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
2494 return -ENOMEM;
2495 - if (!vmm || (addr + len) <= vmm->vm_start)
2496 + if (check_heap_stack_gap(vmm, addr, len))
2497 return addr;
2498 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
2499 }
2500 diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
2501 index 13df239d..cb52116 100644
2502 --- a/arch/ia64/mm/init.c
2503 +++ b/arch/ia64/mm/init.c
2504 @@ -121,6 +121,19 @@ ia64_init_addr_space (void)
2505 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
2506 vma->vm_end = vma->vm_start + PAGE_SIZE;
2507 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
2508 +
2509 +#ifdef CONFIG_PAX_PAGEEXEC
2510 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
2511 + vma->vm_flags &= ~VM_EXEC;
2512 +
2513 +#ifdef CONFIG_PAX_MPROTECT
2514 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
2515 + vma->vm_flags &= ~VM_MAYEXEC;
2516 +#endif
2517 +
2518 + }
2519 +#endif
2520 +
2521 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
2522 down_write(&current->mm->mmap_sem);
2523 if (insert_vm_struct(current->mm, vma)) {
2524 diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
2525 index 40b3ee9..8c2c112 100644
2526 --- a/arch/m32r/include/asm/cache.h
2527 +++ b/arch/m32r/include/asm/cache.h
2528 @@ -1,8 +1,10 @@
2529 #ifndef _ASM_M32R_CACHE_H
2530 #define _ASM_M32R_CACHE_H
2531
2532 +#include <linux/const.h>
2533 +
2534 /* L1 cache line size */
2535 #define L1_CACHE_SHIFT 4
2536 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2537 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2538
2539 #endif /* _ASM_M32R_CACHE_H */
2540 diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
2541 index 82abd15..d95ae5d 100644
2542 --- a/arch/m32r/lib/usercopy.c
2543 +++ b/arch/m32r/lib/usercopy.c
2544 @@ -14,6 +14,9 @@
2545 unsigned long
2546 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
2547 {
2548 + if ((long)n < 0)
2549 + return n;
2550 +
2551 prefetch(from);
2552 if (access_ok(VERIFY_WRITE, to, n))
2553 __copy_user(to,from,n);
2554 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
2555 unsigned long
2556 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
2557 {
2558 + if ((long)n < 0)
2559 + return n;
2560 +
2561 prefetchw(to);
2562 if (access_ok(VERIFY_READ, from, n))
2563 __copy_user_zeroing(to,from,n);
2564 diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
2565 index 0395c51..5f26031 100644
2566 --- a/arch/m68k/include/asm/cache.h
2567 +++ b/arch/m68k/include/asm/cache.h
2568 @@ -4,9 +4,11 @@
2569 #ifndef __ARCH_M68K_CACHE_H
2570 #define __ARCH_M68K_CACHE_H
2571
2572 +#include <linux/const.h>
2573 +
2574 /* bytes per L1 cache line */
2575 #define L1_CACHE_SHIFT 4
2576 -#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
2577 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2578
2579 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
2580
2581 diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
2582 index 4efe96a..60e8699 100644
2583 --- a/arch/microblaze/include/asm/cache.h
2584 +++ b/arch/microblaze/include/asm/cache.h
2585 @@ -13,11 +13,12 @@
2586 #ifndef _ASM_MICROBLAZE_CACHE_H
2587 #define _ASM_MICROBLAZE_CACHE_H
2588
2589 +#include <linux/const.h>
2590 #include <asm/registers.h>
2591
2592 #define L1_CACHE_SHIFT 5
2593 /* word-granular cache in microblaze */
2594 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2595 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2596
2597 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2598
2599 diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
2600 index 1d93f81..67794d0 100644
2601 --- a/arch/mips/include/asm/atomic.h
2602 +++ b/arch/mips/include/asm/atomic.h
2603 @@ -21,6 +21,10 @@
2604 #include <asm/war.h>
2605 #include <asm/system.h>
2606
2607 +#ifdef CONFIG_GENERIC_ATOMIC64
2608 +#include <asm-generic/atomic64.h>
2609 +#endif
2610 +
2611 #define ATOMIC_INIT(i) { (i) }
2612
2613 /*
2614 @@ -765,6 +769,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
2615 */
2616 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
2617
2618 +#define atomic64_read_unchecked(v) atomic64_read(v)
2619 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2620 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2621 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2622 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2623 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2624 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2625 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2626 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2627 +
2628 #endif /* CONFIG_64BIT */
2629
2630 /*
2631 diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
2632 index b4db69f..8f3b093 100644
2633 --- a/arch/mips/include/asm/cache.h
2634 +++ b/arch/mips/include/asm/cache.h
2635 @@ -9,10 +9,11 @@
2636 #ifndef _ASM_CACHE_H
2637 #define _ASM_CACHE_H
2638
2639 +#include <linux/const.h>
2640 #include <kmalloc.h>
2641
2642 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
2643 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2644 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2645
2646 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
2647 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2648 diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
2649 index 455c0ac..ad65fbe 100644
2650 --- a/arch/mips/include/asm/elf.h
2651 +++ b/arch/mips/include/asm/elf.h
2652 @@ -372,13 +372,16 @@ extern const char *__elf_platform;
2653 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
2654 #endif
2655
2656 +#ifdef CONFIG_PAX_ASLR
2657 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2658 +
2659 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2660 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2661 +#endif
2662 +
2663 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2664 struct linux_binprm;
2665 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
2666 int uses_interp);
2667
2668 -struct mm_struct;
2669 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2670 -#define arch_randomize_brk arch_randomize_brk
2671 -
2672 #endif /* _ASM_ELF_H */
2673 diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
2674 index da9bd7d..91aa7ab 100644
2675 --- a/arch/mips/include/asm/page.h
2676 +++ b/arch/mips/include/asm/page.h
2677 @@ -98,7 +98,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
2678 #ifdef CONFIG_CPU_MIPS32
2679 typedef struct { unsigned long pte_low, pte_high; } pte_t;
2680 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
2681 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
2682 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
2683 #else
2684 typedef struct { unsigned long long pte; } pte_t;
2685 #define pte_val(x) ((x).pte)
2686 diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
2687 index 6018c80..7c37203 100644
2688 --- a/arch/mips/include/asm/system.h
2689 +++ b/arch/mips/include/asm/system.h
2690 @@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
2691 */
2692 #define __ARCH_WANT_UNLOCKED_CTXSW
2693
2694 -extern unsigned long arch_align_stack(unsigned long sp);
2695 +#define arch_align_stack(x) ((x) & ~0xfUL)
2696
2697 #endif /* _ASM_SYSTEM_H */
2698 diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
2699 index 9fdd8bc..4bd7f1a 100644
2700 --- a/arch/mips/kernel/binfmt_elfn32.c
2701 +++ b/arch/mips/kernel/binfmt_elfn32.c
2702 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2703 #undef ELF_ET_DYN_BASE
2704 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2705
2706 +#ifdef CONFIG_PAX_ASLR
2707 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2708 +
2709 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2710 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2711 +#endif
2712 +
2713 #include <asm/processor.h>
2714 #include <linux/module.h>
2715 #include <linux/elfcore.h>
2716 diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
2717 index ff44823..97f8906 100644
2718 --- a/arch/mips/kernel/binfmt_elfo32.c
2719 +++ b/arch/mips/kernel/binfmt_elfo32.c
2720 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2721 #undef ELF_ET_DYN_BASE
2722 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2723
2724 +#ifdef CONFIG_PAX_ASLR
2725 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2726 +
2727 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2728 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2729 +#endif
2730 +
2731 #include <asm/processor.h>
2732
2733 /*
2734 diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
2735 index 7955409..ceaea7c 100644
2736 --- a/arch/mips/kernel/process.c
2737 +++ b/arch/mips/kernel/process.c
2738 @@ -483,15 +483,3 @@ unsigned long get_wchan(struct task_struct *task)
2739 out:
2740 return pc;
2741 }
2742 -
2743 -/*
2744 - * Don't forget that the stack pointer must be aligned on a 8 bytes
2745 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
2746 - */
2747 -unsigned long arch_align_stack(unsigned long sp)
2748 -{
2749 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2750 - sp -= get_random_int() & ~PAGE_MASK;
2751 -
2752 - return sp & ALMASK;
2753 -}
2754 diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
2755 index 69ebd58..e4bff83 100644
2756 --- a/arch/mips/mm/fault.c
2757 +++ b/arch/mips/mm/fault.c
2758 @@ -28,6 +28,23 @@
2759 #include <asm/highmem.h> /* For VMALLOC_END */
2760 #include <linux/kdebug.h>
2761
2762 +#ifdef CONFIG_PAX_PAGEEXEC
2763 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2764 +{
2765 + unsigned long i;
2766 +
2767 + printk(KERN_ERR "PAX: bytes at PC: ");
2768 + for (i = 0; i < 5; i++) {
2769 + unsigned int c;
2770 + if (get_user(c, (unsigned int *)pc+i))
2771 + printk(KERN_CONT "???????? ");
2772 + else
2773 + printk(KERN_CONT "%08x ", c);
2774 + }
2775 + printk("\n");
2776 +}
2777 +#endif
2778 +
2779 /*
2780 * This routine handles page faults. It determines the address,
2781 * and the problem, and then passes it off to one of the appropriate
2782 diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
2783 index 302d779..7d35bf8 100644
2784 --- a/arch/mips/mm/mmap.c
2785 +++ b/arch/mips/mm/mmap.c
2786 @@ -95,6 +95,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
2787 do_color_align = 1;
2788
2789 /* requesting a specific address */
2790 +
2791 +#ifdef CONFIG_PAX_RANDMMAP
2792 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
2793 +#endif
2794 +
2795 if (addr) {
2796 if (do_color_align)
2797 addr = COLOUR_ALIGN(addr, pgoff);
2798 @@ -102,8 +107,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
2799 addr = PAGE_ALIGN(addr);
2800
2801 vma = find_vma(mm, addr);
2802 - if (TASK_SIZE - len >= addr &&
2803 - (!vma || addr + len <= vma->vm_start))
2804 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len))
2805 return addr;
2806 }
2807
2808 @@ -118,7 +122,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
2809 /* At this point: (!vma || addr < vma->vm_end). */
2810 if (TASK_SIZE - len < addr)
2811 return -ENOMEM;
2812 - if (!vma || addr + len <= vma->vm_start)
2813 + if (check_heap_stack_gap(vmm, addr, len))
2814 return addr;
2815 addr = vma->vm_end;
2816 if (do_color_align)
2817 @@ -145,7 +149,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
2818 /* make sure it can fit in the remaining address space */
2819 if (likely(addr > len)) {
2820 vma = find_vma(mm, addr - len);
2821 - if (!vma || addr <= vma->vm_start) {
2822 + if (check_heap_stack_gap(vmm, addr - len, len))
2823 /* cache the address as a hint for next time */
2824 return mm->free_area_cache = addr - len;
2825 }
2826 @@ -165,7 +169,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
2827 * return with success:
2828 */
2829 vma = find_vma(mm, addr);
2830 - if (likely(!vma || addr + len <= vma->vm_start)) {
2831 + if (check_heap_stack_gap(vmm, addr, len)) {
2832 /* cache the address as a hint for next time */
2833 return mm->free_area_cache = addr;
2834 }
2835 @@ -242,30 +246,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
2836 mm->unmap_area = arch_unmap_area_topdown;
2837 }
2838 }
2839 -
2840 -static inline unsigned long brk_rnd(void)
2841 -{
2842 - unsigned long rnd = get_random_int();
2843 -
2844 - rnd = rnd << PAGE_SHIFT;
2845 - /* 8MB for 32bit, 256MB for 64bit */
2846 - if (TASK_IS_32BIT_ADDR)
2847 - rnd = rnd & 0x7ffffful;
2848 - else
2849 - rnd = rnd & 0xffffffful;
2850 -
2851 - return rnd;
2852 -}
2853 -
2854 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2855 -{
2856 - unsigned long base = mm->brk;
2857 - unsigned long ret;
2858 -
2859 - ret = PAGE_ALIGN(base + brk_rnd());
2860 -
2861 - if (ret < mm->brk)
2862 - return mm->brk;
2863 -
2864 - return ret;
2865 -}
2866 diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
2867 index 967d144..db12197 100644
2868 --- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
2869 +++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
2870 @@ -11,12 +11,14 @@
2871 #ifndef _ASM_PROC_CACHE_H
2872 #define _ASM_PROC_CACHE_H
2873
2874 +#include <linux/const.h>
2875 +
2876 /* L1 cache */
2877
2878 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
2879 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
2880 -#define L1_CACHE_BYTES 16 /* bytes per entry */
2881 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
2882 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
2883 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
2884
2885 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
2886 diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
2887 index bcb5df2..84fabd2 100644
2888 --- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
2889 +++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
2890 @@ -16,13 +16,15 @@
2891 #ifndef _ASM_PROC_CACHE_H
2892 #define _ASM_PROC_CACHE_H
2893
2894 +#include <linux/const.h>
2895 +
2896 /*
2897 * L1 cache
2898 */
2899 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
2900 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
2901 -#define L1_CACHE_BYTES 32 /* bytes per entry */
2902 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
2903 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
2904 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
2905
2906 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
2907 diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
2908 index 4ce7a01..449202a 100644
2909 --- a/arch/openrisc/include/asm/cache.h
2910 +++ b/arch/openrisc/include/asm/cache.h
2911 @@ -19,11 +19,13 @@
2912 #ifndef __ASM_OPENRISC_CACHE_H
2913 #define __ASM_OPENRISC_CACHE_H
2914
2915 +#include <linux/const.h>
2916 +
2917 /* FIXME: How can we replace these with values from the CPU...
2918 * they shouldn't be hard-coded!
2919 */
2920
2921 -#define L1_CACHE_BYTES 16
2922 #define L1_CACHE_SHIFT 4
2923 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2924
2925 #endif /* __ASM_OPENRISC_CACHE_H */
2926 diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
2927 index 4054b31..a10c105 100644
2928 --- a/arch/parisc/include/asm/atomic.h
2929 +++ b/arch/parisc/include/asm/atomic.h
2930 @@ -335,6 +335,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
2931
2932 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
2933
2934 +#define atomic64_read_unchecked(v) atomic64_read(v)
2935 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2936 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2937 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2938 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2939 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2940 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2941 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2942 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2943 +
2944 #endif /* !CONFIG_64BIT */
2945
2946
2947 diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
2948 index 47f11c7..3420df2 100644
2949 --- a/arch/parisc/include/asm/cache.h
2950 +++ b/arch/parisc/include/asm/cache.h
2951 @@ -5,6 +5,7 @@
2952 #ifndef __ARCH_PARISC_CACHE_H
2953 #define __ARCH_PARISC_CACHE_H
2954
2955 +#include <linux/const.h>
2956
2957 /*
2958 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
2959 @@ -15,13 +16,13 @@
2960 * just ruin performance.
2961 */
2962 #ifdef CONFIG_PA20
2963 -#define L1_CACHE_BYTES 64
2964 #define L1_CACHE_SHIFT 6
2965 #else
2966 -#define L1_CACHE_BYTES 32
2967 #define L1_CACHE_SHIFT 5
2968 #endif
2969
2970 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2971 +
2972 #ifndef __ASSEMBLY__
2973
2974 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2975 diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
2976 index 19f6cb1..6c78cf2 100644
2977 --- a/arch/parisc/include/asm/elf.h
2978 +++ b/arch/parisc/include/asm/elf.h
2979 @@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
2980
2981 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
2982
2983 +#ifdef CONFIG_PAX_ASLR
2984 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
2985 +
2986 +#define PAX_DELTA_MMAP_LEN 16
2987 +#define PAX_DELTA_STACK_LEN 16
2988 +#endif
2989 +
2990 /* This yields a mask that user programs can use to figure out what
2991 instruction set this CPU supports. This could be done in user space,
2992 but it's not easy, and we've already done it here. */
2993 diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
2994 index 22dadeb..f6c2be4 100644
2995 --- a/arch/parisc/include/asm/pgtable.h
2996 +++ b/arch/parisc/include/asm/pgtable.h
2997 @@ -210,6 +210,17 @@ struct vm_area_struct;
2998 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
2999 #define PAGE_COPY PAGE_EXECREAD
3000 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
3001 +
3002 +#ifdef CONFIG_PAX_PAGEEXEC
3003 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
3004 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
3005 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
3006 +#else
3007 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
3008 +# define PAGE_COPY_NOEXEC PAGE_COPY
3009 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
3010 +#endif
3011 +
3012 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
3013 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
3014 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
3015 diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
3016 index 5e34ccf..672bc9c 100644
3017 --- a/arch/parisc/kernel/module.c
3018 +++ b/arch/parisc/kernel/module.c
3019 @@ -98,16 +98,38 @@
3020
3021 /* three functions to determine where in the module core
3022 * or init pieces the location is */
3023 +static inline int in_init_rx(struct module *me, void *loc)
3024 +{
3025 + return (loc >= me->module_init_rx &&
3026 + loc < (me->module_init_rx + me->init_size_rx));
3027 +}
3028 +
3029 +static inline int in_init_rw(struct module *me, void *loc)
3030 +{
3031 + return (loc >= me->module_init_rw &&
3032 + loc < (me->module_init_rw + me->init_size_rw));
3033 +}
3034 +
3035 static inline int in_init(struct module *me, void *loc)
3036 {
3037 - return (loc >= me->module_init &&
3038 - loc <= (me->module_init + me->init_size));
3039 + return in_init_rx(me, loc) || in_init_rw(me, loc);
3040 +}
3041 +
3042 +static inline int in_core_rx(struct module *me, void *loc)
3043 +{
3044 + return (loc >= me->module_core_rx &&
3045 + loc < (me->module_core_rx + me->core_size_rx));
3046 +}
3047 +
3048 +static inline int in_core_rw(struct module *me, void *loc)
3049 +{
3050 + return (loc >= me->module_core_rw &&
3051 + loc < (me->module_core_rw + me->core_size_rw));
3052 }
3053
3054 static inline int in_core(struct module *me, void *loc)
3055 {
3056 - return (loc >= me->module_core &&
3057 - loc <= (me->module_core + me->core_size));
3058 + return in_core_rx(me, loc) || in_core_rw(me, loc);
3059 }
3060
3061 static inline int in_local(struct module *me, void *loc)
3062 @@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
3063 }
3064
3065 /* align things a bit */
3066 - me->core_size = ALIGN(me->core_size, 16);
3067 - me->arch.got_offset = me->core_size;
3068 - me->core_size += gots * sizeof(struct got_entry);
3069 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
3070 + me->arch.got_offset = me->core_size_rw;
3071 + me->core_size_rw += gots * sizeof(struct got_entry);
3072
3073 - me->core_size = ALIGN(me->core_size, 16);
3074 - me->arch.fdesc_offset = me->core_size;
3075 - me->core_size += fdescs * sizeof(Elf_Fdesc);
3076 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
3077 + me->arch.fdesc_offset = me->core_size_rw;
3078 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
3079
3080 me->arch.got_max = gots;
3081 me->arch.fdesc_max = fdescs;
3082 @@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
3083
3084 BUG_ON(value == 0);
3085
3086 - got = me->module_core + me->arch.got_offset;
3087 + got = me->module_core_rw + me->arch.got_offset;
3088 for (i = 0; got[i].addr; i++)
3089 if (got[i].addr == value)
3090 goto out;
3091 @@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
3092 #ifdef CONFIG_64BIT
3093 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
3094 {
3095 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
3096 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
3097
3098 if (!value) {
3099 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
3100 @@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
3101
3102 /* Create new one */
3103 fdesc->addr = value;
3104 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
3105 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
3106 return (Elf_Addr)fdesc;
3107 }
3108 #endif /* CONFIG_64BIT */
3109 @@ -845,7 +867,7 @@ register_unwind_table(struct module *me,
3110
3111 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
3112 end = table + sechdrs[me->arch.unwind_section].sh_size;
3113 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
3114 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
3115
3116 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
3117 me->arch.unwind_section, table, end, gp);
3118 diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
3119 index c9b9322..02d8940 100644
3120 --- a/arch/parisc/kernel/sys_parisc.c
3121 +++ b/arch/parisc/kernel/sys_parisc.c
3122 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
3123 /* At this point: (!vma || addr < vma->vm_end). */
3124 if (TASK_SIZE - len < addr)
3125 return -ENOMEM;
3126 - if (!vma || addr + len <= vma->vm_start)
3127 + if (check_heap_stack_gap(vma, addr, len))
3128 return addr;
3129 addr = vma->vm_end;
3130 }
3131 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
3132 /* At this point: (!vma || addr < vma->vm_end). */
3133 if (TASK_SIZE - len < addr)
3134 return -ENOMEM;
3135 - if (!vma || addr + len <= vma->vm_start)
3136 + if (check_heap_stack_gap(vma, addr, len))
3137 return addr;
3138 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
3139 if (addr < vma->vm_end) /* handle wraparound */
3140 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
3141 if (flags & MAP_FIXED)
3142 return addr;
3143 if (!addr)
3144 - addr = TASK_UNMAPPED_BASE;
3145 + addr = current->mm->mmap_base;
3146
3147 if (filp) {
3148 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
3149 diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
3150 index f19e660..414fe24 100644
3151 --- a/arch/parisc/kernel/traps.c
3152 +++ b/arch/parisc/kernel/traps.c
3153 @@ -733,9 +733,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
3154
3155 down_read(&current->mm->mmap_sem);
3156 vma = find_vma(current->mm,regs->iaoq[0]);
3157 - if (vma && (regs->iaoq[0] >= vma->vm_start)
3158 - && (vma->vm_flags & VM_EXEC)) {
3159 -
3160 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
3161 fault_address = regs->iaoq[0];
3162 fault_space = regs->iasq[0];
3163
3164 diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
3165 index 18162ce..94de376 100644
3166 --- a/arch/parisc/mm/fault.c
3167 +++ b/arch/parisc/mm/fault.c
3168 @@ -15,6 +15,7 @@
3169 #include <linux/sched.h>
3170 #include <linux/interrupt.h>
3171 #include <linux/module.h>
3172 +#include <linux/unistd.h>
3173
3174 #include <asm/uaccess.h>
3175 #include <asm/traps.h>
3176 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
3177 static unsigned long
3178 parisc_acctyp(unsigned long code, unsigned int inst)
3179 {
3180 - if (code == 6 || code == 16)
3181 + if (code == 6 || code == 7 || code == 16)
3182 return VM_EXEC;
3183
3184 switch (inst & 0xf0000000) {
3185 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
3186 }
3187 #endif
3188
3189 +#ifdef CONFIG_PAX_PAGEEXEC
3190 +/*
3191 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
3192 + *
3193 + * returns 1 when task should be killed
3194 + * 2 when rt_sigreturn trampoline was detected
3195 + * 3 when unpatched PLT trampoline was detected
3196 + */
3197 +static int pax_handle_fetch_fault(struct pt_regs *regs)
3198 +{
3199 +
3200 +#ifdef CONFIG_PAX_EMUPLT
3201 + int err;
3202 +
3203 + do { /* PaX: unpatched PLT emulation */
3204 + unsigned int bl, depwi;
3205 +
3206 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
3207 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
3208 +
3209 + if (err)
3210 + break;
3211 +
3212 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
3213 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
3214 +
3215 + err = get_user(ldw, (unsigned int *)addr);
3216 + err |= get_user(bv, (unsigned int *)(addr+4));
3217 + err |= get_user(ldw2, (unsigned int *)(addr+8));
3218 +
3219 + if (err)
3220 + break;
3221 +
3222 + if (ldw == 0x0E801096U &&
3223 + bv == 0xEAC0C000U &&
3224 + ldw2 == 0x0E881095U)
3225 + {
3226 + unsigned int resolver, map;
3227 +
3228 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
3229 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
3230 + if (err)
3231 + break;
3232 +
3233 + regs->gr[20] = instruction_pointer(regs)+8;
3234 + regs->gr[21] = map;
3235 + regs->gr[22] = resolver;
3236 + regs->iaoq[0] = resolver | 3UL;
3237 + regs->iaoq[1] = regs->iaoq[0] + 4;
3238 + return 3;
3239 + }
3240 + }
3241 + } while (0);
3242 +#endif
3243 +
3244 +#ifdef CONFIG_PAX_EMUTRAMP
3245 +
3246 +#ifndef CONFIG_PAX_EMUSIGRT
3247 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
3248 + return 1;
3249 +#endif
3250 +
3251 + do { /* PaX: rt_sigreturn emulation */
3252 + unsigned int ldi1, ldi2, bel, nop;
3253 +
3254 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
3255 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
3256 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
3257 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
3258 +
3259 + if (err)
3260 + break;
3261 +
3262 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
3263 + ldi2 == 0x3414015AU &&
3264 + bel == 0xE4008200U &&
3265 + nop == 0x08000240U)
3266 + {
3267 + regs->gr[25] = (ldi1 & 2) >> 1;
3268 + regs->gr[20] = __NR_rt_sigreturn;
3269 + regs->gr[31] = regs->iaoq[1] + 16;
3270 + regs->sr[0] = regs->iasq[1];
3271 + regs->iaoq[0] = 0x100UL;
3272 + regs->iaoq[1] = regs->iaoq[0] + 4;
3273 + regs->iasq[0] = regs->sr[2];
3274 + regs->iasq[1] = regs->sr[2];
3275 + return 2;
3276 + }
3277 + } while (0);
3278 +#endif
3279 +
3280 + return 1;
3281 +}
3282 +
3283 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3284 +{
3285 + unsigned long i;
3286 +
3287 + printk(KERN_ERR "PAX: bytes at PC: ");
3288 + for (i = 0; i < 5; i++) {
3289 + unsigned int c;
3290 + if (get_user(c, (unsigned int *)pc+i))
3291 + printk(KERN_CONT "???????? ");
3292 + else
3293 + printk(KERN_CONT "%08x ", c);
3294 + }
3295 + printk("\n");
3296 +}
3297 +#endif
3298 +
3299 int fixup_exception(struct pt_regs *regs)
3300 {
3301 const struct exception_table_entry *fix;
3302 @@ -192,8 +303,33 @@ good_area:
3303
3304 acc_type = parisc_acctyp(code,regs->iir);
3305
3306 - if ((vma->vm_flags & acc_type) != acc_type)
3307 + if ((vma->vm_flags & acc_type) != acc_type) {
3308 +
3309 +#ifdef CONFIG_PAX_PAGEEXEC
3310 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
3311 + (address & ~3UL) == instruction_pointer(regs))
3312 + {
3313 + up_read(&mm->mmap_sem);
3314 + switch (pax_handle_fetch_fault(regs)) {
3315 +
3316 +#ifdef CONFIG_PAX_EMUPLT
3317 + case 3:
3318 + return;
3319 +#endif
3320 +
3321 +#ifdef CONFIG_PAX_EMUTRAMP
3322 + case 2:
3323 + return;
3324 +#endif
3325 +
3326 + }
3327 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
3328 + do_group_exit(SIGKILL);
3329 + }
3330 +#endif
3331 +
3332 goto bad_area;
3333 + }
3334
3335 /*
3336 * If for any reason at all we couldn't handle the fault, make
3337 diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
3338 index 02e41b5..ec6e26c 100644
3339 --- a/arch/powerpc/include/asm/atomic.h
3340 +++ b/arch/powerpc/include/asm/atomic.h
3341 @@ -469,6 +469,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
3342
3343 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3344
3345 +#define atomic64_read_unchecked(v) atomic64_read(v)
3346 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
3347 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
3348 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
3349 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
3350 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
3351 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
3352 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
3353 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
3354 +
3355 #endif /* __powerpc64__ */
3356
3357 #endif /* __KERNEL__ */
3358 diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
3359 index 4b50941..5605819 100644
3360 --- a/arch/powerpc/include/asm/cache.h
3361 +++ b/arch/powerpc/include/asm/cache.h
3362 @@ -3,6 +3,7 @@
3363
3364 #ifdef __KERNEL__
3365
3366 +#include <linux/const.h>
3367
3368 /* bytes per L1 cache line */
3369 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
3370 @@ -22,7 +23,7 @@
3371 #define L1_CACHE_SHIFT 7
3372 #endif
3373
3374 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
3375 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3376
3377 #define SMP_CACHE_BYTES L1_CACHE_BYTES
3378
3379 diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
3380 index 3bf9cca..e7457d0 100644
3381 --- a/arch/powerpc/include/asm/elf.h
3382 +++ b/arch/powerpc/include/asm/elf.h
3383 @@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
3384 the loader. We need to make sure that it is out of the way of the program
3385 that it will "exec", and that there is sufficient room for the brk. */
3386
3387 -extern unsigned long randomize_et_dyn(unsigned long base);
3388 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
3389 +#define ELF_ET_DYN_BASE (0x20000000)
3390 +
3391 +#ifdef CONFIG_PAX_ASLR
3392 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
3393 +
3394 +#ifdef __powerpc64__
3395 +#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
3396 +#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
3397 +#else
3398 +#define PAX_DELTA_MMAP_LEN 15
3399 +#define PAX_DELTA_STACK_LEN 15
3400 +#endif
3401 +#endif
3402
3403 /*
3404 * Our registers are always unsigned longs, whether we're a 32 bit
3405 @@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
3406 (0x7ff >> (PAGE_SHIFT - 12)) : \
3407 (0x3ffff >> (PAGE_SHIFT - 12)))
3408
3409 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
3410 -#define arch_randomize_brk arch_randomize_brk
3411 -
3412 #endif /* __KERNEL__ */
3413
3414 /*
3415 diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
3416 index bca8fdc..61e9580 100644
3417 --- a/arch/powerpc/include/asm/kmap_types.h
3418 +++ b/arch/powerpc/include/asm/kmap_types.h
3419 @@ -27,6 +27,7 @@ enum km_type {
3420 KM_PPC_SYNC_PAGE,
3421 KM_PPC_SYNC_ICACHE,
3422 KM_KDB,
3423 + KM_CLEARPAGE,
3424 KM_TYPE_NR
3425 };
3426
3427 diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
3428 index d4a7f64..451de1c 100644
3429 --- a/arch/powerpc/include/asm/mman.h
3430 +++ b/arch/powerpc/include/asm/mman.h
3431 @@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
3432 }
3433 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
3434
3435 -static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
3436 +static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
3437 {
3438 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
3439 }
3440 diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
3441 index f072e97..b436dee 100644
3442 --- a/arch/powerpc/include/asm/page.h
3443 +++ b/arch/powerpc/include/asm/page.h
3444 @@ -220,8 +220,9 @@ extern long long virt_phys_offset;
3445 * and needs to be executable. This means the whole heap ends
3446 * up being executable.
3447 */
3448 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
3449 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3450 +#define VM_DATA_DEFAULT_FLAGS32 \
3451 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
3452 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3453
3454 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
3455 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3456 @@ -249,6 +250,9 @@ extern long long virt_phys_offset;
3457 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
3458 #endif
3459
3460 +#define ktla_ktva(addr) (addr)
3461 +#define ktva_ktla(addr) (addr)
3462 +
3463 /*
3464 * Use the top bit of the higher-level page table entries to indicate whether
3465 * the entries we point to contain hugepages. This works because we know that
3466 diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
3467 index fed85e6..da5c71b 100644
3468 --- a/arch/powerpc/include/asm/page_64.h
3469 +++ b/arch/powerpc/include/asm/page_64.h
3470 @@ -146,15 +146,18 @@ do { \
3471 * stack by default, so in the absence of a PT_GNU_STACK program header
3472 * we turn execute permission off.
3473 */
3474 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
3475 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3476 +#define VM_STACK_DEFAULT_FLAGS32 \
3477 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
3478 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3479
3480 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
3481 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3482
3483 +#ifndef CONFIG_PAX_PAGEEXEC
3484 #define VM_STACK_DEFAULT_FLAGS \
3485 (is_32bit_task() ? \
3486 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
3487 +#endif
3488
3489 #include <asm-generic/getorder.h>
3490
3491 diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
3492 index 2e0e411..7899c68 100644
3493 --- a/arch/powerpc/include/asm/pgtable.h
3494 +++ b/arch/powerpc/include/asm/pgtable.h
3495 @@ -2,6 +2,7 @@
3496 #define _ASM_POWERPC_PGTABLE_H
3497 #ifdef __KERNEL__
3498
3499 +#include <linux/const.h>
3500 #ifndef __ASSEMBLY__
3501 #include <asm/processor.h> /* For TASK_SIZE */
3502 #include <asm/mmu.h>
3503 diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
3504 index 4aad413..85d86bf 100644
3505 --- a/arch/powerpc/include/asm/pte-hash32.h
3506 +++ b/arch/powerpc/include/asm/pte-hash32.h
3507 @@ -21,6 +21,7 @@
3508 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
3509 #define _PAGE_USER 0x004 /* usermode access allowed */
3510 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
3511 +#define _PAGE_EXEC _PAGE_GUARDED
3512 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
3513 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
3514 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
3515 diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
3516 index 7fdc2c0..e47a9b02d3 100644
3517 --- a/arch/powerpc/include/asm/reg.h
3518 +++ b/arch/powerpc/include/asm/reg.h
3519 @@ -212,6 +212,7 @@
3520 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
3521 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
3522 #define DSISR_NOHPTE 0x40000000 /* no translation found */
3523 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
3524 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
3525 #define DSISR_ISSTORE 0x02000000 /* access was a store */
3526 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
3527 diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
3528 index c377457..3c69fbc 100644
3529 --- a/arch/powerpc/include/asm/system.h
3530 +++ b/arch/powerpc/include/asm/system.h
3531 @@ -539,7 +539,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
3532 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
3533 #endif
3534
3535 -extern unsigned long arch_align_stack(unsigned long sp);
3536 +#define arch_align_stack(x) ((x) & ~0xfUL)
3537
3538 /* Used in very early kernel initialization. */
3539 extern unsigned long reloc_offset(void);
3540 diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
3541 index bd0fb84..a42a14b 100644
3542 --- a/arch/powerpc/include/asm/uaccess.h
3543 +++ b/arch/powerpc/include/asm/uaccess.h
3544 @@ -13,6 +13,8 @@
3545 #define VERIFY_READ 0
3546 #define VERIFY_WRITE 1
3547
3548 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
3549 +
3550 /*
3551 * The fs value determines whether argument validity checking should be
3552 * performed or not. If get_fs() == USER_DS, checking is performed, with
3553 @@ -327,52 +329,6 @@ do { \
3554 extern unsigned long __copy_tofrom_user(void __user *to,
3555 const void __user *from, unsigned long size);
3556
3557 -#ifndef __powerpc64__
3558 -
3559 -static inline unsigned long copy_from_user(void *to,
3560 - const void __user *from, unsigned long n)
3561 -{
3562 - unsigned long over;
3563 -
3564 - if (access_ok(VERIFY_READ, from, n))
3565 - return __copy_tofrom_user((__force void __user *)to, from, n);
3566 - if ((unsigned long)from < TASK_SIZE) {
3567 - over = (unsigned long)from + n - TASK_SIZE;
3568 - return __copy_tofrom_user((__force void __user *)to, from,
3569 - n - over) + over;
3570 - }
3571 - return n;
3572 -}
3573 -
3574 -static inline unsigned long copy_to_user(void __user *to,
3575 - const void *from, unsigned long n)
3576 -{
3577 - unsigned long over;
3578 -
3579 - if (access_ok(VERIFY_WRITE, to, n))
3580 - return __copy_tofrom_user(to, (__force void __user *)from, n);
3581 - if ((unsigned long)to < TASK_SIZE) {
3582 - over = (unsigned long)to + n - TASK_SIZE;
3583 - return __copy_tofrom_user(to, (__force void __user *)from,
3584 - n - over) + over;
3585 - }
3586 - return n;
3587 -}
3588 -
3589 -#else /* __powerpc64__ */
3590 -
3591 -#define __copy_in_user(to, from, size) \
3592 - __copy_tofrom_user((to), (from), (size))
3593 -
3594 -extern unsigned long copy_from_user(void *to, const void __user *from,
3595 - unsigned long n);
3596 -extern unsigned long copy_to_user(void __user *to, const void *from,
3597 - unsigned long n);
3598 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
3599 - unsigned long n);
3600 -
3601 -#endif /* __powerpc64__ */
3602 -
3603 static inline unsigned long __copy_from_user_inatomic(void *to,
3604 const void __user *from, unsigned long n)
3605 {
3606 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
3607 if (ret == 0)
3608 return 0;
3609 }
3610 +
3611 + if (!__builtin_constant_p(n))
3612 + check_object_size(to, n, false);
3613 +
3614 return __copy_tofrom_user((__force void __user *)to, from, n);
3615 }
3616
3617 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
3618 if (ret == 0)
3619 return 0;
3620 }
3621 +
3622 + if (!__builtin_constant_p(n))
3623 + check_object_size(from, n, true);
3624 +
3625 return __copy_tofrom_user(to, (__force const void __user *)from, n);
3626 }
3627
3628 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
3629 return __copy_to_user_inatomic(to, from, size);
3630 }
3631
3632 +#ifndef __powerpc64__
3633 +
3634 +static inline unsigned long __must_check copy_from_user(void *to,
3635 + const void __user *from, unsigned long n)
3636 +{
3637 + unsigned long over;
3638 +
3639 + if ((long)n < 0)
3640 + return n;
3641 +
3642 + if (access_ok(VERIFY_READ, from, n)) {
3643 + if (!__builtin_constant_p(n))
3644 + check_object_size(to, n, false);
3645 + return __copy_tofrom_user((__force void __user *)to, from, n);
3646 + }
3647 + if ((unsigned long)from < TASK_SIZE) {
3648 + over = (unsigned long)from + n - TASK_SIZE;
3649 + if (!__builtin_constant_p(n - over))
3650 + check_object_size(to, n - over, false);
3651 + return __copy_tofrom_user((__force void __user *)to, from,
3652 + n - over) + over;
3653 + }
3654 + return n;
3655 +}
3656 +
3657 +static inline unsigned long __must_check copy_to_user(void __user *to,
3658 + const void *from, unsigned long n)
3659 +{
3660 + unsigned long over;
3661 +
3662 + if ((long)n < 0)
3663 + return n;
3664 +
3665 + if (access_ok(VERIFY_WRITE, to, n)) {
3666 + if (!__builtin_constant_p(n))
3667 + check_object_size(from, n, true);
3668 + return __copy_tofrom_user(to, (__force void __user *)from, n);
3669 + }
3670 + if ((unsigned long)to < TASK_SIZE) {
3671 + over = (unsigned long)to + n - TASK_SIZE;
3672 + if (!__builtin_constant_p(n - over))
3673 + check_object_size(from, n - over, true);
3674 + return __copy_tofrom_user(to, (__force void __user *)from,
3675 + n - over) + over;
3676 + }
3677 + return n;
3678 +}
3679 +
3680 +#else /* __powerpc64__ */
3681 +
3682 +#define __copy_in_user(to, from, size) \
3683 + __copy_tofrom_user((to), (from), (size))
3684 +
3685 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
3686 +{
3687 + if ((long)n < 0 || n > INT_MAX)
3688 + return n;
3689 +
3690 + if (!__builtin_constant_p(n))
3691 + check_object_size(to, n, false);
3692 +
3693 + if (likely(access_ok(VERIFY_READ, from, n)))
3694 + n = __copy_from_user(to, from, n);
3695 + else
3696 + memset(to, 0, n);
3697 + return n;
3698 +}
3699 +
3700 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
3701 +{
3702 + if ((long)n < 0 || n > INT_MAX)
3703 + return n;
3704 +
3705 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
3706 + if (!__builtin_constant_p(n))
3707 + check_object_size(from, n, true);
3708 + n = __copy_to_user(to, from, n);
3709 + }
3710 + return n;
3711 +}
3712 +
3713 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
3714 + unsigned long n);
3715 +
3716 +#endif /* __powerpc64__ */
3717 +
3718 extern unsigned long __clear_user(void __user *addr, unsigned long size);
3719
3720 static inline unsigned long clear_user(void __user *addr, unsigned long size)
3721 diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
3722 index 429983c..7af363b 100644
3723 --- a/arch/powerpc/kernel/exceptions-64e.S
3724 +++ b/arch/powerpc/kernel/exceptions-64e.S
3725 @@ -587,6 +587,7 @@ storage_fault_common:
3726 std r14,_DAR(r1)
3727 std r15,_DSISR(r1)
3728 addi r3,r1,STACK_FRAME_OVERHEAD
3729 + bl .save_nvgprs
3730 mr r4,r14
3731 mr r5,r15
3732 ld r14,PACA_EXGEN+EX_R14(r13)
3733 @@ -596,8 +597,7 @@ storage_fault_common:
3734 cmpdi r3,0
3735 bne- 1f
3736 b .ret_from_except_lite
3737 -1: bl .save_nvgprs
3738 - mr r5,r3
3739 +1: mr r5,r3
3740 addi r3,r1,STACK_FRAME_OVERHEAD
3741 ld r4,_DAR(r1)
3742 bl .bad_page_fault
3743 diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
3744 index 15c5a4f..22a4000 100644
3745 --- a/arch/powerpc/kernel/exceptions-64s.S
3746 +++ b/arch/powerpc/kernel/exceptions-64s.S
3747 @@ -1004,10 +1004,10 @@ handle_page_fault:
3748 11: ld r4,_DAR(r1)
3749 ld r5,_DSISR(r1)
3750 addi r3,r1,STACK_FRAME_OVERHEAD
3751 + bl .save_nvgprs
3752 bl .do_page_fault
3753 cmpdi r3,0
3754 beq+ 13f
3755 - bl .save_nvgprs
3756 mr r5,r3
3757 addi r3,r1,STACK_FRAME_OVERHEAD
3758 lwz r4,_DAR(r1)
3759 diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
3760 index 01e2877..a1ba360 100644
3761 --- a/arch/powerpc/kernel/irq.c
3762 +++ b/arch/powerpc/kernel/irq.c
3763 @@ -560,9 +560,6 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
3764 host->ops = ops;
3765 host->of_node = of_node_get(of_node);
3766
3767 - if (host->ops->match == NULL)
3768 - host->ops->match = default_irq_host_match;
3769 -
3770 raw_spin_lock_irqsave(&irq_big_lock, flags);
3771
3772 /* If it's a legacy controller, check for duplicates and
3773 @@ -635,7 +632,12 @@ struct irq_host *irq_find_host(struct device_node *node)
3774 */
3775 raw_spin_lock_irqsave(&irq_big_lock, flags);
3776 list_for_each_entry(h, &irq_hosts, link)
3777 - if (h->ops->match(h, node)) {
3778 + if (h->ops->match) {
3779 + if (h->ops->match(h, node)) {
3780 + found = h;
3781 + break;
3782 + }
3783 + } else if (default_irq_host_match(h, node)) {
3784 found = h;
3785 break;
3786 }
3787 diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
3788 index 0b6d796..d760ddb 100644
3789 --- a/arch/powerpc/kernel/module_32.c
3790 +++ b/arch/powerpc/kernel/module_32.c
3791 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
3792 me->arch.core_plt_section = i;
3793 }
3794 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
3795 - printk("Module doesn't contain .plt or .init.plt sections.\n");
3796 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
3797 return -ENOEXEC;
3798 }
3799
3800 @@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
3801
3802 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
3803 /* Init, or core PLT? */
3804 - if (location >= mod->module_core
3805 - && location < mod->module_core + mod->core_size)
3806 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
3807 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
3808 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
3809 - else
3810 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
3811 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
3812 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
3813 + else {
3814 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
3815 + return ~0UL;
3816 + }
3817
3818 /* Find this entry, or if that fails, the next avail. entry */
3819 while (entry->jump[0]) {
3820 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
3821 index d817ab0..b23b18e 100644
3822 --- a/arch/powerpc/kernel/process.c
3823 +++ b/arch/powerpc/kernel/process.c
3824 @@ -676,8 +676,8 @@ void show_regs(struct pt_regs * regs)
3825 * Lookup NIP late so we have the best change of getting the
3826 * above info out without failing
3827 */
3828 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
3829 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
3830 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
3831 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
3832 #endif
3833 show_stack(current, (unsigned long *) regs->gpr[1]);
3834 if (!user_mode(regs))
3835 @@ -1181,10 +1181,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
3836 newsp = stack[0];
3837 ip = stack[STACK_FRAME_LR_SAVE];
3838 if (!firstframe || ip != lr) {
3839 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
3840 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
3841 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3842 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
3843 - printk(" (%pS)",
3844 + printk(" (%pA)",
3845 (void *)current->ret_stack[curr_frame].ret);
3846 curr_frame--;
3847 }
3848 @@ -1204,7 +1204,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
3849 struct pt_regs *regs = (struct pt_regs *)
3850 (sp + STACK_FRAME_OVERHEAD);
3851 lr = regs->link;
3852 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
3853 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
3854 regs->trap, (void *)regs->nip, (void *)lr);
3855 firstframe = 1;
3856 }
3857 @@ -1279,58 +1279,3 @@ void thread_info_cache_init(void)
3858 }
3859
3860 #endif /* THREAD_SHIFT < PAGE_SHIFT */
3861 -
3862 -unsigned long arch_align_stack(unsigned long sp)
3863 -{
3864 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
3865 - sp -= get_random_int() & ~PAGE_MASK;
3866 - return sp & ~0xf;
3867 -}
3868 -
3869 -static inline unsigned long brk_rnd(void)
3870 -{
3871 - unsigned long rnd = 0;
3872 -
3873 - /* 8MB for 32bit, 1GB for 64bit */
3874 - if (is_32bit_task())
3875 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
3876 - else
3877 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
3878 -
3879 - return rnd << PAGE_SHIFT;
3880 -}
3881 -
3882 -unsigned long arch_randomize_brk(struct mm_struct *mm)
3883 -{
3884 - unsigned long base = mm->brk;
3885 - unsigned long ret;
3886 -
3887 -#ifdef CONFIG_PPC_STD_MMU_64
3888 - /*
3889 - * If we are using 1TB segments and we are allowed to randomise
3890 - * the heap, we can put it above 1TB so it is backed by a 1TB
3891 - * segment. Otherwise the heap will be in the bottom 1TB
3892 - * which always uses 256MB segments and this may result in a
3893 - * performance penalty.
3894 - */
3895 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
3896 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
3897 -#endif
3898 -
3899 - ret = PAGE_ALIGN(base + brk_rnd());
3900 -
3901 - if (ret < mm->brk)
3902 - return mm->brk;
3903 -
3904 - return ret;
3905 -}
3906 -
3907 -unsigned long randomize_et_dyn(unsigned long base)
3908 -{
3909 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
3910 -
3911 - if (ret < base)
3912 - return base;
3913 -
3914 - return ret;
3915 -}
3916 diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
3917 index 836a5a1..27289a3 100644
3918 --- a/arch/powerpc/kernel/signal_32.c
3919 +++ b/arch/powerpc/kernel/signal_32.c
3920 @@ -859,7 +859,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
3921 /* Save user registers on the stack */
3922 frame = &rt_sf->uc.uc_mcontext;
3923 addr = frame;
3924 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
3925 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
3926 if (save_user_regs(regs, frame, 0, 1))
3927 goto badframe;
3928 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
3929 diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
3930 index a50b5ec..547078a 100644
3931 --- a/arch/powerpc/kernel/signal_64.c
3932 +++ b/arch/powerpc/kernel/signal_64.c
3933 @@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
3934 current->thread.fpscr.val = 0;
3935
3936 /* Set up to return from userspace. */
3937 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
3938 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
3939 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
3940 } else {
3941 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
3942 diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
3943 index c091527..5592625 100644
3944 --- a/arch/powerpc/kernel/traps.c
3945 +++ b/arch/powerpc/kernel/traps.c
3946 @@ -131,6 +131,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
3947 return flags;
3948 }
3949
3950 +extern void gr_handle_kernel_exploit(void);
3951 +
3952 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
3953 int signr)
3954 {
3955 @@ -178,6 +180,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
3956 panic("Fatal exception in interrupt");
3957 if (panic_on_oops)
3958 panic("Fatal exception");
3959 +
3960 + gr_handle_kernel_exploit();
3961 +
3962 do_exit(signr);
3963 }
3964
3965 diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
3966 index 7d14bb6..1305601 100644
3967 --- a/arch/powerpc/kernel/vdso.c
3968 +++ b/arch/powerpc/kernel/vdso.c
3969 @@ -35,6 +35,7 @@
3970 #include <asm/firmware.h>
3971 #include <asm/vdso.h>
3972 #include <asm/vdso_datapage.h>
3973 +#include <asm/mman.h>
3974
3975 #include "setup.h"
3976
3977 @@ -219,7 +220,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
3978 vdso_base = VDSO32_MBASE;
3979 #endif
3980
3981 - current->mm->context.vdso_base = 0;
3982 + current->mm->context.vdso_base = ~0UL;
3983
3984 /* vDSO has a problem and was disabled, just don't "enable" it for the
3985 * process
3986 @@ -239,7 +240,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
3987 vdso_base = get_unmapped_area(NULL, vdso_base,
3988 (vdso_pages << PAGE_SHIFT) +
3989 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
3990 - 0, 0);
3991 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
3992 if (IS_ERR_VALUE(vdso_base)) {
3993 rc = vdso_base;
3994 goto fail_mmapsem;
3995 diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
3996 index 5eea6f3..5d10396 100644
3997 --- a/arch/powerpc/lib/usercopy_64.c
3998 +++ b/arch/powerpc/lib/usercopy_64.c
3999 @@ -9,22 +9,6 @@
4000 #include <linux/module.h>
4001 #include <asm/uaccess.h>
4002
4003 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
4004 -{
4005 - if (likely(access_ok(VERIFY_READ, from, n)))
4006 - n = __copy_from_user(to, from, n);
4007 - else
4008 - memset(to, 0, n);
4009 - return n;
4010 -}
4011 -
4012 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
4013 -{
4014 - if (likely(access_ok(VERIFY_WRITE, to, n)))
4015 - n = __copy_to_user(to, from, n);
4016 - return n;
4017 -}
4018 -
4019 unsigned long copy_in_user(void __user *to, const void __user *from,
4020 unsigned long n)
4021 {
4022 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
4023 return n;
4024 }
4025
4026 -EXPORT_SYMBOL(copy_from_user);
4027 -EXPORT_SYMBOL(copy_to_user);
4028 EXPORT_SYMBOL(copy_in_user);
4029
4030 diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
4031 index 2f0d1b0..36fb5cc 100644
4032 --- a/arch/powerpc/mm/fault.c
4033 +++ b/arch/powerpc/mm/fault.c
4034 @@ -32,6 +32,10 @@
4035 #include <linux/perf_event.h>
4036 #include <linux/magic.h>
4037 #include <linux/ratelimit.h>
4038 +#include <linux/slab.h>
4039 +#include <linux/pagemap.h>
4040 +#include <linux/compiler.h>
4041 +#include <linux/unistd.h>
4042
4043 #include <asm/firmware.h>
4044 #include <asm/page.h>
4045 @@ -43,6 +47,7 @@
4046 #include <asm/tlbflush.h>
4047 #include <asm/siginfo.h>
4048 #include <mm/mmu_decl.h>
4049 +#include <asm/ptrace.h>
4050
4051 #include "icswx.h"
4052
4053 @@ -68,6 +73,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
4054 }
4055 #endif
4056
4057 +#ifdef CONFIG_PAX_PAGEEXEC
4058 +/*
4059 + * PaX: decide what to do with offenders (regs->nip = fault address)
4060 + *
4061 + * returns 1 when task should be killed
4062 + */
4063 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4064 +{
4065 + return 1;
4066 +}
4067 +
4068 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4069 +{
4070 + unsigned long i;
4071 +
4072 + printk(KERN_ERR "PAX: bytes at PC: ");
4073 + for (i = 0; i < 5; i++) {
4074 + unsigned int c;
4075 + if (get_user(c, (unsigned int __user *)pc+i))
4076 + printk(KERN_CONT "???????? ");
4077 + else
4078 + printk(KERN_CONT "%08x ", c);
4079 + }
4080 + printk("\n");
4081 +}
4082 +#endif
4083 +
4084 /*
4085 * Check whether the instruction at regs->nip is a store using
4086 * an update addressing form which will update r1.
4087 @@ -138,7 +170,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
4088 * indicate errors in DSISR but can validly be set in SRR1.
4089 */
4090 if (trap == 0x400)
4091 - error_code &= 0x48200000;
4092 + error_code &= 0x58200000;
4093 else
4094 is_write = error_code & DSISR_ISSTORE;
4095 #else
4096 @@ -276,7 +308,7 @@ good_area:
4097 * "undefined". Of those that can be set, this is the only
4098 * one which seems bad.
4099 */
4100 - if (error_code & 0x10000000)
4101 + if (error_code & DSISR_GUARDED)
4102 /* Guarded storage error. */
4103 goto bad_area;
4104 #endif /* CONFIG_8xx */
4105 @@ -291,7 +323,7 @@ good_area:
4106 * processors use the same I/D cache coherency mechanism
4107 * as embedded.
4108 */
4109 - if (error_code & DSISR_PROTFAULT)
4110 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
4111 goto bad_area;
4112 #endif /* CONFIG_PPC_STD_MMU */
4113
4114 @@ -360,6 +392,23 @@ bad_area:
4115 bad_area_nosemaphore:
4116 /* User mode accesses cause a SIGSEGV */
4117 if (user_mode(regs)) {
4118 +
4119 +#ifdef CONFIG_PAX_PAGEEXEC
4120 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4121 +#ifdef CONFIG_PPC_STD_MMU
4122 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
4123 +#else
4124 + if (is_exec && regs->nip == address) {
4125 +#endif
4126 + switch (pax_handle_fetch_fault(regs)) {
4127 + }
4128 +
4129 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
4130 + do_group_exit(SIGKILL);
4131 + }
4132 + }
4133 +#endif
4134 +
4135 _exception(SIGSEGV, regs, code, address);
4136 return 0;
4137 }
4138 diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
4139 index 67a42ed..1c7210c 100644
4140 --- a/arch/powerpc/mm/mmap_64.c
4141 +++ b/arch/powerpc/mm/mmap_64.c
4142 @@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4143 */
4144 if (mmap_is_legacy()) {
4145 mm->mmap_base = TASK_UNMAPPED_BASE;
4146 +
4147 +#ifdef CONFIG_PAX_RANDMMAP
4148 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4149 + mm->mmap_base += mm->delta_mmap;
4150 +#endif
4151 +
4152 mm->get_unmapped_area = arch_get_unmapped_area;
4153 mm->unmap_area = arch_unmap_area;
4154 } else {
4155 mm->mmap_base = mmap_base();
4156 +
4157 +#ifdef CONFIG_PAX_RANDMMAP
4158 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4159 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4160 +#endif
4161 +
4162 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4163 mm->unmap_area = arch_unmap_area_topdown;
4164 }
4165 diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
4166 index 73709f7..6b90313 100644
4167 --- a/arch/powerpc/mm/slice.c
4168 +++ b/arch/powerpc/mm/slice.c
4169 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
4170 if ((mm->task_size - len) < addr)
4171 return 0;
4172 vma = find_vma(mm, addr);
4173 - return (!vma || (addr + len) <= vma->vm_start);
4174 + return check_heap_stack_gap(vma, addr, len);
4175 }
4176
4177 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
4178 @@ -256,7 +256,7 @@ full_search:
4179 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
4180 continue;
4181 }
4182 - if (!vma || addr + len <= vma->vm_start) {
4183 + if (check_heap_stack_gap(vma, addr, len)) {
4184 /*
4185 * Remember the place where we stopped the search:
4186 */
4187 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4188 }
4189 }
4190
4191 - addr = mm->mmap_base;
4192 - while (addr > len) {
4193 + if (mm->mmap_base < len)
4194 + addr = -ENOMEM;
4195 + else
4196 + addr = mm->mmap_base - len;
4197 +
4198 + while (!IS_ERR_VALUE(addr)) {
4199 /* Go down by chunk size */
4200 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
4201 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
4202
4203 /* Check for hit with different page size */
4204 mask = slice_range_to_mask(addr, len);
4205 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4206 * return with success:
4207 */
4208 vma = find_vma(mm, addr);
4209 - if (!vma || (addr + len) <= vma->vm_start) {
4210 + if (check_heap_stack_gap(vma, addr, len)) {
4211 /* remember the address as a hint for next time */
4212 if (use_cache)
4213 mm->free_area_cache = addr;
4214 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4215 mm->cached_hole_size = vma->vm_start - addr;
4216
4217 /* try just below the current vma->vm_start */
4218 - addr = vma->vm_start;
4219 + addr = skip_heap_stack_gap(vma, len);
4220 }
4221
4222 /*
4223 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
4224 if (fixed && addr > (mm->task_size - len))
4225 return -EINVAL;
4226
4227 +#ifdef CONFIG_PAX_RANDMMAP
4228 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
4229 + addr = 0;
4230 +#endif
4231 +
4232 /* If hint, make sure it matches our alignment restrictions */
4233 if (!fixed && addr) {
4234 addr = _ALIGN_UP(addr, 1ul << pshift);
4235 diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
4236 index 8517d2a..d2738d4 100644
4237 --- a/arch/s390/include/asm/atomic.h
4238 +++ b/arch/s390/include/asm/atomic.h
4239 @@ -326,6 +326,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
4240 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
4241 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
4242
4243 +#define atomic64_read_unchecked(v) atomic64_read(v)
4244 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4245 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4246 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4247 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4248 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
4249 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4250 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
4251 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4252 +
4253 #define smp_mb__before_atomic_dec() smp_mb()
4254 #define smp_mb__after_atomic_dec() smp_mb()
4255 #define smp_mb__before_atomic_inc() smp_mb()
4256 diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
4257 index 2a30d5a..5e5586f 100644
4258 --- a/arch/s390/include/asm/cache.h
4259 +++ b/arch/s390/include/asm/cache.h
4260 @@ -11,8 +11,10 @@
4261 #ifndef __ARCH_S390_CACHE_H
4262 #define __ARCH_S390_CACHE_H
4263
4264 -#define L1_CACHE_BYTES 256
4265 +#include <linux/const.h>
4266 +
4267 #define L1_CACHE_SHIFT 8
4268 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4269 #define NET_SKB_PAD 32
4270
4271 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4272 diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
4273 index 547f1a6..0b22b53 100644
4274 --- a/arch/s390/include/asm/elf.h
4275 +++ b/arch/s390/include/asm/elf.h
4276 @@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
4277 the loader. We need to make sure that it is out of the way of the program
4278 that it will "exec", and that there is sufficient room for the brk. */
4279
4280 -extern unsigned long randomize_et_dyn(unsigned long base);
4281 -#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
4282 +#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
4283 +
4284 +#ifdef CONFIG_PAX_ASLR
4285 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
4286 +
4287 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4288 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4289 +#endif
4290
4291 /* This yields a mask that user programs can use to figure out what
4292 instruction set this CPU supports. */
4293 @@ -211,7 +217,4 @@ struct linux_binprm;
4294 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
4295 int arch_setup_additional_pages(struct linux_binprm *, int);
4296
4297 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
4298 -#define arch_randomize_brk arch_randomize_brk
4299 -
4300 #endif
4301 diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
4302 index d73cc6b..1a296ad 100644
4303 --- a/arch/s390/include/asm/system.h
4304 +++ b/arch/s390/include/asm/system.h
4305 @@ -260,7 +260,7 @@ extern void (*_machine_restart)(char *command);
4306 extern void (*_machine_halt)(void);
4307 extern void (*_machine_power_off)(void);
4308
4309 -extern unsigned long arch_align_stack(unsigned long sp);
4310 +#define arch_align_stack(x) ((x) & ~0xfUL)
4311
4312 static inline int tprot(unsigned long addr)
4313 {
4314 diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
4315 index 2b23885..e136e31 100644
4316 --- a/arch/s390/include/asm/uaccess.h
4317 +++ b/arch/s390/include/asm/uaccess.h
4318 @@ -235,6 +235,10 @@ static inline unsigned long __must_check
4319 copy_to_user(void __user *to, const void *from, unsigned long n)
4320 {
4321 might_fault();
4322 +
4323 + if ((long)n < 0)
4324 + return n;
4325 +
4326 if (access_ok(VERIFY_WRITE, to, n))
4327 n = __copy_to_user(to, from, n);
4328 return n;
4329 @@ -260,6 +264,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
4330 static inline unsigned long __must_check
4331 __copy_from_user(void *to, const void __user *from, unsigned long n)
4332 {
4333 + if ((long)n < 0)
4334 + return n;
4335 +
4336 if (__builtin_constant_p(n) && (n <= 256))
4337 return uaccess.copy_from_user_small(n, from, to);
4338 else
4339 @@ -294,6 +301,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
4340 unsigned int sz = __compiletime_object_size(to);
4341
4342 might_fault();
4343 +
4344 + if ((long)n < 0)
4345 + return n;
4346 +
4347 if (unlikely(sz != -1 && sz < n)) {
4348 copy_from_user_overflow();
4349 return n;
4350 diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
4351 index dfcb343..eda788a 100644
4352 --- a/arch/s390/kernel/module.c
4353 +++ b/arch/s390/kernel/module.c
4354 @@ -161,11 +161,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
4355
4356 /* Increase core size by size of got & plt and set start
4357 offsets for got and plt. */
4358 - me->core_size = ALIGN(me->core_size, 4);
4359 - me->arch.got_offset = me->core_size;
4360 - me->core_size += me->arch.got_size;
4361 - me->arch.plt_offset = me->core_size;
4362 - me->core_size += me->arch.plt_size;
4363 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
4364 + me->arch.got_offset = me->core_size_rw;
4365 + me->core_size_rw += me->arch.got_size;
4366 + me->arch.plt_offset = me->core_size_rx;
4367 + me->core_size_rx += me->arch.plt_size;
4368 return 0;
4369 }
4370
4371 @@ -242,7 +242,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4372 if (info->got_initialized == 0) {
4373 Elf_Addr *gotent;
4374
4375 - gotent = me->module_core + me->arch.got_offset +
4376 + gotent = me->module_core_rw + me->arch.got_offset +
4377 info->got_offset;
4378 *gotent = val;
4379 info->got_initialized = 1;
4380 @@ -266,7 +266,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4381 else if (r_type == R_390_GOTENT ||
4382 r_type == R_390_GOTPLTENT)
4383 *(unsigned int *) loc =
4384 - (val + (Elf_Addr) me->module_core - loc) >> 1;
4385 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
4386 else if (r_type == R_390_GOT64 ||
4387 r_type == R_390_GOTPLT64)
4388 *(unsigned long *) loc = val;
4389 @@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4390 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
4391 if (info->plt_initialized == 0) {
4392 unsigned int *ip;
4393 - ip = me->module_core + me->arch.plt_offset +
4394 + ip = me->module_core_rx + me->arch.plt_offset +
4395 info->plt_offset;
4396 #ifndef CONFIG_64BIT
4397 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
4398 @@ -305,7 +305,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4399 val - loc + 0xffffUL < 0x1ffffeUL) ||
4400 (r_type == R_390_PLT32DBL &&
4401 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
4402 - val = (Elf_Addr) me->module_core +
4403 + val = (Elf_Addr) me->module_core_rx +
4404 me->arch.plt_offset +
4405 info->plt_offset;
4406 val += rela->r_addend - loc;
4407 @@ -327,7 +327,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4408 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
4409 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
4410 val = val + rela->r_addend -
4411 - ((Elf_Addr) me->module_core + me->arch.got_offset);
4412 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
4413 if (r_type == R_390_GOTOFF16)
4414 *(unsigned short *) loc = val;
4415 else if (r_type == R_390_GOTOFF32)
4416 @@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4417 break;
4418 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
4419 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
4420 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
4421 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
4422 rela->r_addend - loc;
4423 if (r_type == R_390_GOTPC)
4424 *(unsigned int *) loc = val;
4425 diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
4426 index e795933..b32563c 100644
4427 --- a/arch/s390/kernel/process.c
4428 +++ b/arch/s390/kernel/process.c
4429 @@ -323,39 +323,3 @@ unsigned long get_wchan(struct task_struct *p)
4430 }
4431 return 0;
4432 }
4433 -
4434 -unsigned long arch_align_stack(unsigned long sp)
4435 -{
4436 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
4437 - sp -= get_random_int() & ~PAGE_MASK;
4438 - return sp & ~0xf;
4439 -}
4440 -
4441 -static inline unsigned long brk_rnd(void)
4442 -{
4443 - /* 8MB for 32bit, 1GB for 64bit */
4444 - if (is_32bit_task())
4445 - return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
4446 - else
4447 - return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
4448 -}
4449 -
4450 -unsigned long arch_randomize_brk(struct mm_struct *mm)
4451 -{
4452 - unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
4453 -
4454 - if (ret < mm->brk)
4455 - return mm->brk;
4456 - return ret;
4457 -}
4458 -
4459 -unsigned long randomize_et_dyn(unsigned long base)
4460 -{
4461 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
4462 -
4463 - if (!(current->flags & PF_RANDOMIZE))
4464 - return base;
4465 - if (ret < base)
4466 - return base;
4467 - return ret;
4468 -}
4469 diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
4470 index a0155c0..34cc491 100644
4471 --- a/arch/s390/mm/mmap.c
4472 +++ b/arch/s390/mm/mmap.c
4473 @@ -92,10 +92,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4474 */
4475 if (mmap_is_legacy()) {
4476 mm->mmap_base = TASK_UNMAPPED_BASE;
4477 +
4478 +#ifdef CONFIG_PAX_RANDMMAP
4479 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4480 + mm->mmap_base += mm->delta_mmap;
4481 +#endif
4482 +
4483 mm->get_unmapped_area = arch_get_unmapped_area;
4484 mm->unmap_area = arch_unmap_area;
4485 } else {
4486 mm->mmap_base = mmap_base();
4487 +
4488 +#ifdef CONFIG_PAX_RANDMMAP
4489 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4490 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4491 +#endif
4492 +
4493 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4494 mm->unmap_area = arch_unmap_area_topdown;
4495 }
4496 @@ -167,10 +179,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4497 */
4498 if (mmap_is_legacy()) {
4499 mm->mmap_base = TASK_UNMAPPED_BASE;
4500 +
4501 +#ifdef CONFIG_PAX_RANDMMAP
4502 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4503 + mm->mmap_base += mm->delta_mmap;
4504 +#endif
4505 +
4506 mm->get_unmapped_area = s390_get_unmapped_area;
4507 mm->unmap_area = arch_unmap_area;
4508 } else {
4509 mm->mmap_base = mmap_base();
4510 +
4511 +#ifdef CONFIG_PAX_RANDMMAP
4512 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4513 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4514 +#endif
4515 +
4516 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
4517 mm->unmap_area = arch_unmap_area_topdown;
4518 }
4519 diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
4520 index ae3d59f..f65f075 100644
4521 --- a/arch/score/include/asm/cache.h
4522 +++ b/arch/score/include/asm/cache.h
4523 @@ -1,7 +1,9 @@
4524 #ifndef _ASM_SCORE_CACHE_H
4525 #define _ASM_SCORE_CACHE_H
4526
4527 +#include <linux/const.h>
4528 +
4529 #define L1_CACHE_SHIFT 4
4530 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4531 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4532
4533 #endif /* _ASM_SCORE_CACHE_H */
4534 diff --git a/arch/score/include/asm/system.h b/arch/score/include/asm/system.h
4535 index 589d5c7..669e274 100644
4536 --- a/arch/score/include/asm/system.h
4537 +++ b/arch/score/include/asm/system.h
4538 @@ -17,7 +17,7 @@ do { \
4539 #define finish_arch_switch(prev) do {} while (0)
4540
4541 typedef void (*vi_handler_t)(void);
4542 -extern unsigned long arch_align_stack(unsigned long sp);
4543 +#define arch_align_stack(x) (x)
4544
4545 #define mb() barrier()
4546 #define rmb() barrier()
4547 diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
4548 index 25d0803..d6c8e36 100644
4549 --- a/arch/score/kernel/process.c
4550 +++ b/arch/score/kernel/process.c
4551 @@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task)
4552
4553 return task_pt_regs(task)->cp0_epc;
4554 }
4555 -
4556 -unsigned long arch_align_stack(unsigned long sp)
4557 -{
4558 - return sp;
4559 -}
4560 diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
4561 index ef9e555..331bd29 100644
4562 --- a/arch/sh/include/asm/cache.h
4563 +++ b/arch/sh/include/asm/cache.h
4564 @@ -9,10 +9,11 @@
4565 #define __ASM_SH_CACHE_H
4566 #ifdef __KERNEL__
4567
4568 +#include <linux/const.h>
4569 #include <linux/init.h>
4570 #include <cpu/cache.h>
4571
4572 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4573 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4574
4575 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4576
4577 diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
4578 index afeb710..d1d1289 100644
4579 --- a/arch/sh/mm/mmap.c
4580 +++ b/arch/sh/mm/mmap.c
4581 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
4582 addr = PAGE_ALIGN(addr);
4583
4584 vma = find_vma(mm, addr);
4585 - if (TASK_SIZE - len >= addr &&
4586 - (!vma || addr + len <= vma->vm_start))
4587 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4588 return addr;
4589 }
4590
4591 @@ -106,7 +105,7 @@ full_search:
4592 }
4593 return -ENOMEM;
4594 }
4595 - if (likely(!vma || addr + len <= vma->vm_start)) {
4596 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4597 /*
4598 * Remember the place where we stopped the search:
4599 */
4600 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4601 addr = PAGE_ALIGN(addr);
4602
4603 vma = find_vma(mm, addr);
4604 - if (TASK_SIZE - len >= addr &&
4605 - (!vma || addr + len <= vma->vm_start))
4606 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4607 return addr;
4608 }
4609
4610 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4611 /* make sure it can fit in the remaining address space */
4612 if (likely(addr > len)) {
4613 vma = find_vma(mm, addr-len);
4614 - if (!vma || addr <= vma->vm_start) {
4615 + if (check_heap_stack_gap(vma, addr - len, len)) {
4616 /* remember the address as a hint for next time */
4617 return (mm->free_area_cache = addr-len);
4618 }
4619 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4620 if (unlikely(mm->mmap_base < len))
4621 goto bottomup;
4622
4623 - addr = mm->mmap_base-len;
4624 - if (do_colour_align)
4625 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4626 + addr = mm->mmap_base - len;
4627
4628 do {
4629 + if (do_colour_align)
4630 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4631 /*
4632 * Lookup failure means no vma is above this address,
4633 * else if new region fits below vma->vm_start,
4634 * return with success:
4635 */
4636 vma = find_vma(mm, addr);
4637 - if (likely(!vma || addr+len <= vma->vm_start)) {
4638 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4639 /* remember the address as a hint for next time */
4640 return (mm->free_area_cache = addr);
4641 }
4642 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4643 mm->cached_hole_size = vma->vm_start - addr;
4644
4645 /* try just below the current vma->vm_start */
4646 - addr = vma->vm_start-len;
4647 - if (do_colour_align)
4648 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4649 - } while (likely(len < vma->vm_start));
4650 + addr = skip_heap_stack_gap(vma, len);
4651 + } while (!IS_ERR_VALUE(addr));
4652
4653 bottomup:
4654 /*
4655 diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
4656 index eddcfb3..b117d90 100644
4657 --- a/arch/sparc/Makefile
4658 +++ b/arch/sparc/Makefile
4659 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
4660 # Export what is needed by arch/sparc/boot/Makefile
4661 export VMLINUX_INIT VMLINUX_MAIN
4662 VMLINUX_INIT := $(head-y) $(init-y)
4663 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4664 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4665 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4666 VMLINUX_MAIN += $(drivers-y) $(net-y)
4667
4668 diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
4669 index 9f421df..b81fc12 100644
4670 --- a/arch/sparc/include/asm/atomic_64.h
4671 +++ b/arch/sparc/include/asm/atomic_64.h
4672 @@ -14,18 +14,40 @@
4673 #define ATOMIC64_INIT(i) { (i) }
4674
4675 #define atomic_read(v) (*(volatile int *)&(v)->counter)
4676 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
4677 +{
4678 + return v->counter;
4679 +}
4680 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
4681 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
4682 +{
4683 + return v->counter;
4684 +}
4685
4686 #define atomic_set(v, i) (((v)->counter) = i)
4687 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
4688 +{
4689 + v->counter = i;
4690 +}
4691 #define atomic64_set(v, i) (((v)->counter) = i)
4692 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
4693 +{
4694 + v->counter = i;
4695 +}
4696
4697 extern void atomic_add(int, atomic_t *);
4698 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
4699 extern void atomic64_add(long, atomic64_t *);
4700 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
4701 extern void atomic_sub(int, atomic_t *);
4702 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
4703 extern void atomic64_sub(long, atomic64_t *);
4704 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
4705
4706 extern int atomic_add_ret(int, atomic_t *);
4707 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
4708 extern long atomic64_add_ret(long, atomic64_t *);
4709 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
4710 extern int atomic_sub_ret(int, atomic_t *);
4711 extern long atomic64_sub_ret(long, atomic64_t *);
4712
4713 @@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4714 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
4715
4716 #define atomic_inc_return(v) atomic_add_ret(1, v)
4717 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
4718 +{
4719 + return atomic_add_ret_unchecked(1, v);
4720 +}
4721 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
4722 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
4723 +{
4724 + return atomic64_add_ret_unchecked(1, v);
4725 +}
4726
4727 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
4728 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
4729
4730 #define atomic_add_return(i, v) atomic_add_ret(i, v)
4731 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
4732 +{
4733 + return atomic_add_ret_unchecked(i, v);
4734 +}
4735 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
4736 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
4737 +{
4738 + return atomic64_add_ret_unchecked(i, v);
4739 +}
4740
4741 /*
4742 * atomic_inc_and_test - increment and test
4743 @@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4744 * other cases.
4745 */
4746 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
4747 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
4748 +{
4749 + return atomic_inc_return_unchecked(v) == 0;
4750 +}
4751 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
4752
4753 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
4754 @@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4755 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
4756
4757 #define atomic_inc(v) atomic_add(1, v)
4758 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
4759 +{
4760 + atomic_add_unchecked(1, v);
4761 +}
4762 #define atomic64_inc(v) atomic64_add(1, v)
4763 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
4764 +{
4765 + atomic64_add_unchecked(1, v);
4766 +}
4767
4768 #define atomic_dec(v) atomic_sub(1, v)
4769 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
4770 +{
4771 + atomic_sub_unchecked(1, v);
4772 +}
4773 #define atomic64_dec(v) atomic64_sub(1, v)
4774 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
4775 +{
4776 + atomic64_sub_unchecked(1, v);
4777 +}
4778
4779 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
4780 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
4781
4782 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
4783 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
4784 +{
4785 + return cmpxchg(&v->counter, old, new);
4786 +}
4787 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
4788 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
4789 +{
4790 + return xchg(&v->counter, new);
4791 +}
4792
4793 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
4794 {
4795 - int c, old;
4796 + int c, old, new;
4797 c = atomic_read(v);
4798 for (;;) {
4799 - if (unlikely(c == (u)))
4800 + if (unlikely(c == u))
4801 break;
4802 - old = atomic_cmpxchg((v), c, c + (a));
4803 +
4804 + asm volatile("addcc %2, %0, %0\n"
4805 +
4806 +#ifdef CONFIG_PAX_REFCOUNT
4807 + "tvs %%icc, 6\n"
4808 +#endif
4809 +
4810 + : "=r" (new)
4811 + : "0" (c), "ir" (a)
4812 + : "cc");
4813 +
4814 + old = atomic_cmpxchg(v, c, new);
4815 if (likely(old == c))
4816 break;
4817 c = old;
4818 @@ -89,20 +166,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
4819 #define atomic64_cmpxchg(v, o, n) \
4820 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
4821 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
4822 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
4823 +{
4824 + return xchg(&v->counter, new);
4825 +}
4826
4827 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
4828 {
4829 - long c, old;
4830 + long c, old, new;
4831 c = atomic64_read(v);
4832 for (;;) {
4833 - if (unlikely(c == (u)))
4834 + if (unlikely(c == u))
4835 break;
4836 - old = atomic64_cmpxchg((v), c, c + (a));
4837 +
4838 + asm volatile("addcc %2, %0, %0\n"
4839 +
4840 +#ifdef CONFIG_PAX_REFCOUNT
4841 + "tvs %%xcc, 6\n"
4842 +#endif
4843 +
4844 + : "=r" (new)
4845 + : "0" (c), "ir" (a)
4846 + : "cc");
4847 +
4848 + old = atomic64_cmpxchg(v, c, new);
4849 if (likely(old == c))
4850 break;
4851 c = old;
4852 }
4853 - return c != (u);
4854 + return c != u;
4855 }
4856
4857 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
4858 diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
4859 index 69358b5..9d0d492 100644
4860 --- a/arch/sparc/include/asm/cache.h
4861 +++ b/arch/sparc/include/asm/cache.h
4862 @@ -7,10 +7,12 @@
4863 #ifndef _SPARC_CACHE_H
4864 #define _SPARC_CACHE_H
4865
4866 +#include <linux/const.h>
4867 +
4868 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
4869
4870 #define L1_CACHE_SHIFT 5
4871 -#define L1_CACHE_BYTES 32
4872 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4873
4874 #ifdef CONFIG_SPARC32
4875 #define SMP_CACHE_BYTES_SHIFT 5
4876 diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
4877 index 4269ca6..e3da77f 100644
4878 --- a/arch/sparc/include/asm/elf_32.h
4879 +++ b/arch/sparc/include/asm/elf_32.h
4880 @@ -114,6 +114,13 @@ typedef struct {
4881
4882 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
4883
4884 +#ifdef CONFIG_PAX_ASLR
4885 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
4886 +
4887 +#define PAX_DELTA_MMAP_LEN 16
4888 +#define PAX_DELTA_STACK_LEN 16
4889 +#endif
4890 +
4891 /* This yields a mask that user programs can use to figure out what
4892 instruction set this cpu supports. This can NOT be done in userspace
4893 on Sparc. */
4894 diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
4895 index 7df8b7f..4946269 100644
4896 --- a/arch/sparc/include/asm/elf_64.h
4897 +++ b/arch/sparc/include/asm/elf_64.h
4898 @@ -180,6 +180,13 @@ typedef struct {
4899 #define ELF_ET_DYN_BASE 0x0000010000000000UL
4900 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
4901
4902 +#ifdef CONFIG_PAX_ASLR
4903 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
4904 +
4905 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
4906 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
4907 +#endif
4908 +
4909 extern unsigned long sparc64_elf_hwcap;
4910 #define ELF_HWCAP sparc64_elf_hwcap
4911
4912 diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
4913 index a790cc6..091ed94 100644
4914 --- a/arch/sparc/include/asm/pgtable_32.h
4915 +++ b/arch/sparc/include/asm/pgtable_32.h
4916 @@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
4917 BTFIXUPDEF_INT(page_none)
4918 BTFIXUPDEF_INT(page_copy)
4919 BTFIXUPDEF_INT(page_readonly)
4920 +
4921 +#ifdef CONFIG_PAX_PAGEEXEC
4922 +BTFIXUPDEF_INT(page_shared_noexec)
4923 +BTFIXUPDEF_INT(page_copy_noexec)
4924 +BTFIXUPDEF_INT(page_readonly_noexec)
4925 +#endif
4926 +
4927 BTFIXUPDEF_INT(page_kernel)
4928
4929 #define PMD_SHIFT SUN4C_PMD_SHIFT
4930 @@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
4931 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
4932 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
4933
4934 +#ifdef CONFIG_PAX_PAGEEXEC
4935 +extern pgprot_t PAGE_SHARED_NOEXEC;
4936 +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
4937 +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
4938 +#else
4939 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
4940 +# define PAGE_COPY_NOEXEC PAGE_COPY
4941 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
4942 +#endif
4943 +
4944 extern unsigned long page_kernel;
4945
4946 #ifdef MODULE
4947 diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
4948 index f6ae2b2..b03ffc7 100644
4949 --- a/arch/sparc/include/asm/pgtsrmmu.h
4950 +++ b/arch/sparc/include/asm/pgtsrmmu.h
4951 @@ -115,6 +115,13 @@
4952 SRMMU_EXEC | SRMMU_REF)
4953 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
4954 SRMMU_EXEC | SRMMU_REF)
4955 +
4956 +#ifdef CONFIG_PAX_PAGEEXEC
4957 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
4958 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
4959 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
4960 +#endif
4961 +
4962 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
4963 SRMMU_DIRTY | SRMMU_REF)
4964
4965 diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
4966 index 9689176..63c18ea 100644
4967 --- a/arch/sparc/include/asm/spinlock_64.h
4968 +++ b/arch/sparc/include/asm/spinlock_64.h
4969 @@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
4970
4971 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
4972
4973 -static void inline arch_read_lock(arch_rwlock_t *lock)
4974 +static inline void arch_read_lock(arch_rwlock_t *lock)
4975 {
4976 unsigned long tmp1, tmp2;
4977
4978 __asm__ __volatile__ (
4979 "1: ldsw [%2], %0\n"
4980 " brlz,pn %0, 2f\n"
4981 -"4: add %0, 1, %1\n"
4982 +"4: addcc %0, 1, %1\n"
4983 +
4984 +#ifdef CONFIG_PAX_REFCOUNT
4985 +" tvs %%icc, 6\n"
4986 +#endif
4987 +
4988 " cas [%2], %0, %1\n"
4989 " cmp %0, %1\n"
4990 " bne,pn %%icc, 1b\n"
4991 @@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
4992 " .previous"
4993 : "=&r" (tmp1), "=&r" (tmp2)
4994 : "r" (lock)
4995 - : "memory");
4996 + : "memory", "cc");
4997 }
4998
4999 -static int inline arch_read_trylock(arch_rwlock_t *lock)
5000 +static inline int arch_read_trylock(arch_rwlock_t *lock)
5001 {
5002 int tmp1, tmp2;
5003
5004 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
5005 "1: ldsw [%2], %0\n"
5006 " brlz,a,pn %0, 2f\n"
5007 " mov 0, %0\n"
5008 -" add %0, 1, %1\n"
5009 +" addcc %0, 1, %1\n"
5010 +
5011 +#ifdef CONFIG_PAX_REFCOUNT
5012 +" tvs %%icc, 6\n"
5013 +#endif
5014 +
5015 " cas [%2], %0, %1\n"
5016 " cmp %0, %1\n"
5017 " bne,pn %%icc, 1b\n"
5018 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
5019 return tmp1;
5020 }
5021
5022 -static void inline arch_read_unlock(arch_rwlock_t *lock)
5023 +static inline void arch_read_unlock(arch_rwlock_t *lock)
5024 {
5025 unsigned long tmp1, tmp2;
5026
5027 __asm__ __volatile__(
5028 "1: lduw [%2], %0\n"
5029 -" sub %0, 1, %1\n"
5030 +" subcc %0, 1, %1\n"
5031 +
5032 +#ifdef CONFIG_PAX_REFCOUNT
5033 +" tvs %%icc, 6\n"
5034 +#endif
5035 +
5036 " cas [%2], %0, %1\n"
5037 " cmp %0, %1\n"
5038 " bne,pn %%xcc, 1b\n"
5039 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
5040 : "memory");
5041 }
5042
5043 -static void inline arch_write_lock(arch_rwlock_t *lock)
5044 +static inline void arch_write_lock(arch_rwlock_t *lock)
5045 {
5046 unsigned long mask, tmp1, tmp2;
5047
5048 @@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
5049 : "memory");
5050 }
5051
5052 -static void inline arch_write_unlock(arch_rwlock_t *lock)
5053 +static inline void arch_write_unlock(arch_rwlock_t *lock)
5054 {
5055 __asm__ __volatile__(
5056 " stw %%g0, [%0]"
5057 @@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
5058 : "memory");
5059 }
5060
5061 -static int inline arch_write_trylock(arch_rwlock_t *lock)
5062 +static inline int arch_write_trylock(arch_rwlock_t *lock)
5063 {
5064 unsigned long mask, tmp1, tmp2, result;
5065
5066 diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
5067 index c2a1080..21ed218 100644
5068 --- a/arch/sparc/include/asm/thread_info_32.h
5069 +++ b/arch/sparc/include/asm/thread_info_32.h
5070 @@ -50,6 +50,8 @@ struct thread_info {
5071 unsigned long w_saved;
5072
5073 struct restart_block restart_block;
5074 +
5075 + unsigned long lowest_stack;
5076 };
5077
5078 /*
5079 diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
5080 index 01d057f..0a02f7e 100644
5081 --- a/arch/sparc/include/asm/thread_info_64.h
5082 +++ b/arch/sparc/include/asm/thread_info_64.h
5083 @@ -63,6 +63,8 @@ struct thread_info {
5084 struct pt_regs *kern_una_regs;
5085 unsigned int kern_una_insn;
5086
5087 + unsigned long lowest_stack;
5088 +
5089 unsigned long fpregs[0] __attribute__ ((aligned(64)));
5090 };
5091
5092 diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
5093 index e88fbe5..96b0ce5 100644
5094 --- a/arch/sparc/include/asm/uaccess.h
5095 +++ b/arch/sparc/include/asm/uaccess.h
5096 @@ -1,5 +1,13 @@
5097 #ifndef ___ASM_SPARC_UACCESS_H
5098 #define ___ASM_SPARC_UACCESS_H
5099 +
5100 +#ifdef __KERNEL__
5101 +#ifndef __ASSEMBLY__
5102 +#include <linux/types.h>
5103 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
5104 +#endif
5105 +#endif
5106 +
5107 #if defined(__sparc__) && defined(__arch64__)
5108 #include <asm/uaccess_64.h>
5109 #else
5110 diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
5111 index 8303ac4..07f333d 100644
5112 --- a/arch/sparc/include/asm/uaccess_32.h
5113 +++ b/arch/sparc/include/asm/uaccess_32.h
5114 @@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
5115
5116 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
5117 {
5118 - if (n && __access_ok((unsigned long) to, n))
5119 + if ((long)n < 0)
5120 + return n;
5121 +
5122 + if (n && __access_ok((unsigned long) to, n)) {
5123 + if (!__builtin_constant_p(n))
5124 + check_object_size(from, n, true);
5125 return __copy_user(to, (__force void __user *) from, n);
5126 - else
5127 + } else
5128 return n;
5129 }
5130
5131 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
5132 {
5133 + if ((long)n < 0)
5134 + return n;
5135 +
5136 + if (!__builtin_constant_p(n))
5137 + check_object_size(from, n, true);
5138 +
5139 return __copy_user(to, (__force void __user *) from, n);
5140 }
5141
5142 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
5143 {
5144 - if (n && __access_ok((unsigned long) from, n))
5145 + if ((long)n < 0)
5146 + return n;
5147 +
5148 + if (n && __access_ok((unsigned long) from, n)) {
5149 + if (!__builtin_constant_p(n))
5150 + check_object_size(to, n, false);
5151 return __copy_user((__force void __user *) to, from, n);
5152 - else
5153 + } else
5154 return n;
5155 }
5156
5157 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
5158 {
5159 + if ((long)n < 0)
5160 + return n;
5161 +
5162 return __copy_user((__force void __user *) to, from, n);
5163 }
5164
5165 diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
5166 index 3e1449f..5293a0e 100644
5167 --- a/arch/sparc/include/asm/uaccess_64.h
5168 +++ b/arch/sparc/include/asm/uaccess_64.h
5169 @@ -10,6 +10,7 @@
5170 #include <linux/compiler.h>
5171 #include <linux/string.h>
5172 #include <linux/thread_info.h>
5173 +#include <linux/kernel.h>
5174 #include <asm/asi.h>
5175 #include <asm/system.h>
5176 #include <asm/spitfire.h>
5177 @@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
5178 static inline unsigned long __must_check
5179 copy_from_user(void *to, const void __user *from, unsigned long size)
5180 {
5181 - unsigned long ret = ___copy_from_user(to, from, size);
5182 + unsigned long ret;
5183
5184 + if ((long)size < 0 || size > INT_MAX)
5185 + return size;
5186 +
5187 + if (!__builtin_constant_p(size))
5188 + check_object_size(to, size, false);
5189 +
5190 + ret = ___copy_from_user(to, from, size);
5191 if (unlikely(ret))
5192 ret = copy_from_user_fixup(to, from, size);
5193
5194 @@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
5195 static inline unsigned long __must_check
5196 copy_to_user(void __user *to, const void *from, unsigned long size)
5197 {
5198 - unsigned long ret = ___copy_to_user(to, from, size);
5199 + unsigned long ret;
5200
5201 + if ((long)size < 0 || size > INT_MAX)
5202 + return size;
5203 +
5204 + if (!__builtin_constant_p(size))
5205 + check_object_size(from, size, true);
5206 +
5207 + ret = ___copy_to_user(to, from, size);
5208 if (unlikely(ret))
5209 ret = copy_to_user_fixup(to, from, size);
5210 return ret;
5211 diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
5212 index cb85458..e063f17 100644
5213 --- a/arch/sparc/kernel/Makefile
5214 +++ b/arch/sparc/kernel/Makefile
5215 @@ -3,7 +3,7 @@
5216 #
5217
5218 asflags-y := -ansi
5219 -ccflags-y := -Werror
5220 +#ccflags-y := -Werror
5221
5222 extra-y := head_$(BITS).o
5223 extra-y += init_task.o
5224 diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
5225 index f793742..4d880af 100644
5226 --- a/arch/sparc/kernel/process_32.c
5227 +++ b/arch/sparc/kernel/process_32.c
5228 @@ -204,7 +204,7 @@ void __show_backtrace(unsigned long fp)
5229 rw->ins[4], rw->ins[5],
5230 rw->ins[6],
5231 rw->ins[7]);
5232 - printk("%pS\n", (void *) rw->ins[7]);
5233 + printk("%pA\n", (void *) rw->ins[7]);
5234 rw = (struct reg_window32 *) rw->ins[6];
5235 }
5236 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
5237 @@ -271,14 +271,14 @@ void show_regs(struct pt_regs *r)
5238
5239 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
5240 r->psr, r->pc, r->npc, r->y, print_tainted());
5241 - printk("PC: <%pS>\n", (void *) r->pc);
5242 + printk("PC: <%pA>\n", (void *) r->pc);
5243 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5244 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
5245 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
5246 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5247 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
5248 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
5249 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
5250 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
5251
5252 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5253 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
5254 @@ -313,7 +313,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
5255 rw = (struct reg_window32 *) fp;
5256 pc = rw->ins[7];
5257 printk("[%08lx : ", pc);
5258 - printk("%pS ] ", (void *) pc);
5259 + printk("%pA ] ", (void *) pc);
5260 fp = rw->ins[6];
5261 } while (++count < 16);
5262 printk("\n");
5263 diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
5264 index 39d8b05..d1a7d90 100644
5265 --- a/arch/sparc/kernel/process_64.c
5266 +++ b/arch/sparc/kernel/process_64.c
5267 @@ -182,14 +182,14 @@ static void show_regwindow(struct pt_regs *regs)
5268 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
5269 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
5270 if (regs->tstate & TSTATE_PRIV)
5271 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
5272 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
5273 }
5274
5275 void show_regs(struct pt_regs *regs)
5276 {
5277 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
5278 regs->tpc, regs->tnpc, regs->y, print_tainted());
5279 - printk("TPC: <%pS>\n", (void *) regs->tpc);
5280 + printk("TPC: <%pA>\n", (void *) regs->tpc);
5281 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
5282 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
5283 regs->u_regs[3]);
5284 @@ -202,7 +202,7 @@ void show_regs(struct pt_regs *regs)
5285 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
5286 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
5287 regs->u_regs[15]);
5288 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
5289 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
5290 show_regwindow(regs);
5291 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
5292 }
5293 @@ -287,7 +287,7 @@ void arch_trigger_all_cpu_backtrace(void)
5294 ((tp && tp->task) ? tp->task->pid : -1));
5295
5296 if (gp->tstate & TSTATE_PRIV) {
5297 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
5298 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
5299 (void *) gp->tpc,
5300 (void *) gp->o7,
5301 (void *) gp->i7,
5302 diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
5303 index 42b282f..28ce9f2 100644
5304 --- a/arch/sparc/kernel/sys_sparc_32.c
5305 +++ b/arch/sparc/kernel/sys_sparc_32.c
5306 @@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5307 if (ARCH_SUN4C && len > 0x20000000)
5308 return -ENOMEM;
5309 if (!addr)
5310 - addr = TASK_UNMAPPED_BASE;
5311 + addr = current->mm->mmap_base;
5312
5313 if (flags & MAP_SHARED)
5314 addr = COLOUR_ALIGN(addr);
5315 @@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5316 }
5317 if (TASK_SIZE - PAGE_SIZE - len < addr)
5318 return -ENOMEM;
5319 - if (!vmm || addr + len <= vmm->vm_start)
5320 + if (check_heap_stack_gap(vmm, addr, len))
5321 return addr;
5322 addr = vmm->vm_end;
5323 if (flags & MAP_SHARED)
5324 diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
5325 index 232df99..cee1f9c 100644
5326 --- a/arch/sparc/kernel/sys_sparc_64.c
5327 +++ b/arch/sparc/kernel/sys_sparc_64.c
5328 @@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5329 /* We do not accept a shared mapping if it would violate
5330 * cache aliasing constraints.
5331 */
5332 - if ((flags & MAP_SHARED) &&
5333 + if ((filp || (flags & MAP_SHARED)) &&
5334 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5335 return -EINVAL;
5336 return addr;
5337 @@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5338 if (filp || (flags & MAP_SHARED))
5339 do_color_align = 1;
5340
5341 +#ifdef CONFIG_PAX_RANDMMAP
5342 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
5343 +#endif
5344 +
5345 if (addr) {
5346 if (do_color_align)
5347 addr = COLOUR_ALIGN(addr, pgoff);
5348 @@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5349 addr = PAGE_ALIGN(addr);
5350
5351 vma = find_vma(mm, addr);
5352 - if (task_size - len >= addr &&
5353 - (!vma || addr + len <= vma->vm_start))
5354 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5355 return addr;
5356 }
5357
5358 if (len > mm->cached_hole_size) {
5359 - start_addr = addr = mm->free_area_cache;
5360 + start_addr = addr = mm->free_area_cache;
5361 } else {
5362 - start_addr = addr = TASK_UNMAPPED_BASE;
5363 + start_addr = addr = mm->mmap_base;
5364 mm->cached_hole_size = 0;
5365 }
5366
5367 @@ -174,14 +177,14 @@ full_search:
5368 vma = find_vma(mm, VA_EXCLUDE_END);
5369 }
5370 if (unlikely(task_size < addr)) {
5371 - if (start_addr != TASK_UNMAPPED_BASE) {
5372 - start_addr = addr = TASK_UNMAPPED_BASE;
5373 + if (start_addr != mm->mmap_base) {
5374 + start_addr = addr = mm->mmap_base;
5375 mm->cached_hole_size = 0;
5376 goto full_search;
5377 }
5378 return -ENOMEM;
5379 }
5380 - if (likely(!vma || addr + len <= vma->vm_start)) {
5381 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5382 /*
5383 * Remember the place where we stopped the search:
5384 */
5385 @@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5386 /* We do not accept a shared mapping if it would violate
5387 * cache aliasing constraints.
5388 */
5389 - if ((flags & MAP_SHARED) &&
5390 + if ((filp || (flags & MAP_SHARED)) &&
5391 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5392 return -EINVAL;
5393 return addr;
5394 @@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5395 addr = PAGE_ALIGN(addr);
5396
5397 vma = find_vma(mm, addr);
5398 - if (task_size - len >= addr &&
5399 - (!vma || addr + len <= vma->vm_start))
5400 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5401 return addr;
5402 }
5403
5404 @@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5405 /* make sure it can fit in the remaining address space */
5406 if (likely(addr > len)) {
5407 vma = find_vma(mm, addr-len);
5408 - if (!vma || addr <= vma->vm_start) {
5409 + if (check_heap_stack_gap(vma, addr - len, len)) {
5410 /* remember the address as a hint for next time */
5411 return (mm->free_area_cache = addr-len);
5412 }
5413 @@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5414 if (unlikely(mm->mmap_base < len))
5415 goto bottomup;
5416
5417 - addr = mm->mmap_base-len;
5418 - if (do_color_align)
5419 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5420 + addr = mm->mmap_base - len;
5421
5422 do {
5423 + if (do_color_align)
5424 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5425 /*
5426 * Lookup failure means no vma is above this address,
5427 * else if new region fits below vma->vm_start,
5428 * return with success:
5429 */
5430 vma = find_vma(mm, addr);
5431 - if (likely(!vma || addr+len <= vma->vm_start)) {
5432 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5433 /* remember the address as a hint for next time */
5434 return (mm->free_area_cache = addr);
5435 }
5436 @@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5437 mm->cached_hole_size = vma->vm_start - addr;
5438
5439 /* try just below the current vma->vm_start */
5440 - addr = vma->vm_start-len;
5441 - if (do_color_align)
5442 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5443 - } while (likely(len < vma->vm_start));
5444 + addr = skip_heap_stack_gap(vma, len);
5445 + } while (!IS_ERR_VALUE(addr));
5446
5447 bottomup:
5448 /*
5449 @@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5450 gap == RLIM_INFINITY ||
5451 sysctl_legacy_va_layout) {
5452 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
5453 +
5454 +#ifdef CONFIG_PAX_RANDMMAP
5455 + if (mm->pax_flags & MF_PAX_RANDMMAP)
5456 + mm->mmap_base += mm->delta_mmap;
5457 +#endif
5458 +
5459 mm->get_unmapped_area = arch_get_unmapped_area;
5460 mm->unmap_area = arch_unmap_area;
5461 } else {
5462 @@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5463 gap = (task_size / 6 * 5);
5464
5465 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
5466 +
5467 +#ifdef CONFIG_PAX_RANDMMAP
5468 + if (mm->pax_flags & MF_PAX_RANDMMAP)
5469 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5470 +#endif
5471 +
5472 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
5473 mm->unmap_area = arch_unmap_area_topdown;
5474 }
5475 diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
5476 index 591f20c..0f1b925 100644
5477 --- a/arch/sparc/kernel/traps_32.c
5478 +++ b/arch/sparc/kernel/traps_32.c
5479 @@ -45,6 +45,8 @@ static void instruction_dump(unsigned long *pc)
5480 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
5481 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
5482
5483 +extern void gr_handle_kernel_exploit(void);
5484 +
5485 void die_if_kernel(char *str, struct pt_regs *regs)
5486 {
5487 static int die_counter;
5488 @@ -77,15 +79,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5489 count++ < 30 &&
5490 (((unsigned long) rw) >= PAGE_OFFSET) &&
5491 !(((unsigned long) rw) & 0x7)) {
5492 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
5493 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
5494 (void *) rw->ins[7]);
5495 rw = (struct reg_window32 *)rw->ins[6];
5496 }
5497 }
5498 printk("Instruction DUMP:");
5499 instruction_dump ((unsigned long *) regs->pc);
5500 - if(regs->psr & PSR_PS)
5501 + if(regs->psr & PSR_PS) {
5502 + gr_handle_kernel_exploit();
5503 do_exit(SIGKILL);
5504 + }
5505 do_exit(SIGSEGV);
5506 }
5507
5508 diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
5509 index 0cbdaa4..438e4c9 100644
5510 --- a/arch/sparc/kernel/traps_64.c
5511 +++ b/arch/sparc/kernel/traps_64.c
5512 @@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
5513 i + 1,
5514 p->trapstack[i].tstate, p->trapstack[i].tpc,
5515 p->trapstack[i].tnpc, p->trapstack[i].tt);
5516 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
5517 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
5518 }
5519 }
5520
5521 @@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
5522
5523 lvl -= 0x100;
5524 if (regs->tstate & TSTATE_PRIV) {
5525 +
5526 +#ifdef CONFIG_PAX_REFCOUNT
5527 + if (lvl == 6)
5528 + pax_report_refcount_overflow(regs);
5529 +#endif
5530 +
5531 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
5532 die_if_kernel(buffer, regs);
5533 }
5534 @@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
5535 void bad_trap_tl1(struct pt_regs *regs, long lvl)
5536 {
5537 char buffer[32];
5538 -
5539 +
5540 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
5541 0, lvl, SIGTRAP) == NOTIFY_STOP)
5542 return;
5543
5544 +#ifdef CONFIG_PAX_REFCOUNT
5545 + if (lvl == 6)
5546 + pax_report_refcount_overflow(regs);
5547 +#endif
5548 +
5549 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
5550
5551 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
5552 @@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
5553 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
5554 printk("%s" "ERROR(%d): ",
5555 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
5556 - printk("TPC<%pS>\n", (void *) regs->tpc);
5557 + printk("TPC<%pA>\n", (void *) regs->tpc);
5558 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
5559 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
5560 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
5561 @@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
5562 smp_processor_id(),
5563 (type & 0x1) ? 'I' : 'D',
5564 regs->tpc);
5565 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
5566 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
5567 panic("Irrecoverable Cheetah+ parity error.");
5568 }
5569
5570 @@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
5571 smp_processor_id(),
5572 (type & 0x1) ? 'I' : 'D',
5573 regs->tpc);
5574 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
5575 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
5576 }
5577
5578 struct sun4v_error_entry {
5579 @@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
5580
5581 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
5582 regs->tpc, tl);
5583 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
5584 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
5585 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
5586 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
5587 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
5588 (void *) regs->u_regs[UREG_I7]);
5589 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
5590 "pte[%lx] error[%lx]\n",
5591 @@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
5592
5593 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
5594 regs->tpc, tl);
5595 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
5596 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
5597 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
5598 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
5599 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
5600 (void *) regs->u_regs[UREG_I7]);
5601 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
5602 "pte[%lx] error[%lx]\n",
5603 @@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
5604 fp = (unsigned long)sf->fp + STACK_BIAS;
5605 }
5606
5607 - printk(" [%016lx] %pS\n", pc, (void *) pc);
5608 + printk(" [%016lx] %pA\n", pc, (void *) pc);
5609 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5610 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
5611 int index = tsk->curr_ret_stack;
5612 if (tsk->ret_stack && index >= graph) {
5613 pc = tsk->ret_stack[index - graph].ret;
5614 - printk(" [%016lx] %pS\n", pc, (void *) pc);
5615 + printk(" [%016lx] %pA\n", pc, (void *) pc);
5616 graph++;
5617 }
5618 }
5619 @@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
5620 return (struct reg_window *) (fp + STACK_BIAS);
5621 }
5622
5623 +extern void gr_handle_kernel_exploit(void);
5624 +
5625 void die_if_kernel(char *str, struct pt_regs *regs)
5626 {
5627 static int die_counter;
5628 @@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5629 while (rw &&
5630 count++ < 30 &&
5631 kstack_valid(tp, (unsigned long) rw)) {
5632 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
5633 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
5634 (void *) rw->ins[7]);
5635
5636 rw = kernel_stack_up(rw);
5637 @@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5638 }
5639 user_instruction_dump ((unsigned int __user *) regs->tpc);
5640 }
5641 - if (regs->tstate & TSTATE_PRIV)
5642 + if (regs->tstate & TSTATE_PRIV) {
5643 + gr_handle_kernel_exploit();
5644 do_exit(SIGKILL);
5645 + }
5646 do_exit(SIGSEGV);
5647 }
5648 EXPORT_SYMBOL(die_if_kernel);
5649 diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
5650 index 76e4ac1..78f8bb1 100644
5651 --- a/arch/sparc/kernel/unaligned_64.c
5652 +++ b/arch/sparc/kernel/unaligned_64.c
5653 @@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs *regs)
5654 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
5655
5656 if (__ratelimit(&ratelimit)) {
5657 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
5658 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
5659 regs->tpc, (void *) regs->tpc);
5660 }
5661 }
5662 diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
5663 index a3fc437..fea9957 100644
5664 --- a/arch/sparc/lib/Makefile
5665 +++ b/arch/sparc/lib/Makefile
5666 @@ -2,7 +2,7 @@
5667 #
5668
5669 asflags-y := -ansi -DST_DIV0=0x02
5670 -ccflags-y := -Werror
5671 +#ccflags-y := -Werror
5672
5673 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
5674 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
5675 diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
5676 index 59186e0..f747d7a 100644
5677 --- a/arch/sparc/lib/atomic_64.S
5678 +++ b/arch/sparc/lib/atomic_64.S
5679 @@ -18,7 +18,12 @@
5680 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
5681 BACKOFF_SETUP(%o2)
5682 1: lduw [%o1], %g1
5683 - add %g1, %o0, %g7
5684 + addcc %g1, %o0, %g7
5685 +
5686 +#ifdef CONFIG_PAX_REFCOUNT
5687 + tvs %icc, 6
5688 +#endif
5689 +
5690 cas [%o1], %g1, %g7
5691 cmp %g1, %g7
5692 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
5693 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
5694 2: BACKOFF_SPIN(%o2, %o3, 1b)
5695 .size atomic_add, .-atomic_add
5696
5697 + .globl atomic_add_unchecked
5698 + .type atomic_add_unchecked,#function
5699 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5700 + BACKOFF_SETUP(%o2)
5701 +1: lduw [%o1], %g1
5702 + add %g1, %o0, %g7
5703 + cas [%o1], %g1, %g7
5704 + cmp %g1, %g7
5705 + bne,pn %icc, 2f
5706 + nop
5707 + retl
5708 + nop
5709 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5710 + .size atomic_add_unchecked, .-atomic_add_unchecked
5711 +
5712 .globl atomic_sub
5713 .type atomic_sub,#function
5714 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5715 BACKOFF_SETUP(%o2)
5716 1: lduw [%o1], %g1
5717 - sub %g1, %o0, %g7
5718 + subcc %g1, %o0, %g7
5719 +
5720 +#ifdef CONFIG_PAX_REFCOUNT
5721 + tvs %icc, 6
5722 +#endif
5723 +
5724 cas [%o1], %g1, %g7
5725 cmp %g1, %g7
5726 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
5727 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5728 2: BACKOFF_SPIN(%o2, %o3, 1b)
5729 .size atomic_sub, .-atomic_sub
5730
5731 + .globl atomic_sub_unchecked
5732 + .type atomic_sub_unchecked,#function
5733 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
5734 + BACKOFF_SETUP(%o2)
5735 +1: lduw [%o1], %g1
5736 + sub %g1, %o0, %g7
5737 + cas [%o1], %g1, %g7
5738 + cmp %g1, %g7
5739 + bne,pn %icc, 2f
5740 + nop
5741 + retl
5742 + nop
5743 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5744 + .size atomic_sub_unchecked, .-atomic_sub_unchecked
5745 +
5746 .globl atomic_add_ret
5747 .type atomic_add_ret,#function
5748 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5749 BACKOFF_SETUP(%o2)
5750 1: lduw [%o1], %g1
5751 - add %g1, %o0, %g7
5752 + addcc %g1, %o0, %g7
5753 +
5754 +#ifdef CONFIG_PAX_REFCOUNT
5755 + tvs %icc, 6
5756 +#endif
5757 +
5758 cas [%o1], %g1, %g7
5759 cmp %g1, %g7
5760 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
5761 @@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5762 2: BACKOFF_SPIN(%o2, %o3, 1b)
5763 .size atomic_add_ret, .-atomic_add_ret
5764
5765 + .globl atomic_add_ret_unchecked
5766 + .type atomic_add_ret_unchecked,#function
5767 +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5768 + BACKOFF_SETUP(%o2)
5769 +1: lduw [%o1], %g1
5770 + addcc %g1, %o0, %g7
5771 + cas [%o1], %g1, %g7
5772 + cmp %g1, %g7
5773 + bne,pn %icc, 2f
5774 + add %g7, %o0, %g7
5775 + sra %g7, 0, %o0
5776 + retl
5777 + nop
5778 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5779 + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
5780 +
5781 .globl atomic_sub_ret
5782 .type atomic_sub_ret,#function
5783 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5784 BACKOFF_SETUP(%o2)
5785 1: lduw [%o1], %g1
5786 - sub %g1, %o0, %g7
5787 + subcc %g1, %o0, %g7
5788 +
5789 +#ifdef CONFIG_PAX_REFCOUNT
5790 + tvs %icc, 6
5791 +#endif
5792 +
5793 cas [%o1], %g1, %g7
5794 cmp %g1, %g7
5795 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
5796 @@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5797 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
5798 BACKOFF_SETUP(%o2)
5799 1: ldx [%o1], %g1
5800 - add %g1, %o0, %g7
5801 + addcc %g1, %o0, %g7
5802 +
5803 +#ifdef CONFIG_PAX_REFCOUNT
5804 + tvs %xcc, 6
5805 +#endif
5806 +
5807 casx [%o1], %g1, %g7
5808 cmp %g1, %g7
5809 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
5810 @@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
5811 2: BACKOFF_SPIN(%o2, %o3, 1b)
5812 .size atomic64_add, .-atomic64_add
5813
5814 + .globl atomic64_add_unchecked
5815 + .type atomic64_add_unchecked,#function
5816 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5817 + BACKOFF_SETUP(%o2)
5818 +1: ldx [%o1], %g1
5819 + addcc %g1, %o0, %g7
5820 + casx [%o1], %g1, %g7
5821 + cmp %g1, %g7
5822 + bne,pn %xcc, 2f
5823 + nop
5824 + retl
5825 + nop
5826 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5827 + .size atomic64_add_unchecked, .-atomic64_add_unchecked
5828 +
5829 .globl atomic64_sub
5830 .type atomic64_sub,#function
5831 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5832 BACKOFF_SETUP(%o2)
5833 1: ldx [%o1], %g1
5834 - sub %g1, %o0, %g7
5835 + subcc %g1, %o0, %g7
5836 +
5837 +#ifdef CONFIG_PAX_REFCOUNT
5838 + tvs %xcc, 6
5839 +#endif
5840 +
5841 casx [%o1], %g1, %g7
5842 cmp %g1, %g7
5843 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
5844 @@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5845 2: BACKOFF_SPIN(%o2, %o3, 1b)
5846 .size atomic64_sub, .-atomic64_sub
5847
5848 + .globl atomic64_sub_unchecked
5849 + .type atomic64_sub_unchecked,#function
5850 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
5851 + BACKOFF_SETUP(%o2)
5852 +1: ldx [%o1], %g1
5853 + subcc %g1, %o0, %g7
5854 + casx [%o1], %g1, %g7
5855 + cmp %g1, %g7
5856 + bne,pn %xcc, 2f
5857 + nop
5858 + retl
5859 + nop
5860 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5861 + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
5862 +
5863 .globl atomic64_add_ret
5864 .type atomic64_add_ret,#function
5865 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5866 BACKOFF_SETUP(%o2)
5867 1: ldx [%o1], %g1
5868 - add %g1, %o0, %g7
5869 + addcc %g1, %o0, %g7
5870 +
5871 +#ifdef CONFIG_PAX_REFCOUNT
5872 + tvs %xcc, 6
5873 +#endif
5874 +
5875 casx [%o1], %g1, %g7
5876 cmp %g1, %g7
5877 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
5878 @@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5879 2: BACKOFF_SPIN(%o2, %o3, 1b)
5880 .size atomic64_add_ret, .-atomic64_add_ret
5881
5882 + .globl atomic64_add_ret_unchecked
5883 + .type atomic64_add_ret_unchecked,#function
5884 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5885 + BACKOFF_SETUP(%o2)
5886 +1: ldx [%o1], %g1
5887 + addcc %g1, %o0, %g7
5888 + casx [%o1], %g1, %g7
5889 + cmp %g1, %g7
5890 + bne,pn %xcc, 2f
5891 + add %g7, %o0, %g7
5892 + mov %g7, %o0
5893 + retl
5894 + nop
5895 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5896 + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
5897 +
5898 .globl atomic64_sub_ret
5899 .type atomic64_sub_ret,#function
5900 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5901 BACKOFF_SETUP(%o2)
5902 1: ldx [%o1], %g1
5903 - sub %g1, %o0, %g7
5904 + subcc %g1, %o0, %g7
5905 +
5906 +#ifdef CONFIG_PAX_REFCOUNT
5907 + tvs %xcc, 6
5908 +#endif
5909 +
5910 casx [%o1], %g1, %g7
5911 cmp %g1, %g7
5912 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
5913 diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
5914 index f73c224..662af10 100644
5915 --- a/arch/sparc/lib/ksyms.c
5916 +++ b/arch/sparc/lib/ksyms.c
5917 @@ -136,12 +136,18 @@ EXPORT_SYMBOL(__downgrade_write);
5918
5919 /* Atomic counter implementation. */
5920 EXPORT_SYMBOL(atomic_add);
5921 +EXPORT_SYMBOL(atomic_add_unchecked);
5922 EXPORT_SYMBOL(atomic_add_ret);
5923 +EXPORT_SYMBOL(atomic_add_ret_unchecked);
5924 EXPORT_SYMBOL(atomic_sub);
5925 +EXPORT_SYMBOL(atomic_sub_unchecked);
5926 EXPORT_SYMBOL(atomic_sub_ret);
5927 EXPORT_SYMBOL(atomic64_add);
5928 +EXPORT_SYMBOL(atomic64_add_unchecked);
5929 EXPORT_SYMBOL(atomic64_add_ret);
5930 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
5931 EXPORT_SYMBOL(atomic64_sub);
5932 +EXPORT_SYMBOL(atomic64_sub_unchecked);
5933 EXPORT_SYMBOL(atomic64_sub_ret);
5934
5935 /* Atomic bit operations. */
5936 diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
5937 index 301421c..e2535d1 100644
5938 --- a/arch/sparc/mm/Makefile
5939 +++ b/arch/sparc/mm/Makefile
5940 @@ -2,7 +2,7 @@
5941 #
5942
5943 asflags-y := -ansi
5944 -ccflags-y := -Werror
5945 +#ccflags-y := -Werror
5946
5947 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
5948 obj-y += fault_$(BITS).o
5949 diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
5950 index 8023fd7..c8e89e9 100644
5951 --- a/arch/sparc/mm/fault_32.c
5952 +++ b/arch/sparc/mm/fault_32.c
5953 @@ -21,6 +21,9 @@
5954 #include <linux/perf_event.h>
5955 #include <linux/interrupt.h>
5956 #include <linux/kdebug.h>
5957 +#include <linux/slab.h>
5958 +#include <linux/pagemap.h>
5959 +#include <linux/compiler.h>
5960
5961 #include <asm/system.h>
5962 #include <asm/page.h>
5963 @@ -208,6 +211,268 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
5964 return safe_compute_effective_address(regs, insn);
5965 }
5966
5967 +#ifdef CONFIG_PAX_PAGEEXEC
5968 +#ifdef CONFIG_PAX_DLRESOLVE
5969 +static void pax_emuplt_close(struct vm_area_struct *vma)
5970 +{
5971 + vma->vm_mm->call_dl_resolve = 0UL;
5972 +}
5973 +
5974 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5975 +{
5976 + unsigned int *kaddr;
5977 +
5978 + vmf->page = alloc_page(GFP_HIGHUSER);
5979 + if (!vmf->page)
5980 + return VM_FAULT_OOM;
5981 +
5982 + kaddr = kmap(vmf->page);
5983 + memset(kaddr, 0, PAGE_SIZE);
5984 + kaddr[0] = 0x9DE3BFA8U; /* save */
5985 + flush_dcache_page(vmf->page);
5986 + kunmap(vmf->page);
5987 + return VM_FAULT_MAJOR;
5988 +}
5989 +
5990 +static const struct vm_operations_struct pax_vm_ops = {
5991 + .close = pax_emuplt_close,
5992 + .fault = pax_emuplt_fault
5993 +};
5994 +
5995 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
5996 +{
5997 + int ret;
5998 +
5999 + INIT_LIST_HEAD(&vma->anon_vma_chain);
6000 + vma->vm_mm = current->mm;
6001 + vma->vm_start = addr;
6002 + vma->vm_end = addr + PAGE_SIZE;
6003 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6004 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6005 + vma->vm_ops = &pax_vm_ops;
6006 +
6007 + ret = insert_vm_struct(current->mm, vma);
6008 + if (ret)
6009 + return ret;
6010 +
6011 + ++current->mm->total_vm;
6012 + return 0;
6013 +}
6014 +#endif
6015 +
6016 +/*
6017 + * PaX: decide what to do with offenders (regs->pc = fault address)
6018 + *
6019 + * returns 1 when task should be killed
6020 + * 2 when patched PLT trampoline was detected
6021 + * 3 when unpatched PLT trampoline was detected
6022 + */
6023 +static int pax_handle_fetch_fault(struct pt_regs *regs)
6024 +{
6025 +
6026 +#ifdef CONFIG_PAX_EMUPLT
6027 + int err;
6028 +
6029 + do { /* PaX: patched PLT emulation #1 */
6030 + unsigned int sethi1, sethi2, jmpl;
6031 +
6032 + err = get_user(sethi1, (unsigned int *)regs->pc);
6033 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
6034 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
6035 +
6036 + if (err)
6037 + break;
6038 +
6039 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6040 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
6041 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
6042 + {
6043 + unsigned int addr;
6044 +
6045 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6046 + addr = regs->u_regs[UREG_G1];
6047 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6048 + regs->pc = addr;
6049 + regs->npc = addr+4;
6050 + return 2;
6051 + }
6052 + } while (0);
6053 +
6054 + { /* PaX: patched PLT emulation #2 */
6055 + unsigned int ba;
6056 +
6057 + err = get_user(ba, (unsigned int *)regs->pc);
6058 +
6059 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6060 + unsigned int addr;
6061 +
6062 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6063 + regs->pc = addr;
6064 + regs->npc = addr+4;
6065 + return 2;
6066 + }
6067 + }
6068 +
6069 + do { /* PaX: patched PLT emulation #3 */
6070 + unsigned int sethi, jmpl, nop;
6071 +
6072 + err = get_user(sethi, (unsigned int *)regs->pc);
6073 + err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
6074 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
6075 +
6076 + if (err)
6077 + break;
6078 +
6079 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6080 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6081 + nop == 0x01000000U)
6082 + {
6083 + unsigned int addr;
6084 +
6085 + addr = (sethi & 0x003FFFFFU) << 10;
6086 + regs->u_regs[UREG_G1] = addr;
6087 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6088 + regs->pc = addr;
6089 + regs->npc = addr+4;
6090 + return 2;
6091 + }
6092 + } while (0);
6093 +
6094 + do { /* PaX: unpatched PLT emulation step 1 */
6095 + unsigned int sethi, ba, nop;
6096 +
6097 + err = get_user(sethi, (unsigned int *)regs->pc);
6098 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
6099 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
6100 +
6101 + if (err)
6102 + break;
6103 +
6104 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6105 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
6106 + nop == 0x01000000U)
6107 + {
6108 + unsigned int addr, save, call;
6109 +
6110 + if ((ba & 0xFFC00000U) == 0x30800000U)
6111 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6112 + else
6113 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
6114 +
6115 + err = get_user(save, (unsigned int *)addr);
6116 + err |= get_user(call, (unsigned int *)(addr+4));
6117 + err |= get_user(nop, (unsigned int *)(addr+8));
6118 + if (err)
6119 + break;
6120 +
6121 +#ifdef CONFIG_PAX_DLRESOLVE
6122 + if (save == 0x9DE3BFA8U &&
6123 + (call & 0xC0000000U) == 0x40000000U &&
6124 + nop == 0x01000000U)
6125 + {
6126 + struct vm_area_struct *vma;
6127 + unsigned long call_dl_resolve;
6128 +
6129 + down_read(&current->mm->mmap_sem);
6130 + call_dl_resolve = current->mm->call_dl_resolve;
6131 + up_read(&current->mm->mmap_sem);
6132 + if (likely(call_dl_resolve))
6133 + goto emulate;
6134 +
6135 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
6136 +
6137 + down_write(&current->mm->mmap_sem);
6138 + if (current->mm->call_dl_resolve) {
6139 + call_dl_resolve = current->mm->call_dl_resolve;
6140 + up_write(&current->mm->mmap_sem);
6141 + if (vma)
6142 + kmem_cache_free(vm_area_cachep, vma);
6143 + goto emulate;
6144 + }
6145 +
6146 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
6147 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
6148 + up_write(&current->mm->mmap_sem);
6149 + if (vma)
6150 + kmem_cache_free(vm_area_cachep, vma);
6151 + return 1;
6152 + }
6153 +
6154 + if (pax_insert_vma(vma, call_dl_resolve)) {
6155 + up_write(&current->mm->mmap_sem);
6156 + kmem_cache_free(vm_area_cachep, vma);
6157 + return 1;
6158 + }
6159 +
6160 + current->mm->call_dl_resolve = call_dl_resolve;
6161 + up_write(&current->mm->mmap_sem);
6162 +
6163 +emulate:
6164 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6165 + regs->pc = call_dl_resolve;
6166 + regs->npc = addr+4;
6167 + return 3;
6168 + }
6169 +#endif
6170 +
6171 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6172 + if ((save & 0xFFC00000U) == 0x05000000U &&
6173 + (call & 0xFFFFE000U) == 0x85C0A000U &&
6174 + nop == 0x01000000U)
6175 + {
6176 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6177 + regs->u_regs[UREG_G2] = addr + 4;
6178 + addr = (save & 0x003FFFFFU) << 10;
6179 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6180 + regs->pc = addr;
6181 + regs->npc = addr+4;
6182 + return 3;
6183 + }
6184 + }
6185 + } while (0);
6186 +
6187 + do { /* PaX: unpatched PLT emulation step 2 */
6188 + unsigned int save, call, nop;
6189 +
6190 + err = get_user(save, (unsigned int *)(regs->pc-4));
6191 + err |= get_user(call, (unsigned int *)regs->pc);
6192 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
6193 + if (err)
6194 + break;
6195 +
6196 + if (save == 0x9DE3BFA8U &&
6197 + (call & 0xC0000000U) == 0x40000000U &&
6198 + nop == 0x01000000U)
6199 + {
6200 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
6201 +
6202 + regs->u_regs[UREG_RETPC] = regs->pc;
6203 + regs->pc = dl_resolve;
6204 + regs->npc = dl_resolve+4;
6205 + return 3;
6206 + }
6207 + } while (0);
6208 +#endif
6209 +
6210 + return 1;
6211 +}
6212 +
6213 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6214 +{
6215 + unsigned long i;
6216 +
6217 + printk(KERN_ERR "PAX: bytes at PC: ");
6218 + for (i = 0; i < 8; i++) {
6219 + unsigned int c;
6220 + if (get_user(c, (unsigned int *)pc+i))
6221 + printk(KERN_CONT "???????? ");
6222 + else
6223 + printk(KERN_CONT "%08x ", c);
6224 + }
6225 + printk("\n");
6226 +}
6227 +#endif
6228 +
6229 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
6230 int text_fault)
6231 {
6232 @@ -280,6 +545,24 @@ good_area:
6233 if(!(vma->vm_flags & VM_WRITE))
6234 goto bad_area;
6235 } else {
6236 +
6237 +#ifdef CONFIG_PAX_PAGEEXEC
6238 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
6239 + up_read(&mm->mmap_sem);
6240 + switch (pax_handle_fetch_fault(regs)) {
6241 +
6242 +#ifdef CONFIG_PAX_EMUPLT
6243 + case 2:
6244 + case 3:
6245 + return;
6246 +#endif
6247 +
6248 + }
6249 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
6250 + do_group_exit(SIGKILL);
6251 + }
6252 +#endif
6253 +
6254 /* Allow reads even for write-only mappings */
6255 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
6256 goto bad_area;
6257 diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
6258 index 504c062..6fcb9c6 100644
6259 --- a/arch/sparc/mm/fault_64.c
6260 +++ b/arch/sparc/mm/fault_64.c
6261 @@ -21,6 +21,9 @@
6262 #include <linux/kprobes.h>
6263 #include <linux/kdebug.h>
6264 #include <linux/percpu.h>
6265 +#include <linux/slab.h>
6266 +#include <linux/pagemap.h>
6267 +#include <linux/compiler.h>
6268
6269 #include <asm/page.h>
6270 #include <asm/pgtable.h>
6271 @@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
6272 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
6273 regs->tpc);
6274 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
6275 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
6276 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
6277 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
6278 dump_stack();
6279 unhandled_fault(regs->tpc, current, regs);
6280 @@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
6281 show_regs(regs);
6282 }
6283
6284 +#ifdef CONFIG_PAX_PAGEEXEC
6285 +#ifdef CONFIG_PAX_DLRESOLVE
6286 +static void pax_emuplt_close(struct vm_area_struct *vma)
6287 +{
6288 + vma->vm_mm->call_dl_resolve = 0UL;
6289 +}
6290 +
6291 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6292 +{
6293 + unsigned int *kaddr;
6294 +
6295 + vmf->page = alloc_page(GFP_HIGHUSER);
6296 + if (!vmf->page)
6297 + return VM_FAULT_OOM;
6298 +
6299 + kaddr = kmap(vmf->page);
6300 + memset(kaddr, 0, PAGE_SIZE);
6301 + kaddr[0] = 0x9DE3BFA8U; /* save */
6302 + flush_dcache_page(vmf->page);
6303 + kunmap(vmf->page);
6304 + return VM_FAULT_MAJOR;
6305 +}
6306 +
6307 +static const struct vm_operations_struct pax_vm_ops = {
6308 + .close = pax_emuplt_close,
6309 + .fault = pax_emuplt_fault
6310 +};
6311 +
6312 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6313 +{
6314 + int ret;
6315 +
6316 + INIT_LIST_HEAD(&vma->anon_vma_chain);
6317 + vma->vm_mm = current->mm;
6318 + vma->vm_start = addr;
6319 + vma->vm_end = addr + PAGE_SIZE;
6320 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6321 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6322 + vma->vm_ops = &pax_vm_ops;
6323 +
6324 + ret = insert_vm_struct(current->mm, vma);
6325 + if (ret)
6326 + return ret;
6327 +
6328 + ++current->mm->total_vm;
6329 + return 0;
6330 +}
6331 +#endif
6332 +
6333 +/*
6334 + * PaX: decide what to do with offenders (regs->tpc = fault address)
6335 + *
6336 + * returns 1 when task should be killed
6337 + * 2 when patched PLT trampoline was detected
6338 + * 3 when unpatched PLT trampoline was detected
6339 + */
6340 +static int pax_handle_fetch_fault(struct pt_regs *regs)
6341 +{
6342 +
6343 +#ifdef CONFIG_PAX_EMUPLT
6344 + int err;
6345 +
6346 + do { /* PaX: patched PLT emulation #1 */
6347 + unsigned int sethi1, sethi2, jmpl;
6348 +
6349 + err = get_user(sethi1, (unsigned int *)regs->tpc);
6350 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
6351 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
6352 +
6353 + if (err)
6354 + break;
6355 +
6356 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6357 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
6358 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
6359 + {
6360 + unsigned long addr;
6361 +
6362 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6363 + addr = regs->u_regs[UREG_G1];
6364 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6365 +
6366 + if (test_thread_flag(TIF_32BIT))
6367 + addr &= 0xFFFFFFFFUL;
6368 +
6369 + regs->tpc = addr;
6370 + regs->tnpc = addr+4;
6371 + return 2;
6372 + }
6373 + } while (0);
6374 +
6375 + { /* PaX: patched PLT emulation #2 */
6376 + unsigned int ba;
6377 +
6378 + err = get_user(ba, (unsigned int *)regs->tpc);
6379 +
6380 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6381 + unsigned long addr;
6382 +
6383 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6384 +
6385 + if (test_thread_flag(TIF_32BIT))
6386 + addr &= 0xFFFFFFFFUL;
6387 +
6388 + regs->tpc = addr;
6389 + regs->tnpc = addr+4;
6390 + return 2;
6391 + }
6392 + }
6393 +
6394 + do { /* PaX: patched PLT emulation #3 */
6395 + unsigned int sethi, jmpl, nop;
6396 +
6397 + err = get_user(sethi, (unsigned int *)regs->tpc);
6398 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
6399 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6400 +
6401 + if (err)
6402 + break;
6403 +
6404 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6405 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6406 + nop == 0x01000000U)
6407 + {
6408 + unsigned long addr;
6409 +
6410 + addr = (sethi & 0x003FFFFFU) << 10;
6411 + regs->u_regs[UREG_G1] = addr;
6412 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6413 +
6414 + if (test_thread_flag(TIF_32BIT))
6415 + addr &= 0xFFFFFFFFUL;
6416 +
6417 + regs->tpc = addr;
6418 + regs->tnpc = addr+4;
6419 + return 2;
6420 + }
6421 + } while (0);
6422 +
6423 + do { /* PaX: patched PLT emulation #4 */
6424 + unsigned int sethi, mov1, call, mov2;
6425 +
6426 + err = get_user(sethi, (unsigned int *)regs->tpc);
6427 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
6428 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
6429 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
6430 +
6431 + if (err)
6432 + break;
6433 +
6434 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6435 + mov1 == 0x8210000FU &&
6436 + (call & 0xC0000000U) == 0x40000000U &&
6437 + mov2 == 0x9E100001U)
6438 + {
6439 + unsigned long addr;
6440 +
6441 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
6442 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
6443 +
6444 + if (test_thread_flag(TIF_32BIT))
6445 + addr &= 0xFFFFFFFFUL;
6446 +
6447 + regs->tpc = addr;
6448 + regs->tnpc = addr+4;
6449 + return 2;
6450 + }
6451 + } while (0);
6452 +
6453 + do { /* PaX: patched PLT emulation #5 */
6454 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
6455 +
6456 + err = get_user(sethi, (unsigned int *)regs->tpc);
6457 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
6458 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
6459 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
6460 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
6461 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
6462 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
6463 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
6464 +
6465 + if (err)
6466 + break;
6467 +
6468 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6469 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
6470 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6471 + (or1 & 0xFFFFE000U) == 0x82106000U &&
6472 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
6473 + sllx == 0x83287020U &&
6474 + jmpl == 0x81C04005U &&
6475 + nop == 0x01000000U)
6476 + {
6477 + unsigned long addr;
6478 +
6479 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
6480 + regs->u_regs[UREG_G1] <<= 32;
6481 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
6482 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
6483 + regs->tpc = addr;
6484 + regs->tnpc = addr+4;
6485 + return 2;
6486 + }
6487 + } while (0);
6488 +
6489 + do { /* PaX: patched PLT emulation #6 */
6490 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
6491 +
6492 + err = get_user(sethi, (unsigned int *)regs->tpc);
6493 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
6494 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
6495 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
6496 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
6497 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
6498 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
6499 +
6500 + if (err)
6501 + break;
6502 +
6503 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6504 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
6505 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6506 + sllx == 0x83287020U &&
6507 + (or & 0xFFFFE000U) == 0x8A116000U &&
6508 + jmpl == 0x81C04005U &&
6509 + nop == 0x01000000U)
6510 + {
6511 + unsigned long addr;
6512 +
6513 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
6514 + regs->u_regs[UREG_G1] <<= 32;
6515 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
6516 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
6517 + regs->tpc = addr;
6518 + regs->tnpc = addr+4;
6519 + return 2;
6520 + }
6521 + } while (0);
6522 +
6523 + do { /* PaX: unpatched PLT emulation step 1 */
6524 + unsigned int sethi, ba, nop;
6525 +
6526 + err = get_user(sethi, (unsigned int *)regs->tpc);
6527 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
6528 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6529 +
6530 + if (err)
6531 + break;
6532 +
6533 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6534 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
6535 + nop == 0x01000000U)
6536 + {
6537 + unsigned long addr;
6538 + unsigned int save, call;
6539 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
6540 +
6541 + if ((ba & 0xFFC00000U) == 0x30800000U)
6542 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6543 + else
6544 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
6545 +
6546 + if (test_thread_flag(TIF_32BIT))
6547 + addr &= 0xFFFFFFFFUL;
6548 +
6549 + err = get_user(save, (unsigned int *)addr);
6550 + err |= get_user(call, (unsigned int *)(addr+4));
6551 + err |= get_user(nop, (unsigned int *)(addr+8));
6552 + if (err)
6553 + break;
6554 +
6555 +#ifdef CONFIG_PAX_DLRESOLVE
6556 + if (save == 0x9DE3BFA8U &&
6557 + (call & 0xC0000000U) == 0x40000000U &&
6558 + nop == 0x01000000U)
6559 + {
6560 + struct vm_area_struct *vma;
6561 + unsigned long call_dl_resolve;
6562 +
6563 + down_read(&current->mm->mmap_sem);
6564 + call_dl_resolve = current->mm->call_dl_resolve;
6565 + up_read(&current->mm->mmap_sem);
6566 + if (likely(call_dl_resolve))
6567 + goto emulate;
6568 +
6569 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
6570 +
6571 + down_write(&current->mm->mmap_sem);
6572 + if (current->mm->call_dl_resolve) {
6573 + call_dl_resolve = current->mm->call_dl_resolve;
6574 + up_write(&current->mm->mmap_sem);
6575 + if (vma)
6576 + kmem_cache_free(vm_area_cachep, vma);
6577 + goto emulate;
6578 + }
6579 +
6580 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
6581 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
6582 + up_write(&current->mm->mmap_sem);
6583 + if (vma)
6584 + kmem_cache_free(vm_area_cachep, vma);
6585 + return 1;
6586 + }
6587 +
6588 + if (pax_insert_vma(vma, call_dl_resolve)) {
6589 + up_write(&current->mm->mmap_sem);
6590 + kmem_cache_free(vm_area_cachep, vma);
6591 + return 1;
6592 + }
6593 +
6594 + current->mm->call_dl_resolve = call_dl_resolve;
6595 + up_write(&current->mm->mmap_sem);
6596 +
6597 +emulate:
6598 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6599 + regs->tpc = call_dl_resolve;
6600 + regs->tnpc = addr+4;
6601 + return 3;
6602 + }
6603 +#endif
6604 +
6605 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6606 + if ((save & 0xFFC00000U) == 0x05000000U &&
6607 + (call & 0xFFFFE000U) == 0x85C0A000U &&
6608 + nop == 0x01000000U)
6609 + {
6610 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6611 + regs->u_regs[UREG_G2] = addr + 4;
6612 + addr = (save & 0x003FFFFFU) << 10;
6613 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6614 +
6615 + if (test_thread_flag(TIF_32BIT))
6616 + addr &= 0xFFFFFFFFUL;
6617 +
6618 + regs->tpc = addr;
6619 + regs->tnpc = addr+4;
6620 + return 3;
6621 + }
6622 +
6623 + /* PaX: 64-bit PLT stub */
6624 + err = get_user(sethi1, (unsigned int *)addr);
6625 + err |= get_user(sethi2, (unsigned int *)(addr+4));
6626 + err |= get_user(or1, (unsigned int *)(addr+8));
6627 + err |= get_user(or2, (unsigned int *)(addr+12));
6628 + err |= get_user(sllx, (unsigned int *)(addr+16));
6629 + err |= get_user(add, (unsigned int *)(addr+20));
6630 + err |= get_user(jmpl, (unsigned int *)(addr+24));
6631 + err |= get_user(nop, (unsigned int *)(addr+28));
6632 + if (err)
6633 + break;
6634 +
6635 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
6636 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6637 + (or1 & 0xFFFFE000U) == 0x88112000U &&
6638 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
6639 + sllx == 0x89293020U &&
6640 + add == 0x8A010005U &&
6641 + jmpl == 0x89C14000U &&
6642 + nop == 0x01000000U)
6643 + {
6644 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6645 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
6646 + regs->u_regs[UREG_G4] <<= 32;
6647 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
6648 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
6649 + regs->u_regs[UREG_G4] = addr + 24;
6650 + addr = regs->u_regs[UREG_G5];
6651 + regs->tpc = addr;
6652 + regs->tnpc = addr+4;
6653 + return 3;
6654 + }
6655 + }
6656 + } while (0);
6657 +
6658 +#ifdef CONFIG_PAX_DLRESOLVE
6659 + do { /* PaX: unpatched PLT emulation step 2 */
6660 + unsigned int save, call, nop;
6661 +
6662 + err = get_user(save, (unsigned int *)(regs->tpc-4));
6663 + err |= get_user(call, (unsigned int *)regs->tpc);
6664 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
6665 + if (err)
6666 + break;
6667 +
6668 + if (save == 0x9DE3BFA8U &&
6669 + (call & 0xC0000000U) == 0x40000000U &&
6670 + nop == 0x01000000U)
6671 + {
6672 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
6673 +
6674 + if (test_thread_flag(TIF_32BIT))
6675 + dl_resolve &= 0xFFFFFFFFUL;
6676 +
6677 + regs->u_regs[UREG_RETPC] = regs->tpc;
6678 + regs->tpc = dl_resolve;
6679 + regs->tnpc = dl_resolve+4;
6680 + return 3;
6681 + }
6682 + } while (0);
6683 +#endif
6684 +
6685 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
6686 + unsigned int sethi, ba, nop;
6687 +
6688 + err = get_user(sethi, (unsigned int *)regs->tpc);
6689 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
6690 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6691 +
6692 + if (err)
6693 + break;
6694 +
6695 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6696 + (ba & 0xFFF00000U) == 0x30600000U &&
6697 + nop == 0x01000000U)
6698 + {
6699 + unsigned long addr;
6700 +
6701 + addr = (sethi & 0x003FFFFFU) << 10;
6702 + regs->u_regs[UREG_G1] = addr;
6703 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
6704 +
6705 + if (test_thread_flag(TIF_32BIT))
6706 + addr &= 0xFFFFFFFFUL;
6707 +
6708 + regs->tpc = addr;
6709 + regs->tnpc = addr+4;
6710 + return 2;
6711 + }
6712 + } while (0);
6713 +
6714 +#endif
6715 +
6716 + return 1;
6717 +}
6718 +
6719 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6720 +{
6721 + unsigned long i;
6722 +
6723 + printk(KERN_ERR "PAX: bytes at PC: ");
6724 + for (i = 0; i < 8; i++) {
6725 + unsigned int c;
6726 + if (get_user(c, (unsigned int *)pc+i))
6727 + printk(KERN_CONT "???????? ");
6728 + else
6729 + printk(KERN_CONT "%08x ", c);
6730 + }
6731 + printk("\n");
6732 +}
6733 +#endif
6734 +
6735 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
6736 {
6737 struct mm_struct *mm = current->mm;
6738 @@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
6739 if (!vma)
6740 goto bad_area;
6741
6742 +#ifdef CONFIG_PAX_PAGEEXEC
6743 + /* PaX: detect ITLB misses on non-exec pages */
6744 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
6745 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
6746 + {
6747 + if (address != regs->tpc)
6748 + goto good_area;
6749 +
6750 + up_read(&mm->mmap_sem);
6751 + switch (pax_handle_fetch_fault(regs)) {
6752 +
6753 +#ifdef CONFIG_PAX_EMUPLT
6754 + case 2:
6755 + case 3:
6756 + return;
6757 +#endif
6758 +
6759 + }
6760 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
6761 + do_group_exit(SIGKILL);
6762 + }
6763 +#endif
6764 +
6765 /* Pure DTLB misses do not tell us whether the fault causing
6766 * load/store/atomic was a write or not, it only says that there
6767 * was no match. So in such a case we (carefully) read the
6768 diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
6769 index 07e1453..0a7d9e9 100644
6770 --- a/arch/sparc/mm/hugetlbpage.c
6771 +++ b/arch/sparc/mm/hugetlbpage.c
6772 @@ -67,7 +67,7 @@ full_search:
6773 }
6774 return -ENOMEM;
6775 }
6776 - if (likely(!vma || addr + len <= vma->vm_start)) {
6777 + if (likely(check_heap_stack_gap(vma, addr, len))) {
6778 /*
6779 * Remember the place where we stopped the search:
6780 */
6781 @@ -106,7 +106,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6782 /* make sure it can fit in the remaining address space */
6783 if (likely(addr > len)) {
6784 vma = find_vma(mm, addr-len);
6785 - if (!vma || addr <= vma->vm_start) {
6786 + if (check_heap_stack_gap(vma, addr - len, len)) {
6787 /* remember the address as a hint for next time */
6788 return (mm->free_area_cache = addr-len);
6789 }
6790 @@ -115,16 +115,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6791 if (unlikely(mm->mmap_base < len))
6792 goto bottomup;
6793
6794 - addr = (mm->mmap_base-len) & HPAGE_MASK;
6795 + addr = mm->mmap_base - len;
6796
6797 do {
6798 + addr &= HPAGE_MASK;
6799 /*
6800 * Lookup failure means no vma is above this address,
6801 * else if new region fits below vma->vm_start,
6802 * return with success:
6803 */
6804 vma = find_vma(mm, addr);
6805 - if (likely(!vma || addr+len <= vma->vm_start)) {
6806 + if (likely(check_heap_stack_gap(vma, addr, len))) {
6807 /* remember the address as a hint for next time */
6808 return (mm->free_area_cache = addr);
6809 }
6810 @@ -134,8 +135,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6811 mm->cached_hole_size = vma->vm_start - addr;
6812
6813 /* try just below the current vma->vm_start */
6814 - addr = (vma->vm_start-len) & HPAGE_MASK;
6815 - } while (likely(len < vma->vm_start));
6816 + addr = skip_heap_stack_gap(vma, len);
6817 + } while (!IS_ERR_VALUE(addr));
6818
6819 bottomup:
6820 /*
6821 @@ -181,8 +182,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
6822 if (addr) {
6823 addr = ALIGN(addr, HPAGE_SIZE);
6824 vma = find_vma(mm, addr);
6825 - if (task_size - len >= addr &&
6826 - (!vma || addr + len <= vma->vm_start))
6827 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
6828 return addr;
6829 }
6830 if (mm->get_unmapped_area == arch_get_unmapped_area)
6831 diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
6832 index 7b00de6..78239f4 100644
6833 --- a/arch/sparc/mm/init_32.c
6834 +++ b/arch/sparc/mm/init_32.c
6835 @@ -316,6 +316,9 @@ extern void device_scan(void);
6836 pgprot_t PAGE_SHARED __read_mostly;
6837 EXPORT_SYMBOL(PAGE_SHARED);
6838
6839 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
6840 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
6841 +
6842 void __init paging_init(void)
6843 {
6844 switch(sparc_cpu_model) {
6845 @@ -344,17 +347,17 @@ void __init paging_init(void)
6846
6847 /* Initialize the protection map with non-constant, MMU dependent values. */
6848 protection_map[0] = PAGE_NONE;
6849 - protection_map[1] = PAGE_READONLY;
6850 - protection_map[2] = PAGE_COPY;
6851 - protection_map[3] = PAGE_COPY;
6852 + protection_map[1] = PAGE_READONLY_NOEXEC;
6853 + protection_map[2] = PAGE_COPY_NOEXEC;
6854 + protection_map[3] = PAGE_COPY_NOEXEC;
6855 protection_map[4] = PAGE_READONLY;
6856 protection_map[5] = PAGE_READONLY;
6857 protection_map[6] = PAGE_COPY;
6858 protection_map[7] = PAGE_COPY;
6859 protection_map[8] = PAGE_NONE;
6860 - protection_map[9] = PAGE_READONLY;
6861 - protection_map[10] = PAGE_SHARED;
6862 - protection_map[11] = PAGE_SHARED;
6863 + protection_map[9] = PAGE_READONLY_NOEXEC;
6864 + protection_map[10] = PAGE_SHARED_NOEXEC;
6865 + protection_map[11] = PAGE_SHARED_NOEXEC;
6866 protection_map[12] = PAGE_READONLY;
6867 protection_map[13] = PAGE_READONLY;
6868 protection_map[14] = PAGE_SHARED;
6869 diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
6870 index cbef74e..c38fead 100644
6871 --- a/arch/sparc/mm/srmmu.c
6872 +++ b/arch/sparc/mm/srmmu.c
6873 @@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
6874 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
6875 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
6876 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
6877 +
6878 +#ifdef CONFIG_PAX_PAGEEXEC
6879 + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
6880 + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
6881 + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
6882 +#endif
6883 +
6884 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
6885 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
6886
6887 diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
6888 index 27fe667..36d474c 100644
6889 --- a/arch/tile/include/asm/atomic_64.h
6890 +++ b/arch/tile/include/asm/atomic_64.h
6891 @@ -142,6 +142,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
6892
6893 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
6894
6895 +#define atomic64_read_unchecked(v) atomic64_read(v)
6896 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
6897 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
6898 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
6899 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
6900 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
6901 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
6902 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
6903 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
6904 +
6905 /* Atomic dec and inc don't implement barrier, so provide them if needed. */
6906 #define smp_mb__before_atomic_dec() smp_mb()
6907 #define smp_mb__after_atomic_dec() smp_mb()
6908 diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
6909 index 392e533..536b092 100644
6910 --- a/arch/tile/include/asm/cache.h
6911 +++ b/arch/tile/include/asm/cache.h
6912 @@ -15,11 +15,12 @@
6913 #ifndef _ASM_TILE_CACHE_H
6914 #define _ASM_TILE_CACHE_H
6915
6916 +#include <linux/const.h>
6917 #include <arch/chip.h>
6918
6919 /* bytes per L1 data cache line */
6920 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
6921 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6922 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6923
6924 /* bytes per L2 cache line */
6925 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
6926 diff --git a/arch/um/Makefile b/arch/um/Makefile
6927 index 28688e6..4c0aa1c 100644
6928 --- a/arch/um/Makefile
6929 +++ b/arch/um/Makefile
6930 @@ -61,6 +61,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
6931 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
6932 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
6933
6934 +ifdef CONSTIFY_PLUGIN
6935 +USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
6936 +endif
6937 +
6938 #This will adjust *FLAGS accordingly to the platform.
6939 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
6940
6941 diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
6942 index 19e1bdd..3665b77 100644
6943 --- a/arch/um/include/asm/cache.h
6944 +++ b/arch/um/include/asm/cache.h
6945 @@ -1,6 +1,7 @@
6946 #ifndef __UM_CACHE_H
6947 #define __UM_CACHE_H
6948
6949 +#include <linux/const.h>
6950
6951 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
6952 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
6953 @@ -12,6 +13,6 @@
6954 # define L1_CACHE_SHIFT 5
6955 #endif
6956
6957 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6958 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6959
6960 #endif
6961 diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
6962 index 6c03acd..a5e0215 100644
6963 --- a/arch/um/include/asm/kmap_types.h
6964 +++ b/arch/um/include/asm/kmap_types.h
6965 @@ -23,6 +23,7 @@ enum km_type {
6966 KM_IRQ1,
6967 KM_SOFTIRQ0,
6968 KM_SOFTIRQ1,
6969 + KM_CLEARPAGE,
6970 KM_TYPE_NR
6971 };
6972
6973 diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
6974 index 7cfc3ce..cbd1a58 100644
6975 --- a/arch/um/include/asm/page.h
6976 +++ b/arch/um/include/asm/page.h
6977 @@ -14,6 +14,9 @@
6978 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
6979 #define PAGE_MASK (~(PAGE_SIZE-1))
6980
6981 +#define ktla_ktva(addr) (addr)
6982 +#define ktva_ktla(addr) (addr)
6983 +
6984 #ifndef __ASSEMBLY__
6985
6986 struct page;
6987 diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
6988 index 69f2490..2634831 100644
6989 --- a/arch/um/kernel/process.c
6990 +++ b/arch/um/kernel/process.c
6991 @@ -408,22 +408,6 @@ int singlestepping(void * t)
6992 return 2;
6993 }
6994
6995 -/*
6996 - * Only x86 and x86_64 have an arch_align_stack().
6997 - * All other arches have "#define arch_align_stack(x) (x)"
6998 - * in their asm/system.h
6999 - * As this is included in UML from asm-um/system-generic.h,
7000 - * we can use it to behave as the subarch does.
7001 - */
7002 -#ifndef arch_align_stack
7003 -unsigned long arch_align_stack(unsigned long sp)
7004 -{
7005 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7006 - sp -= get_random_int() % 8192;
7007 - return sp & ~0xf;
7008 -}
7009 -#endif
7010 -
7011 unsigned long get_wchan(struct task_struct *p)
7012 {
7013 unsigned long stack_page, sp, ip;
7014 diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
7015 index ad8f795..2c7eec6 100644
7016 --- a/arch/unicore32/include/asm/cache.h
7017 +++ b/arch/unicore32/include/asm/cache.h
7018 @@ -12,8 +12,10 @@
7019 #ifndef __UNICORE_CACHE_H__
7020 #define __UNICORE_CACHE_H__
7021
7022 -#define L1_CACHE_SHIFT (5)
7023 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7024 +#include <linux/const.h>
7025 +
7026 +#define L1_CACHE_SHIFT 5
7027 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7028
7029 /*
7030 * Memory returned by kmalloc() may be used for DMA, so we must make
7031 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
7032 index 5bed94e..fbcf200 100644
7033 --- a/arch/x86/Kconfig
7034 +++ b/arch/x86/Kconfig
7035 @@ -226,7 +226,7 @@ config X86_HT
7036
7037 config X86_32_LAZY_GS
7038 def_bool y
7039 - depends on X86_32 && !CC_STACKPROTECTOR
7040 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
7041
7042 config ARCH_HWEIGHT_CFLAGS
7043 string
7044 @@ -1058,7 +1058,7 @@ choice
7045
7046 config NOHIGHMEM
7047 bool "off"
7048 - depends on !X86_NUMAQ
7049 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7050 ---help---
7051 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
7052 However, the address space of 32-bit x86 processors is only 4
7053 @@ -1095,7 +1095,7 @@ config NOHIGHMEM
7054
7055 config HIGHMEM4G
7056 bool "4GB"
7057 - depends on !X86_NUMAQ
7058 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7059 ---help---
7060 Select this if you have a 32-bit processor and between 1 and 4
7061 gigabytes of physical RAM.
7062 @@ -1149,7 +1149,7 @@ config PAGE_OFFSET
7063 hex
7064 default 0xB0000000 if VMSPLIT_3G_OPT
7065 default 0x80000000 if VMSPLIT_2G
7066 - default 0x78000000 if VMSPLIT_2G_OPT
7067 + default 0x70000000 if VMSPLIT_2G_OPT
7068 default 0x40000000 if VMSPLIT_1G
7069 default 0xC0000000
7070 depends on X86_32
7071 @@ -1539,6 +1539,7 @@ config SECCOMP
7072
7073 config CC_STACKPROTECTOR
7074 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
7075 + depends on X86_64 || !PAX_MEMORY_UDEREF
7076 ---help---
7077 This option turns on the -fstack-protector GCC feature. This
7078 feature puts, at the beginning of functions, a canary value on
7079 @@ -1596,6 +1597,7 @@ config KEXEC_JUMP
7080 config PHYSICAL_START
7081 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
7082 default "0x1000000"
7083 + range 0x400000 0x40000000
7084 ---help---
7085 This gives the physical address where the kernel is loaded.
7086
7087 @@ -1659,6 +1661,7 @@ config X86_NEED_RELOCS
7088 config PHYSICAL_ALIGN
7089 hex "Alignment value to which kernel should be aligned" if X86_32
7090 default "0x1000000"
7091 + range 0x400000 0x1000000 if PAX_KERNEXEC
7092 range 0x2000 0x1000000
7093 ---help---
7094 This value puts the alignment restrictions on physical address
7095 @@ -1690,9 +1693,10 @@ config HOTPLUG_CPU
7096 Say N if you want to disable CPU hotplug.
7097
7098 config COMPAT_VDSO
7099 - def_bool y
7100 + def_bool n
7101 prompt "Compat VDSO support"
7102 depends on X86_32 || IA32_EMULATION
7103 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
7104 ---help---
7105 Map the 32-bit VDSO to the predictable old-style address too.
7106
7107 diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
7108 index 3c57033..22d44aa 100644
7109 --- a/arch/x86/Kconfig.cpu
7110 +++ b/arch/x86/Kconfig.cpu
7111 @@ -335,7 +335,7 @@ config X86_PPRO_FENCE
7112
7113 config X86_F00F_BUG
7114 def_bool y
7115 - depends on M586MMX || M586TSC || M586 || M486 || M386
7116 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
7117
7118 config X86_INVD_BUG
7119 def_bool y
7120 @@ -359,7 +359,7 @@ config X86_POPAD_OK
7121
7122 config X86_ALIGNMENT_16
7123 def_bool y
7124 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
7125 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
7126
7127 config X86_INTEL_USERCOPY
7128 def_bool y
7129 @@ -405,7 +405,7 @@ config X86_CMPXCHG64
7130 # generates cmov.
7131 config X86_CMOV
7132 def_bool y
7133 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
7134 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
7135
7136 config X86_MINIMUM_CPU_FAMILY
7137 int
7138 diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
7139 index e46c214..7c72b55 100644
7140 --- a/arch/x86/Kconfig.debug
7141 +++ b/arch/x86/Kconfig.debug
7142 @@ -84,7 +84,7 @@ config X86_PTDUMP
7143 config DEBUG_RODATA
7144 bool "Write protect kernel read-only data structures"
7145 default y
7146 - depends on DEBUG_KERNEL
7147 + depends on DEBUG_KERNEL && BROKEN
7148 ---help---
7149 Mark the kernel read-only data as write-protected in the pagetables,
7150 in order to catch accidental (and incorrect) writes to such const
7151 @@ -102,7 +102,7 @@ config DEBUG_RODATA_TEST
7152
7153 config DEBUG_SET_MODULE_RONX
7154 bool "Set loadable kernel module data as NX and text as RO"
7155 - depends on MODULES
7156 + depends on MODULES && BROKEN
7157 ---help---
7158 This option helps catch unintended modifications to loadable
7159 kernel module's text and read-only data. It also prevents execution
7160 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
7161 index 209ba12..15140db 100644
7162 --- a/arch/x86/Makefile
7163 +++ b/arch/x86/Makefile
7164 @@ -46,6 +46,7 @@ else
7165 UTS_MACHINE := x86_64
7166 CHECKFLAGS += -D__x86_64__ -m64
7167
7168 + biarch := $(call cc-option,-m64)
7169 KBUILD_AFLAGS += -m64
7170 KBUILD_CFLAGS += -m64
7171
7172 @@ -201,3 +202,12 @@ define archhelp
7173 echo ' FDARGS="..." arguments for the booted kernel'
7174 echo ' FDINITRD=file initrd for the booted kernel'
7175 endef
7176 +
7177 +define OLD_LD
7178 +
7179 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
7180 +*** Please upgrade your binutils to 2.18 or newer
7181 +endef
7182 +
7183 +archprepare:
7184 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
7185 diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
7186 index 95365a8..52f857b 100644
7187 --- a/arch/x86/boot/Makefile
7188 +++ b/arch/x86/boot/Makefile
7189 @@ -63,6 +63,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
7190 $(call cc-option, -fno-stack-protector) \
7191 $(call cc-option, -mpreferred-stack-boundary=2)
7192 KBUILD_CFLAGS += $(call cc-option, -m32)
7193 +ifdef CONSTIFY_PLUGIN
7194 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7195 +endif
7196 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7197 GCOV_PROFILE := n
7198
7199 diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
7200 index 878e4b9..20537ab 100644
7201 --- a/arch/x86/boot/bitops.h
7202 +++ b/arch/x86/boot/bitops.h
7203 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7204 u8 v;
7205 const u32 *p = (const u32 *)addr;
7206
7207 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7208 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7209 return v;
7210 }
7211
7212 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7213
7214 static inline void set_bit(int nr, void *addr)
7215 {
7216 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7217 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7218 }
7219
7220 #endif /* BOOT_BITOPS_H */
7221 diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
7222 index c7093bd..d4247ffe0 100644
7223 --- a/arch/x86/boot/boot.h
7224 +++ b/arch/x86/boot/boot.h
7225 @@ -85,7 +85,7 @@ static inline void io_delay(void)
7226 static inline u16 ds(void)
7227 {
7228 u16 seg;
7229 - asm("movw %%ds,%0" : "=rm" (seg));
7230 + asm volatile("movw %%ds,%0" : "=rm" (seg));
7231 return seg;
7232 }
7233
7234 @@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
7235 static inline int memcmp(const void *s1, const void *s2, size_t len)
7236 {
7237 u8 diff;
7238 - asm("repe; cmpsb; setnz %0"
7239 + asm volatile("repe; cmpsb; setnz %0"
7240 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
7241 return diff;
7242 }
7243 diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
7244 index b123b9a..2cf2f23 100644
7245 --- a/arch/x86/boot/compressed/Makefile
7246 +++ b/arch/x86/boot/compressed/Makefile
7247 @@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
7248 KBUILD_CFLAGS += $(cflags-y)
7249 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
7250 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
7251 +ifdef CONSTIFY_PLUGIN
7252 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7253 +endif
7254
7255 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7256 GCOV_PROFILE := n
7257 diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
7258 index a055993..47e126c 100644
7259 --- a/arch/x86/boot/compressed/head_32.S
7260 +++ b/arch/x86/boot/compressed/head_32.S
7261 @@ -98,7 +98,7 @@ preferred_addr:
7262 notl %eax
7263 andl %eax, %ebx
7264 #else
7265 - movl $LOAD_PHYSICAL_ADDR, %ebx
7266 + movl $____LOAD_PHYSICAL_ADDR, %ebx
7267 #endif
7268
7269 /* Target address to relocate to for decompression */
7270 @@ -184,7 +184,7 @@ relocated:
7271 * and where it was actually loaded.
7272 */
7273 movl %ebp, %ebx
7274 - subl $LOAD_PHYSICAL_ADDR, %ebx
7275 + subl $____LOAD_PHYSICAL_ADDR, %ebx
7276 jz 2f /* Nothing to be done if loaded at compiled addr. */
7277 /*
7278 * Process relocations.
7279 @@ -192,8 +192,7 @@ relocated:
7280
7281 1: subl $4, %edi
7282 movl (%edi), %ecx
7283 - testl %ecx, %ecx
7284 - jz 2f
7285 + jecxz 2f
7286 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
7287 jmp 1b
7288 2:
7289 diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
7290 index 558d76c..606aa24 100644
7291 --- a/arch/x86/boot/compressed/head_64.S
7292 +++ b/arch/x86/boot/compressed/head_64.S
7293 @@ -91,7 +91,7 @@ ENTRY(startup_32)
7294 notl %eax
7295 andl %eax, %ebx
7296 #else
7297 - movl $LOAD_PHYSICAL_ADDR, %ebx
7298 + movl $____LOAD_PHYSICAL_ADDR, %ebx
7299 #endif
7300
7301 /* Target address to relocate to for decompression */
7302 @@ -253,7 +253,7 @@ preferred_addr:
7303 notq %rax
7304 andq %rax, %rbp
7305 #else
7306 - movq $LOAD_PHYSICAL_ADDR, %rbp
7307 + movq $____LOAD_PHYSICAL_ADDR, %rbp
7308 #endif
7309
7310 /* Target address to relocate to for decompression */
7311 diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
7312 index 7116dcb..d9ae1d7 100644
7313 --- a/arch/x86/boot/compressed/misc.c
7314 +++ b/arch/x86/boot/compressed/misc.c
7315 @@ -310,7 +310,7 @@ static void parse_elf(void *output)
7316 case PT_LOAD:
7317 #ifdef CONFIG_RELOCATABLE
7318 dest = output;
7319 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
7320 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
7321 #else
7322 dest = (void *)(phdr->p_paddr);
7323 #endif
7324 @@ -365,7 +365,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
7325 error("Destination address too large");
7326 #endif
7327 #ifndef CONFIG_RELOCATABLE
7328 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
7329 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
7330 error("Wrong destination address");
7331 #endif
7332
7333 diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
7334 index 89bbf4e..869908e 100644
7335 --- a/arch/x86/boot/compressed/relocs.c
7336 +++ b/arch/x86/boot/compressed/relocs.c
7337 @@ -13,8 +13,11 @@
7338
7339 static void die(char *fmt, ...);
7340
7341 +#include "../../../../include/generated/autoconf.h"
7342 +
7343 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
7344 static Elf32_Ehdr ehdr;
7345 +static Elf32_Phdr *phdr;
7346 static unsigned long reloc_count, reloc_idx;
7347 static unsigned long *relocs;
7348
7349 @@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
7350 }
7351 }
7352
7353 +static void read_phdrs(FILE *fp)
7354 +{
7355 + unsigned int i;
7356 +
7357 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
7358 + if (!phdr) {
7359 + die("Unable to allocate %d program headers\n",
7360 + ehdr.e_phnum);
7361 + }
7362 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
7363 + die("Seek to %d failed: %s\n",
7364 + ehdr.e_phoff, strerror(errno));
7365 + }
7366 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
7367 + die("Cannot read ELF program headers: %s\n",
7368 + strerror(errno));
7369 + }
7370 + for(i = 0; i < ehdr.e_phnum; i++) {
7371 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
7372 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
7373 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
7374 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
7375 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
7376 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
7377 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
7378 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
7379 + }
7380 +
7381 +}
7382 +
7383 static void read_shdrs(FILE *fp)
7384 {
7385 - int i;
7386 + unsigned int i;
7387 Elf32_Shdr shdr;
7388
7389 secs = calloc(ehdr.e_shnum, sizeof(struct section));
7390 @@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
7391
7392 static void read_strtabs(FILE *fp)
7393 {
7394 - int i;
7395 + unsigned int i;
7396 for (i = 0; i < ehdr.e_shnum; i++) {
7397 struct section *sec = &secs[i];
7398 if (sec->shdr.sh_type != SHT_STRTAB) {
7399 @@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
7400
7401 static void read_symtabs(FILE *fp)
7402 {
7403 - int i,j;
7404 + unsigned int i,j;
7405 for (i = 0; i < ehdr.e_shnum; i++) {
7406 struct section *sec = &secs[i];
7407 if (sec->shdr.sh_type != SHT_SYMTAB) {
7408 @@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
7409
7410 static void read_relocs(FILE *fp)
7411 {
7412 - int i,j;
7413 + unsigned int i,j;
7414 + uint32_t base;
7415 +
7416 for (i = 0; i < ehdr.e_shnum; i++) {
7417 struct section *sec = &secs[i];
7418 if (sec->shdr.sh_type != SHT_REL) {
7419 @@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
7420 die("Cannot read symbol table: %s\n",
7421 strerror(errno));
7422 }
7423 + base = 0;
7424 + for (j = 0; j < ehdr.e_phnum; j++) {
7425 + if (phdr[j].p_type != PT_LOAD )
7426 + continue;
7427 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
7428 + continue;
7429 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
7430 + break;
7431 + }
7432 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
7433 Elf32_Rel *rel = &sec->reltab[j];
7434 - rel->r_offset = elf32_to_cpu(rel->r_offset);
7435 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
7436 rel->r_info = elf32_to_cpu(rel->r_info);
7437 }
7438 }
7439 @@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
7440
7441 static void print_absolute_symbols(void)
7442 {
7443 - int i;
7444 + unsigned int i;
7445 printf("Absolute symbols\n");
7446 printf(" Num: Value Size Type Bind Visibility Name\n");
7447 for (i = 0; i < ehdr.e_shnum; i++) {
7448 struct section *sec = &secs[i];
7449 char *sym_strtab;
7450 Elf32_Sym *sh_symtab;
7451 - int j;
7452 + unsigned int j;
7453
7454 if (sec->shdr.sh_type != SHT_SYMTAB) {
7455 continue;
7456 @@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
7457
7458 static void print_absolute_relocs(void)
7459 {
7460 - int i, printed = 0;
7461 + unsigned int i, printed = 0;
7462
7463 for (i = 0; i < ehdr.e_shnum; i++) {
7464 struct section *sec = &secs[i];
7465 struct section *sec_applies, *sec_symtab;
7466 char *sym_strtab;
7467 Elf32_Sym *sh_symtab;
7468 - int j;
7469 + unsigned int j;
7470 if (sec->shdr.sh_type != SHT_REL) {
7471 continue;
7472 }
7473 @@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
7474
7475 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
7476 {
7477 - int i;
7478 + unsigned int i;
7479 /* Walk through the relocations */
7480 for (i = 0; i < ehdr.e_shnum; i++) {
7481 char *sym_strtab;
7482 Elf32_Sym *sh_symtab;
7483 struct section *sec_applies, *sec_symtab;
7484 - int j;
7485 + unsigned int j;
7486 struct section *sec = &secs[i];
7487
7488 if (sec->shdr.sh_type != SHT_REL) {
7489 @@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
7490 !is_rel_reloc(sym_name(sym_strtab, sym))) {
7491 continue;
7492 }
7493 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
7494 + if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
7495 + continue;
7496 +
7497 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
7498 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
7499 + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
7500 + continue;
7501 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
7502 + continue;
7503 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
7504 + continue;
7505 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
7506 + continue;
7507 +#endif
7508 +
7509 switch (r_type) {
7510 case R_386_NONE:
7511 case R_386_PC32:
7512 @@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, const void *vb)
7513
7514 static void emit_relocs(int as_text)
7515 {
7516 - int i;
7517 + unsigned int i;
7518 /* Count how many relocations I have and allocate space for them. */
7519 reloc_count = 0;
7520 walk_relocs(count_reloc);
7521 @@ -665,6 +725,7 @@ int main(int argc, char **argv)
7522 fname, strerror(errno));
7523 }
7524 read_ehdr(fp);
7525 + read_phdrs(fp);
7526 read_shdrs(fp);
7527 read_strtabs(fp);
7528 read_symtabs(fp);
7529 diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
7530 index 4d3ff03..e4972ff 100644
7531 --- a/arch/x86/boot/cpucheck.c
7532 +++ b/arch/x86/boot/cpucheck.c
7533 @@ -74,7 +74,7 @@ static int has_fpu(void)
7534 u16 fcw = -1, fsw = -1;
7535 u32 cr0;
7536
7537 - asm("movl %%cr0,%0" : "=r" (cr0));
7538 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
7539 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
7540 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
7541 asm volatile("movl %0,%%cr0" : : "r" (cr0));
7542 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
7543 {
7544 u32 f0, f1;
7545
7546 - asm("pushfl ; "
7547 + asm volatile("pushfl ; "
7548 "pushfl ; "
7549 "popl %0 ; "
7550 "movl %0,%1 ; "
7551 @@ -115,7 +115,7 @@ static void get_flags(void)
7552 set_bit(X86_FEATURE_FPU, cpu.flags);
7553
7554 if (has_eflag(X86_EFLAGS_ID)) {
7555 - asm("cpuid"
7556 + asm volatile("cpuid"
7557 : "=a" (max_intel_level),
7558 "=b" (cpu_vendor[0]),
7559 "=d" (cpu_vendor[1]),
7560 @@ -124,7 +124,7 @@ static void get_flags(void)
7561
7562 if (max_intel_level >= 0x00000001 &&
7563 max_intel_level <= 0x0000ffff) {
7564 - asm("cpuid"
7565 + asm volatile("cpuid"
7566 : "=a" (tfms),
7567 "=c" (cpu.flags[4]),
7568 "=d" (cpu.flags[0])
7569 @@ -136,7 +136,7 @@ static void get_flags(void)
7570 cpu.model += ((tfms >> 16) & 0xf) << 4;
7571 }
7572
7573 - asm("cpuid"
7574 + asm volatile("cpuid"
7575 : "=a" (max_amd_level)
7576 : "a" (0x80000000)
7577 : "ebx", "ecx", "edx");
7578 @@ -144,7 +144,7 @@ static void get_flags(void)
7579 if (max_amd_level >= 0x80000001 &&
7580 max_amd_level <= 0x8000ffff) {
7581 u32 eax = 0x80000001;
7582 - asm("cpuid"
7583 + asm volatile("cpuid"
7584 : "+a" (eax),
7585 "=c" (cpu.flags[6]),
7586 "=d" (cpu.flags[1])
7587 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7588 u32 ecx = MSR_K7_HWCR;
7589 u32 eax, edx;
7590
7591 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7592 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7593 eax &= ~(1 << 15);
7594 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7595 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7596
7597 get_flags(); /* Make sure it really did something */
7598 err = check_flags();
7599 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7600 u32 ecx = MSR_VIA_FCR;
7601 u32 eax, edx;
7602
7603 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7604 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7605 eax |= (1<<1)|(1<<7);
7606 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7607 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7608
7609 set_bit(X86_FEATURE_CX8, cpu.flags);
7610 err = check_flags();
7611 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7612 u32 eax, edx;
7613 u32 level = 1;
7614
7615 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7616 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
7617 - asm("cpuid"
7618 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7619 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
7620 + asm volatile("cpuid"
7621 : "+a" (level), "=d" (cpu.flags[0])
7622 : : "ecx", "ebx");
7623 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7624 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7625
7626 err = check_flags();
7627 }
7628 diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
7629 index f1bbeeb..aff09cb 100644
7630 --- a/arch/x86/boot/header.S
7631 +++ b/arch/x86/boot/header.S
7632 @@ -372,7 +372,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
7633 # single linked list of
7634 # struct setup_data
7635
7636 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
7637 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
7638
7639 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
7640 #define VO_INIT_SIZE (VO__end - VO__text)
7641 diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
7642 index db75d07..8e6d0af 100644
7643 --- a/arch/x86/boot/memory.c
7644 +++ b/arch/x86/boot/memory.c
7645 @@ -19,7 +19,7 @@
7646
7647 static int detect_memory_e820(void)
7648 {
7649 - int count = 0;
7650 + unsigned int count = 0;
7651 struct biosregs ireg, oreg;
7652 struct e820entry *desc = boot_params.e820_map;
7653 static struct e820entry buf; /* static so it is zeroed */
7654 diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
7655 index 11e8c6e..fdbb1ed 100644
7656 --- a/arch/x86/boot/video-vesa.c
7657 +++ b/arch/x86/boot/video-vesa.c
7658 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
7659
7660 boot_params.screen_info.vesapm_seg = oreg.es;
7661 boot_params.screen_info.vesapm_off = oreg.di;
7662 + boot_params.screen_info.vesapm_size = oreg.cx;
7663 }
7664
7665 /*
7666 diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
7667 index 43eda28..5ab5fdb 100644
7668 --- a/arch/x86/boot/video.c
7669 +++ b/arch/x86/boot/video.c
7670 @@ -96,7 +96,7 @@ static void store_mode_params(void)
7671 static unsigned int get_entry(void)
7672 {
7673 char entry_buf[4];
7674 - int i, len = 0;
7675 + unsigned int i, len = 0;
7676 int key;
7677 unsigned int v;
7678
7679 diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
7680 index 5b577d5..3c1fed4 100644
7681 --- a/arch/x86/crypto/aes-x86_64-asm_64.S
7682 +++ b/arch/x86/crypto/aes-x86_64-asm_64.S
7683 @@ -8,6 +8,8 @@
7684 * including this sentence is retained in full.
7685 */
7686
7687 +#include <asm/alternative-asm.h>
7688 +
7689 .extern crypto_ft_tab
7690 .extern crypto_it_tab
7691 .extern crypto_fl_tab
7692 @@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
7693 je B192; \
7694 leaq 32(r9),r9;
7695
7696 +#define ret pax_force_retaddr 0, 1; ret
7697 +
7698 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
7699 movq r1,r2; \
7700 movq r3,r4; \
7701 diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
7702 index be6d9e3..21fbbca 100644
7703 --- a/arch/x86/crypto/aesni-intel_asm.S
7704 +++ b/arch/x86/crypto/aesni-intel_asm.S
7705 @@ -31,6 +31,7 @@
7706
7707 #include <linux/linkage.h>
7708 #include <asm/inst.h>
7709 +#include <asm/alternative-asm.h>
7710
7711 #ifdef __x86_64__
7712 .data
7713 @@ -1436,7 +1437,9 @@ _return_T_done_decrypt:
7714 pop %r14
7715 pop %r13
7716 pop %r12
7717 + pax_force_retaddr 0, 1
7718 ret
7719 +ENDPROC(aesni_gcm_dec)
7720
7721
7722 /*****************************************************************************
7723 @@ -1699,7 +1702,9 @@ _return_T_done_encrypt:
7724 pop %r14
7725 pop %r13
7726 pop %r12
7727 + pax_force_retaddr 0, 1
7728 ret
7729 +ENDPROC(aesni_gcm_enc)
7730
7731 #endif
7732
7733 @@ -1714,6 +1719,7 @@ _key_expansion_256a:
7734 pxor %xmm1, %xmm0
7735 movaps %xmm0, (TKEYP)
7736 add $0x10, TKEYP
7737 + pax_force_retaddr_bts
7738 ret
7739
7740 .align 4
7741 @@ -1738,6 +1744,7 @@ _key_expansion_192a:
7742 shufps $0b01001110, %xmm2, %xmm1
7743 movaps %xmm1, 0x10(TKEYP)
7744 add $0x20, TKEYP
7745 + pax_force_retaddr_bts
7746 ret
7747
7748 .align 4
7749 @@ -1757,6 +1764,7 @@ _key_expansion_192b:
7750
7751 movaps %xmm0, (TKEYP)
7752 add $0x10, TKEYP
7753 + pax_force_retaddr_bts
7754 ret
7755
7756 .align 4
7757 @@ -1769,6 +1777,7 @@ _key_expansion_256b:
7758 pxor %xmm1, %xmm2
7759 movaps %xmm2, (TKEYP)
7760 add $0x10, TKEYP
7761 + pax_force_retaddr_bts
7762 ret
7763
7764 /*
7765 @@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key)
7766 #ifndef __x86_64__
7767 popl KEYP
7768 #endif
7769 + pax_force_retaddr 0, 1
7770 ret
7771 +ENDPROC(aesni_set_key)
7772
7773 /*
7774 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
7775 @@ -1902,7 +1913,9 @@ ENTRY(aesni_enc)
7776 popl KLEN
7777 popl KEYP
7778 #endif
7779 + pax_force_retaddr 0, 1
7780 ret
7781 +ENDPROC(aesni_enc)
7782
7783 /*
7784 * _aesni_enc1: internal ABI
7785 @@ -1959,6 +1972,7 @@ _aesni_enc1:
7786 AESENC KEY STATE
7787 movaps 0x70(TKEYP), KEY
7788 AESENCLAST KEY STATE
7789 + pax_force_retaddr_bts
7790 ret
7791
7792 /*
7793 @@ -2067,6 +2081,7 @@ _aesni_enc4:
7794 AESENCLAST KEY STATE2
7795 AESENCLAST KEY STATE3
7796 AESENCLAST KEY STATE4
7797 + pax_force_retaddr_bts
7798 ret
7799
7800 /*
7801 @@ -2089,7 +2104,9 @@ ENTRY(aesni_dec)
7802 popl KLEN
7803 popl KEYP
7804 #endif
7805 + pax_force_retaddr 0, 1
7806 ret
7807 +ENDPROC(aesni_dec)
7808
7809 /*
7810 * _aesni_dec1: internal ABI
7811 @@ -2146,6 +2163,7 @@ _aesni_dec1:
7812 AESDEC KEY STATE
7813 movaps 0x70(TKEYP), KEY
7814 AESDECLAST KEY STATE
7815 + pax_force_retaddr_bts
7816 ret
7817
7818 /*
7819 @@ -2254,6 +2272,7 @@ _aesni_dec4:
7820 AESDECLAST KEY STATE2
7821 AESDECLAST KEY STATE3
7822 AESDECLAST KEY STATE4
7823 + pax_force_retaddr_bts
7824 ret
7825
7826 /*
7827 @@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc)
7828 popl KEYP
7829 popl LEN
7830 #endif
7831 + pax_force_retaddr 0, 1
7832 ret
7833 +ENDPROC(aesni_ecb_enc)
7834
7835 /*
7836 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7837 @@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec)
7838 popl KEYP
7839 popl LEN
7840 #endif
7841 + pax_force_retaddr 0, 1
7842 ret
7843 +ENDPROC(aesni_ecb_dec)
7844
7845 /*
7846 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7847 @@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc)
7848 popl LEN
7849 popl IVP
7850 #endif
7851 + pax_force_retaddr 0, 1
7852 ret
7853 +ENDPROC(aesni_cbc_enc)
7854
7855 /*
7856 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7857 @@ -2498,7 +2523,9 @@ ENTRY(aesni_cbc_dec)
7858 popl LEN
7859 popl IVP
7860 #endif
7861 + pax_force_retaddr 0, 1
7862 ret
7863 +ENDPROC(aesni_cbc_dec)
7864
7865 #ifdef __x86_64__
7866 .align 16
7867 @@ -2524,6 +2551,7 @@ _aesni_inc_init:
7868 mov $1, TCTR_LOW
7869 MOVQ_R64_XMM TCTR_LOW INC
7870 MOVQ_R64_XMM CTR TCTR_LOW
7871 + pax_force_retaddr_bts
7872 ret
7873
7874 /*
7875 @@ -2552,6 +2580,7 @@ _aesni_inc:
7876 .Linc_low:
7877 movaps CTR, IV
7878 PSHUFB_XMM BSWAP_MASK IV
7879 + pax_force_retaddr_bts
7880 ret
7881
7882 /*
7883 @@ -2612,5 +2641,7 @@ ENTRY(aesni_ctr_enc)
7884 .Lctr_enc_ret:
7885 movups IV, (IVP)
7886 .Lctr_enc_just_ret:
7887 + pax_force_retaddr 0, 1
7888 ret
7889 +ENDPROC(aesni_ctr_enc)
7890 #endif
7891 diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
7892 index 391d245..67f35c2 100644
7893 --- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
7894 +++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
7895 @@ -20,6 +20,8 @@
7896 *
7897 */
7898
7899 +#include <asm/alternative-asm.h>
7900 +
7901 .file "blowfish-x86_64-asm.S"
7902 .text
7903
7904 @@ -151,9 +153,11 @@ __blowfish_enc_blk:
7905 jnz __enc_xor;
7906
7907 write_block();
7908 + pax_force_retaddr 0, 1
7909 ret;
7910 __enc_xor:
7911 xor_block();
7912 + pax_force_retaddr 0, 1
7913 ret;
7914
7915 .align 8
7916 @@ -188,6 +192,7 @@ blowfish_dec_blk:
7917
7918 movq %r11, %rbp;
7919
7920 + pax_force_retaddr 0, 1
7921 ret;
7922
7923 /**********************************************************************
7924 @@ -342,6 +347,7 @@ __blowfish_enc_blk_4way:
7925
7926 popq %rbx;
7927 popq %rbp;
7928 + pax_force_retaddr 0, 1
7929 ret;
7930
7931 __enc_xor4:
7932 @@ -349,6 +355,7 @@ __enc_xor4:
7933
7934 popq %rbx;
7935 popq %rbp;
7936 + pax_force_retaddr 0, 1
7937 ret;
7938
7939 .align 8
7940 @@ -386,5 +393,6 @@ blowfish_dec_blk_4way:
7941 popq %rbx;
7942 popq %rbp;
7943
7944 + pax_force_retaddr 0, 1
7945 ret;
7946
7947 diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
7948 index 6214a9b..1f4fc9a 100644
7949 --- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
7950 +++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
7951 @@ -1,3 +1,5 @@
7952 +#include <asm/alternative-asm.h>
7953 +
7954 # enter ECRYPT_encrypt_bytes
7955 .text
7956 .p2align 5
7957 @@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
7958 add %r11,%rsp
7959 mov %rdi,%rax
7960 mov %rsi,%rdx
7961 + pax_force_retaddr 0, 1
7962 ret
7963 # bytesatleast65:
7964 ._bytesatleast65:
7965 @@ -891,6 +894,7 @@ ECRYPT_keysetup:
7966 add %r11,%rsp
7967 mov %rdi,%rax
7968 mov %rsi,%rdx
7969 + pax_force_retaddr
7970 ret
7971 # enter ECRYPT_ivsetup
7972 .text
7973 @@ -917,4 +921,5 @@ ECRYPT_ivsetup:
7974 add %r11,%rsp
7975 mov %rdi,%rax
7976 mov %rsi,%rdx
7977 + pax_force_retaddr
7978 ret
7979 diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
7980 index 7f24a15..9cd3ffe 100644
7981 --- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
7982 +++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
7983 @@ -24,6 +24,8 @@
7984 *
7985 */
7986
7987 +#include <asm/alternative-asm.h>
7988 +
7989 .file "serpent-sse2-x86_64-asm_64.S"
7990 .text
7991
7992 @@ -695,12 +697,14 @@ __serpent_enc_blk_8way:
7993 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
7994 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
7995
7996 + pax_force_retaddr
7997 ret;
7998
7999 __enc_xor8:
8000 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
8001 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
8002
8003 + pax_force_retaddr
8004 ret;
8005
8006 .align 8
8007 @@ -758,4 +762,5 @@ serpent_dec_blk_8way:
8008 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
8009 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
8010
8011 + pax_force_retaddr
8012 ret;
8013 diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
8014 index b2c2f57..8470cab 100644
8015 --- a/arch/x86/crypto/sha1_ssse3_asm.S
8016 +++ b/arch/x86/crypto/sha1_ssse3_asm.S
8017 @@ -28,6 +28,8 @@
8018 * (at your option) any later version.
8019 */
8020
8021 +#include <asm/alternative-asm.h>
8022 +
8023 #define CTX %rdi // arg1
8024 #define BUF %rsi // arg2
8025 #define CNT %rdx // arg3
8026 @@ -104,6 +106,7 @@
8027 pop %r12
8028 pop %rbp
8029 pop %rbx
8030 + pax_force_retaddr 0, 1
8031 ret
8032
8033 .size \name, .-\name
8034 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8035 index 5b012a2..36d5364 100644
8036 --- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8037 +++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8038 @@ -20,6 +20,8 @@
8039 *
8040 */
8041
8042 +#include <asm/alternative-asm.h>
8043 +
8044 .file "twofish-x86_64-asm-3way.S"
8045 .text
8046
8047 @@ -260,6 +262,7 @@ __twofish_enc_blk_3way:
8048 popq %r13;
8049 popq %r14;
8050 popq %r15;
8051 + pax_force_retaddr 0, 1
8052 ret;
8053
8054 __enc_xor3:
8055 @@ -271,6 +274,7 @@ __enc_xor3:
8056 popq %r13;
8057 popq %r14;
8058 popq %r15;
8059 + pax_force_retaddr 0, 1
8060 ret;
8061
8062 .global twofish_dec_blk_3way
8063 @@ -312,5 +316,6 @@ twofish_dec_blk_3way:
8064 popq %r13;
8065 popq %r14;
8066 popq %r15;
8067 + pax_force_retaddr 0, 1
8068 ret;
8069
8070 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
8071 index 7bcf3fc..f53832f 100644
8072 --- a/arch/x86/crypto/twofish-x86_64-asm_64.S
8073 +++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
8074 @@ -21,6 +21,7 @@
8075 .text
8076
8077 #include <asm/asm-offsets.h>
8078 +#include <asm/alternative-asm.h>
8079
8080 #define a_offset 0
8081 #define b_offset 4
8082 @@ -268,6 +269,7 @@ twofish_enc_blk:
8083
8084 popq R1
8085 movq $1,%rax
8086 + pax_force_retaddr 0, 1
8087 ret
8088
8089 twofish_dec_blk:
8090 @@ -319,4 +321,5 @@ twofish_dec_blk:
8091
8092 popq R1
8093 movq $1,%rax
8094 + pax_force_retaddr 0, 1
8095 ret
8096 diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
8097 index 39e4909..887aa7e 100644
8098 --- a/arch/x86/ia32/ia32_aout.c
8099 +++ b/arch/x86/ia32/ia32_aout.c
8100 @@ -162,6 +162,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
8101 unsigned long dump_start, dump_size;
8102 struct user32 dump;
8103
8104 + memset(&dump, 0, sizeof(dump));
8105 +
8106 fs = get_fs();
8107 set_fs(KERNEL_DS);
8108 has_dumped = 1;
8109 diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
8110 index 6557769..ef6ae89 100644
8111 --- a/arch/x86/ia32/ia32_signal.c
8112 +++ b/arch/x86/ia32/ia32_signal.c
8113 @@ -169,7 +169,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
8114 }
8115 seg = get_fs();
8116 set_fs(KERNEL_DS);
8117 - ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
8118 + ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
8119 set_fs(seg);
8120 if (ret >= 0 && uoss_ptr) {
8121 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
8122 @@ -370,7 +370,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
8123 */
8124 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8125 size_t frame_size,
8126 - void **fpstate)
8127 + void __user **fpstate)
8128 {
8129 unsigned long sp;
8130
8131 @@ -391,7 +391,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8132
8133 if (used_math()) {
8134 sp = sp - sig_xstate_ia32_size;
8135 - *fpstate = (struct _fpstate_ia32 *) sp;
8136 + *fpstate = (struct _fpstate_ia32 __user *) sp;
8137 if (save_i387_xstate_ia32(*fpstate) < 0)
8138 return (void __user *) -1L;
8139 }
8140 @@ -399,7 +399,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8141 sp -= frame_size;
8142 /* Align the stack pointer according to the i386 ABI,
8143 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
8144 - sp = ((sp + 4) & -16ul) - 4;
8145 + sp = ((sp - 12) & -16ul) - 4;
8146 return (void __user *) sp;
8147 }
8148
8149 @@ -457,7 +457,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
8150 * These are actually not used anymore, but left because some
8151 * gdb versions depend on them as a marker.
8152 */
8153 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8154 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8155 } put_user_catch(err);
8156
8157 if (err)
8158 @@ -499,7 +499,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8159 0xb8,
8160 __NR_ia32_rt_sigreturn,
8161 0x80cd,
8162 - 0,
8163 + 0
8164 };
8165
8166 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
8167 @@ -529,16 +529,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8168
8169 if (ka->sa.sa_flags & SA_RESTORER)
8170 restorer = ka->sa.sa_restorer;
8171 + else if (current->mm->context.vdso)
8172 + /* Return stub is in 32bit vsyscall page */
8173 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
8174 else
8175 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
8176 - rt_sigreturn);
8177 + restorer = &frame->retcode;
8178 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
8179
8180 /*
8181 * Not actually used anymore, but left because some gdb
8182 * versions need it.
8183 */
8184 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8185 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8186 } put_user_catch(err);
8187
8188 if (err)
8189 diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
8190 index e3e7340..05ed805 100644
8191 --- a/arch/x86/ia32/ia32entry.S
8192 +++ b/arch/x86/ia32/ia32entry.S
8193 @@ -13,8 +13,10 @@
8194 #include <asm/thread_info.h>
8195 #include <asm/segment.h>
8196 #include <asm/irqflags.h>
8197 +#include <asm/pgtable.h>
8198 #include <linux/linkage.h>
8199 #include <linux/err.h>
8200 +#include <asm/alternative-asm.h>
8201
8202 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
8203 #include <linux/elf-em.h>
8204 @@ -94,6 +96,32 @@ ENTRY(native_irq_enable_sysexit)
8205 ENDPROC(native_irq_enable_sysexit)
8206 #endif
8207
8208 + .macro pax_enter_kernel_user
8209 + pax_set_fptr_mask
8210 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8211 + call pax_enter_kernel_user
8212 +#endif
8213 + .endm
8214 +
8215 + .macro pax_exit_kernel_user
8216 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8217 + call pax_exit_kernel_user
8218 +#endif
8219 +#ifdef CONFIG_PAX_RANDKSTACK
8220 + pushq %rax
8221 + pushq %r11
8222 + call pax_randomize_kstack
8223 + popq %r11
8224 + popq %rax
8225 +#endif
8226 + .endm
8227 +
8228 +.macro pax_erase_kstack
8229 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
8230 + call pax_erase_kstack
8231 +#endif
8232 +.endm
8233 +
8234 /*
8235 * 32bit SYSENTER instruction entry.
8236 *
8237 @@ -120,12 +148,6 @@ ENTRY(ia32_sysenter_target)
8238 CFI_REGISTER rsp,rbp
8239 SWAPGS_UNSAFE_STACK
8240 movq PER_CPU_VAR(kernel_stack), %rsp
8241 - addq $(KERNEL_STACK_OFFSET),%rsp
8242 - /*
8243 - * No need to follow this irqs on/off section: the syscall
8244 - * disabled irqs, here we enable it straight after entry:
8245 - */
8246 - ENABLE_INTERRUPTS(CLBR_NONE)
8247 movl %ebp,%ebp /* zero extension */
8248 pushq_cfi $__USER32_DS
8249 /*CFI_REL_OFFSET ss,0*/
8250 @@ -133,24 +155,39 @@ ENTRY(ia32_sysenter_target)
8251 CFI_REL_OFFSET rsp,0
8252 pushfq_cfi
8253 /*CFI_REL_OFFSET rflags,0*/
8254 - movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
8255 - CFI_REGISTER rip,r10
8256 + orl $X86_EFLAGS_IF,(%rsp)
8257 + GET_THREAD_INFO(%r11)
8258 + movl TI_sysenter_return(%r11), %r11d
8259 + CFI_REGISTER rip,r11
8260 pushq_cfi $__USER32_CS
8261 /*CFI_REL_OFFSET cs,0*/
8262 movl %eax, %eax
8263 - pushq_cfi %r10
8264 + pushq_cfi %r11
8265 CFI_REL_OFFSET rip,0
8266 pushq_cfi %rax
8267 cld
8268 SAVE_ARGS 0,1,0
8269 + pax_enter_kernel_user
8270 + /*
8271 + * No need to follow this irqs on/off section: the syscall
8272 + * disabled irqs, here we enable it straight after entry:
8273 + */
8274 + ENABLE_INTERRUPTS(CLBR_NONE)
8275 /* no need to do an access_ok check here because rbp has been
8276 32bit zero extended */
8277 +
8278 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8279 + mov $PAX_USER_SHADOW_BASE,%r11
8280 + add %r11,%rbp
8281 +#endif
8282 +
8283 1: movl (%rbp),%ebp
8284 .section __ex_table,"a"
8285 .quad 1b,ia32_badarg
8286 .previous
8287 - orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8288 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8289 + GET_THREAD_INFO(%r11)
8290 + orl $TS_COMPAT,TI_status(%r11)
8291 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8292 CFI_REMEMBER_STATE
8293 jnz sysenter_tracesys
8294 cmpq $(IA32_NR_syscalls-1),%rax
8295 @@ -160,12 +197,15 @@ sysenter_do_call:
8296 sysenter_dispatch:
8297 call *ia32_sys_call_table(,%rax,8)
8298 movq %rax,RAX-ARGOFFSET(%rsp)
8299 + GET_THREAD_INFO(%r11)
8300 DISABLE_INTERRUPTS(CLBR_NONE)
8301 TRACE_IRQS_OFF
8302 - testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8303 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8304 jnz sysexit_audit
8305 sysexit_from_sys_call:
8306 - andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8307 + pax_exit_kernel_user
8308 + pax_erase_kstack
8309 + andl $~TS_COMPAT,TI_status(%r11)
8310 /* clear IF, that popfq doesn't enable interrupts early */
8311 andl $~0x200,EFLAGS-R11(%rsp)
8312 movl RIP-R11(%rsp),%edx /* User %eip */
8313 @@ -191,6 +231,9 @@ sysexit_from_sys_call:
8314 movl %eax,%esi /* 2nd arg: syscall number */
8315 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
8316 call __audit_syscall_entry
8317 +
8318 + pax_erase_kstack
8319 +
8320 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
8321 cmpq $(IA32_NR_syscalls-1),%rax
8322 ja ia32_badsys
8323 @@ -202,7 +245,7 @@ sysexit_from_sys_call:
8324 .endm
8325
8326 .macro auditsys_exit exit
8327 - testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8328 + testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8329 jnz ia32_ret_from_sys_call
8330 TRACE_IRQS_ON
8331 sti
8332 @@ -213,11 +256,12 @@ sysexit_from_sys_call:
8333 1: setbe %al /* 1 if error, 0 if not */
8334 movzbl %al,%edi /* zero-extend that into %edi */
8335 call __audit_syscall_exit
8336 + GET_THREAD_INFO(%r11)
8337 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
8338 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
8339 cli
8340 TRACE_IRQS_OFF
8341 - testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8342 + testl %edi,TI_flags(%r11)
8343 jz \exit
8344 CLEAR_RREGS -ARGOFFSET
8345 jmp int_with_check
8346 @@ -235,7 +279,7 @@ sysexit_audit:
8347
8348 sysenter_tracesys:
8349 #ifdef CONFIG_AUDITSYSCALL
8350 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8351 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8352 jz sysenter_auditsys
8353 #endif
8354 SAVE_REST
8355 @@ -243,6 +287,9 @@ sysenter_tracesys:
8356 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
8357 movq %rsp,%rdi /* &pt_regs -> arg1 */
8358 call syscall_trace_enter
8359 +
8360 + pax_erase_kstack
8361 +
8362 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8363 RESTORE_REST
8364 cmpq $(IA32_NR_syscalls-1),%rax
8365 @@ -274,19 +321,20 @@ ENDPROC(ia32_sysenter_target)
8366 ENTRY(ia32_cstar_target)
8367 CFI_STARTPROC32 simple
8368 CFI_SIGNAL_FRAME
8369 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
8370 + CFI_DEF_CFA rsp,0
8371 CFI_REGISTER rip,rcx
8372 /*CFI_REGISTER rflags,r11*/
8373 SWAPGS_UNSAFE_STACK
8374 movl %esp,%r8d
8375 CFI_REGISTER rsp,r8
8376 movq PER_CPU_VAR(kernel_stack),%rsp
8377 + SAVE_ARGS 8*6,0,0
8378 + pax_enter_kernel_user
8379 /*
8380 * No need to follow this irqs on/off section: the syscall
8381 * disabled irqs and here we enable it straight after entry:
8382 */
8383 ENABLE_INTERRUPTS(CLBR_NONE)
8384 - SAVE_ARGS 8,0,0
8385 movl %eax,%eax /* zero extension */
8386 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
8387 movq %rcx,RIP-ARGOFFSET(%rsp)
8388 @@ -302,12 +350,19 @@ ENTRY(ia32_cstar_target)
8389 /* no need to do an access_ok check here because r8 has been
8390 32bit zero extended */
8391 /* hardware stack frame is complete now */
8392 +
8393 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8394 + mov $PAX_USER_SHADOW_BASE,%r11
8395 + add %r11,%r8
8396 +#endif
8397 +
8398 1: movl (%r8),%r9d
8399 .section __ex_table,"a"
8400 .quad 1b,ia32_badarg
8401 .previous
8402 - orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8403 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8404 + GET_THREAD_INFO(%r11)
8405 + orl $TS_COMPAT,TI_status(%r11)
8406 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8407 CFI_REMEMBER_STATE
8408 jnz cstar_tracesys
8409 cmpq $IA32_NR_syscalls-1,%rax
8410 @@ -317,12 +372,15 @@ cstar_do_call:
8411 cstar_dispatch:
8412 call *ia32_sys_call_table(,%rax,8)
8413 movq %rax,RAX-ARGOFFSET(%rsp)
8414 + GET_THREAD_INFO(%r11)
8415 DISABLE_INTERRUPTS(CLBR_NONE)
8416 TRACE_IRQS_OFF
8417 - testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8418 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8419 jnz sysretl_audit
8420 sysretl_from_sys_call:
8421 - andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8422 + pax_exit_kernel_user
8423 + pax_erase_kstack
8424 + andl $~TS_COMPAT,TI_status(%r11)
8425 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
8426 movl RIP-ARGOFFSET(%rsp),%ecx
8427 CFI_REGISTER rip,rcx
8428 @@ -350,7 +408,7 @@ sysretl_audit:
8429
8430 cstar_tracesys:
8431 #ifdef CONFIG_AUDITSYSCALL
8432 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8433 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8434 jz cstar_auditsys
8435 #endif
8436 xchgl %r9d,%ebp
8437 @@ -359,6 +417,9 @@ cstar_tracesys:
8438 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8439 movq %rsp,%rdi /* &pt_regs -> arg1 */
8440 call syscall_trace_enter
8441 +
8442 + pax_erase_kstack
8443 +
8444 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
8445 RESTORE_REST
8446 xchgl %ebp,%r9d
8447 @@ -404,19 +465,21 @@ ENTRY(ia32_syscall)
8448 CFI_REL_OFFSET rip,RIP-RIP
8449 PARAVIRT_ADJUST_EXCEPTION_FRAME
8450 SWAPGS
8451 - /*
8452 - * No need to follow this irqs on/off section: the syscall
8453 - * disabled irqs and here we enable it straight after entry:
8454 - */
8455 - ENABLE_INTERRUPTS(CLBR_NONE)
8456 movl %eax,%eax
8457 pushq_cfi %rax
8458 cld
8459 /* note the registers are not zero extended to the sf.
8460 this could be a problem. */
8461 SAVE_ARGS 0,1,0
8462 - orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8463 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8464 + pax_enter_kernel_user
8465 + /*
8466 + * No need to follow this irqs on/off section: the syscall
8467 + * disabled irqs and here we enable it straight after entry:
8468 + */
8469 + ENABLE_INTERRUPTS(CLBR_NONE)
8470 + GET_THREAD_INFO(%r11)
8471 + orl $TS_COMPAT,TI_status(%r11)
8472 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8473 jnz ia32_tracesys
8474 cmpq $(IA32_NR_syscalls-1),%rax
8475 ja ia32_badsys
8476 @@ -435,6 +498,9 @@ ia32_tracesys:
8477 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8478 movq %rsp,%rdi /* &pt_regs -> arg1 */
8479 call syscall_trace_enter
8480 +
8481 + pax_erase_kstack
8482 +
8483 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8484 RESTORE_REST
8485 cmpq $(IA32_NR_syscalls-1),%rax
8486 diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
8487 index f6f5c53..b358b28 100644
8488 --- a/arch/x86/ia32/sys_ia32.c
8489 +++ b/arch/x86/ia32/sys_ia32.c
8490 @@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
8491 */
8492 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
8493 {
8494 - typeof(ubuf->st_uid) uid = 0;
8495 - typeof(ubuf->st_gid) gid = 0;
8496 + typeof(((struct stat64 *)0)->st_uid) uid = 0;
8497 + typeof(((struct stat64 *)0)->st_gid) gid = 0;
8498 SET_UID(uid, stat->uid);
8499 SET_GID(gid, stat->gid);
8500 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
8501 @@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
8502 }
8503 set_fs(KERNEL_DS);
8504 ret = sys_rt_sigprocmask(how,
8505 - set ? (sigset_t __user *)&s : NULL,
8506 - oset ? (sigset_t __user *)&s : NULL,
8507 + set ? (sigset_t __force_user *)&s : NULL,
8508 + oset ? (sigset_t __force_user *)&s : NULL,
8509 sigsetsize);
8510 set_fs(old_fs);
8511 if (ret)
8512 @@ -332,7 +332,7 @@ asmlinkage long sys32_alarm(unsigned int seconds)
8513 return alarm_setitimer(seconds);
8514 }
8515
8516 -asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
8517 +asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
8518 int options)
8519 {
8520 return compat_sys_wait4(pid, stat_addr, options, NULL);
8521 @@ -353,7 +353,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
8522 mm_segment_t old_fs = get_fs();
8523
8524 set_fs(KERNEL_DS);
8525 - ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
8526 + ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
8527 set_fs(old_fs);
8528 if (put_compat_timespec(&t, interval))
8529 return -EFAULT;
8530 @@ -369,7 +369,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
8531 mm_segment_t old_fs = get_fs();
8532
8533 set_fs(KERNEL_DS);
8534 - ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
8535 + ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
8536 set_fs(old_fs);
8537 if (!ret) {
8538 switch (_NSIG_WORDS) {
8539 @@ -394,7 +394,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
8540 if (copy_siginfo_from_user32(&info, uinfo))
8541 return -EFAULT;
8542 set_fs(KERNEL_DS);
8543 - ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
8544 + ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
8545 set_fs(old_fs);
8546 return ret;
8547 }
8548 @@ -439,7 +439,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
8549 return -EFAULT;
8550
8551 set_fs(KERNEL_DS);
8552 - ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
8553 + ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
8554 count);
8555 set_fs(old_fs);
8556
8557 diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
8558 index 952bd01..7692c6f 100644
8559 --- a/arch/x86/include/asm/alternative-asm.h
8560 +++ b/arch/x86/include/asm/alternative-asm.h
8561 @@ -15,6 +15,45 @@
8562 .endm
8563 #endif
8564
8565 +#ifdef KERNEXEC_PLUGIN
8566 + .macro pax_force_retaddr_bts rip=0
8567 + btsq $63,\rip(%rsp)
8568 + .endm
8569 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
8570 + .macro pax_force_retaddr rip=0, reload=0
8571 + btsq $63,\rip(%rsp)
8572 + .endm
8573 + .macro pax_force_fptr ptr
8574 + btsq $63,\ptr
8575 + .endm
8576 + .macro pax_set_fptr_mask
8577 + .endm
8578 +#endif
8579 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
8580 + .macro pax_force_retaddr rip=0, reload=0
8581 + .if \reload
8582 + pax_set_fptr_mask
8583 + .endif
8584 + orq %r10,\rip(%rsp)
8585 + .endm
8586 + .macro pax_force_fptr ptr
8587 + orq %r10,\ptr
8588 + .endm
8589 + .macro pax_set_fptr_mask
8590 + movabs $0x8000000000000000,%r10
8591 + .endm
8592 +#endif
8593 +#else
8594 + .macro pax_force_retaddr rip=0, reload=0
8595 + .endm
8596 + .macro pax_force_fptr ptr
8597 + .endm
8598 + .macro pax_force_retaddr_bts rip=0
8599 + .endm
8600 + .macro pax_set_fptr_mask
8601 + .endm
8602 +#endif
8603 +
8604 .macro altinstruction_entry orig alt feature orig_len alt_len
8605 .long \orig - .
8606 .long \alt - .
8607 diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
8608 index 37ad100..7d47faa 100644
8609 --- a/arch/x86/include/asm/alternative.h
8610 +++ b/arch/x86/include/asm/alternative.h
8611 @@ -89,7 +89,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
8612 ".section .discard,\"aw\",@progbits\n" \
8613 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
8614 ".previous\n" \
8615 - ".section .altinstr_replacement, \"ax\"\n" \
8616 + ".section .altinstr_replacement, \"a\"\n" \
8617 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
8618 ".previous"
8619
8620 diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
8621 index 3ab9bdd..238033e 100644
8622 --- a/arch/x86/include/asm/apic.h
8623 +++ b/arch/x86/include/asm/apic.h
8624 @@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
8625
8626 #ifdef CONFIG_X86_LOCAL_APIC
8627
8628 -extern unsigned int apic_verbosity;
8629 +extern int apic_verbosity;
8630 extern int local_apic_timer_c2_ok;
8631
8632 extern int disable_apic;
8633 diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
8634 index 20370c6..a2eb9b0 100644
8635 --- a/arch/x86/include/asm/apm.h
8636 +++ b/arch/x86/include/asm/apm.h
8637 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
8638 __asm__ __volatile__(APM_DO_ZERO_SEGS
8639 "pushl %%edi\n\t"
8640 "pushl %%ebp\n\t"
8641 - "lcall *%%cs:apm_bios_entry\n\t"
8642 + "lcall *%%ss:apm_bios_entry\n\t"
8643 "setc %%al\n\t"
8644 "popl %%ebp\n\t"
8645 "popl %%edi\n\t"
8646 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
8647 __asm__ __volatile__(APM_DO_ZERO_SEGS
8648 "pushl %%edi\n\t"
8649 "pushl %%ebp\n\t"
8650 - "lcall *%%cs:apm_bios_entry\n\t"
8651 + "lcall *%%ss:apm_bios_entry\n\t"
8652 "setc %%bl\n\t"
8653 "popl %%ebp\n\t"
8654 "popl %%edi\n\t"
8655 diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
8656 index 58cb6d4..ca9010d 100644
8657 --- a/arch/x86/include/asm/atomic.h
8658 +++ b/arch/x86/include/asm/atomic.h
8659 @@ -22,7 +22,18 @@
8660 */
8661 static inline int atomic_read(const atomic_t *v)
8662 {
8663 - return (*(volatile int *)&(v)->counter);
8664 + return (*(volatile const int *)&(v)->counter);
8665 +}
8666 +
8667 +/**
8668 + * atomic_read_unchecked - read atomic variable
8669 + * @v: pointer of type atomic_unchecked_t
8670 + *
8671 + * Atomically reads the value of @v.
8672 + */
8673 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
8674 +{
8675 + return (*(volatile const int *)&(v)->counter);
8676 }
8677
8678 /**
8679 @@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
8680 }
8681
8682 /**
8683 + * atomic_set_unchecked - set atomic variable
8684 + * @v: pointer of type atomic_unchecked_t
8685 + * @i: required value
8686 + *
8687 + * Atomically sets the value of @v to @i.
8688 + */
8689 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
8690 +{
8691 + v->counter = i;
8692 +}
8693 +
8694 +/**
8695 * atomic_add - add integer to atomic variable
8696 * @i: integer value to add
8697 * @v: pointer of type atomic_t
8698 @@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
8699 */
8700 static inline void atomic_add(int i, atomic_t *v)
8701 {
8702 - asm volatile(LOCK_PREFIX "addl %1,%0"
8703 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
8704 +
8705 +#ifdef CONFIG_PAX_REFCOUNT
8706 + "jno 0f\n"
8707 + LOCK_PREFIX "subl %1,%0\n"
8708 + "int $4\n0:\n"
8709 + _ASM_EXTABLE(0b, 0b)
8710 +#endif
8711 +
8712 + : "+m" (v->counter)
8713 + : "ir" (i));
8714 +}
8715 +
8716 +/**
8717 + * atomic_add_unchecked - add integer to atomic variable
8718 + * @i: integer value to add
8719 + * @v: pointer of type atomic_unchecked_t
8720 + *
8721 + * Atomically adds @i to @v.
8722 + */
8723 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
8724 +{
8725 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
8726 : "+m" (v->counter)
8727 : "ir" (i));
8728 }
8729 @@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
8730 */
8731 static inline void atomic_sub(int i, atomic_t *v)
8732 {
8733 - asm volatile(LOCK_PREFIX "subl %1,%0"
8734 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
8735 +
8736 +#ifdef CONFIG_PAX_REFCOUNT
8737 + "jno 0f\n"
8738 + LOCK_PREFIX "addl %1,%0\n"
8739 + "int $4\n0:\n"
8740 + _ASM_EXTABLE(0b, 0b)
8741 +#endif
8742 +
8743 + : "+m" (v->counter)
8744 + : "ir" (i));
8745 +}
8746 +
8747 +/**
8748 + * atomic_sub_unchecked - subtract integer from atomic variable
8749 + * @i: integer value to subtract
8750 + * @v: pointer of type atomic_unchecked_t
8751 + *
8752 + * Atomically subtracts @i from @v.
8753 + */
8754 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
8755 +{
8756 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
8757 : "+m" (v->counter)
8758 : "ir" (i));
8759 }
8760 @@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8761 {
8762 unsigned char c;
8763
8764 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
8765 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
8766 +
8767 +#ifdef CONFIG_PAX_REFCOUNT
8768 + "jno 0f\n"
8769 + LOCK_PREFIX "addl %2,%0\n"
8770 + "int $4\n0:\n"
8771 + _ASM_EXTABLE(0b, 0b)
8772 +#endif
8773 +
8774 + "sete %1\n"
8775 : "+m" (v->counter), "=qm" (c)
8776 : "ir" (i) : "memory");
8777 return c;
8778 @@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8779 */
8780 static inline void atomic_inc(atomic_t *v)
8781 {
8782 - asm volatile(LOCK_PREFIX "incl %0"
8783 + asm volatile(LOCK_PREFIX "incl %0\n"
8784 +
8785 +#ifdef CONFIG_PAX_REFCOUNT
8786 + "jno 0f\n"
8787 + LOCK_PREFIX "decl %0\n"
8788 + "int $4\n0:\n"
8789 + _ASM_EXTABLE(0b, 0b)
8790 +#endif
8791 +
8792 + : "+m" (v->counter));
8793 +}
8794 +
8795 +/**
8796 + * atomic_inc_unchecked - increment atomic variable
8797 + * @v: pointer of type atomic_unchecked_t
8798 + *
8799 + * Atomically increments @v by 1.
8800 + */
8801 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
8802 +{
8803 + asm volatile(LOCK_PREFIX "incl %0\n"
8804 : "+m" (v->counter));
8805 }
8806
8807 @@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
8808 */
8809 static inline void atomic_dec(atomic_t *v)
8810 {
8811 - asm volatile(LOCK_PREFIX "decl %0"
8812 + asm volatile(LOCK_PREFIX "decl %0\n"
8813 +
8814 +#ifdef CONFIG_PAX_REFCOUNT
8815 + "jno 0f\n"
8816 + LOCK_PREFIX "incl %0\n"
8817 + "int $4\n0:\n"
8818 + _ASM_EXTABLE(0b, 0b)
8819 +#endif
8820 +
8821 + : "+m" (v->counter));
8822 +}
8823 +
8824 +/**
8825 + * atomic_dec_unchecked - decrement atomic variable
8826 + * @v: pointer of type atomic_unchecked_t
8827 + *
8828 + * Atomically decrements @v by 1.
8829 + */
8830 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
8831 +{
8832 + asm volatile(LOCK_PREFIX "decl %0\n"
8833 : "+m" (v->counter));
8834 }
8835
8836 @@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
8837 {
8838 unsigned char c;
8839
8840 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
8841 + asm volatile(LOCK_PREFIX "decl %0\n"
8842 +
8843 +#ifdef CONFIG_PAX_REFCOUNT
8844 + "jno 0f\n"
8845 + LOCK_PREFIX "incl %0\n"
8846 + "int $4\n0:\n"
8847 + _ASM_EXTABLE(0b, 0b)
8848 +#endif
8849 +
8850 + "sete %1\n"
8851 : "+m" (v->counter), "=qm" (c)
8852 : : "memory");
8853 return c != 0;
8854 @@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
8855 {
8856 unsigned char c;
8857
8858 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
8859 + asm volatile(LOCK_PREFIX "incl %0\n"
8860 +
8861 +#ifdef CONFIG_PAX_REFCOUNT
8862 + "jno 0f\n"
8863 + LOCK_PREFIX "decl %0\n"
8864 + "int $4\n0:\n"
8865 + _ASM_EXTABLE(0b, 0b)
8866 +#endif
8867 +
8868 + "sete %1\n"
8869 + : "+m" (v->counter), "=qm" (c)
8870 + : : "memory");
8871 + return c != 0;
8872 +}
8873 +
8874 +/**
8875 + * atomic_inc_and_test_unchecked - increment and test
8876 + * @v: pointer of type atomic_unchecked_t
8877 + *
8878 + * Atomically increments @v by 1
8879 + * and returns true if the result is zero, or false for all
8880 + * other cases.
8881 + */
8882 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
8883 +{
8884 + unsigned char c;
8885 +
8886 + asm volatile(LOCK_PREFIX "incl %0\n"
8887 + "sete %1\n"
8888 : "+m" (v->counter), "=qm" (c)
8889 : : "memory");
8890 return c != 0;
8891 @@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
8892 {
8893 unsigned char c;
8894
8895 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
8896 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
8897 +
8898 +#ifdef CONFIG_PAX_REFCOUNT
8899 + "jno 0f\n"
8900 + LOCK_PREFIX "subl %2,%0\n"
8901 + "int $4\n0:\n"
8902 + _ASM_EXTABLE(0b, 0b)
8903 +#endif
8904 +
8905 + "sets %1\n"
8906 : "+m" (v->counter), "=qm" (c)
8907 : "ir" (i) : "memory");
8908 return c;
8909 @@ -179,7 +341,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
8910 goto no_xadd;
8911 #endif
8912 /* Modern 486+ processor */
8913 - return i + xadd(&v->counter, i);
8914 + return i + xadd_check_overflow(&v->counter, i);
8915
8916 #ifdef CONFIG_M386
8917 no_xadd: /* Legacy 386 processor */
8918 @@ -192,6 +354,34 @@ no_xadd: /* Legacy 386 processor */
8919 }
8920
8921 /**
8922 + * atomic_add_return_unchecked - add integer and return
8923 + * @i: integer value to add
8924 + * @v: pointer of type atomic_unchecked_t
8925 + *
8926 + * Atomically adds @i to @v and returns @i + @v
8927 + */
8928 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
8929 +{
8930 +#ifdef CONFIG_M386
8931 + int __i;
8932 + unsigned long flags;
8933 + if (unlikely(boot_cpu_data.x86 <= 3))
8934 + goto no_xadd;
8935 +#endif
8936 + /* Modern 486+ processor */
8937 + return i + xadd(&v->counter, i);
8938 +
8939 +#ifdef CONFIG_M386
8940 +no_xadd: /* Legacy 386 processor */
8941 + raw_local_irq_save(flags);
8942 + __i = atomic_read_unchecked(v);
8943 + atomic_set_unchecked(v, i + __i);
8944 + raw_local_irq_restore(flags);
8945 + return i + __i;
8946 +#endif
8947 +}
8948 +
8949 +/**
8950 * atomic_sub_return - subtract integer and return
8951 * @v: pointer of type atomic_t
8952 * @i: integer value to subtract
8953 @@ -204,6 +394,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
8954 }
8955
8956 #define atomic_inc_return(v) (atomic_add_return(1, v))
8957 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
8958 +{
8959 + return atomic_add_return_unchecked(1, v);
8960 +}
8961 #define atomic_dec_return(v) (atomic_sub_return(1, v))
8962
8963 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
8964 @@ -211,11 +405,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
8965 return cmpxchg(&v->counter, old, new);
8966 }
8967
8968 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
8969 +{
8970 + return cmpxchg(&v->counter, old, new);
8971 +}
8972 +
8973 static inline int atomic_xchg(atomic_t *v, int new)
8974 {
8975 return xchg(&v->counter, new);
8976 }
8977
8978 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
8979 +{
8980 + return xchg(&v->counter, new);
8981 +}
8982 +
8983 /**
8984 * __atomic_add_unless - add unless the number is already a given value
8985 * @v: pointer of type atomic_t
8986 @@ -227,12 +431,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
8987 */
8988 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
8989 {
8990 - int c, old;
8991 + int c, old, new;
8992 c = atomic_read(v);
8993 for (;;) {
8994 - if (unlikely(c == (u)))
8995 + if (unlikely(c == u))
8996 break;
8997 - old = atomic_cmpxchg((v), c, c + (a));
8998 +
8999 + asm volatile("addl %2,%0\n"
9000 +
9001 +#ifdef CONFIG_PAX_REFCOUNT
9002 + "jno 0f\n"
9003 + "subl %2,%0\n"
9004 + "int $4\n0:\n"
9005 + _ASM_EXTABLE(0b, 0b)
9006 +#endif
9007 +
9008 + : "=r" (new)
9009 + : "0" (c), "ir" (a));
9010 +
9011 + old = atomic_cmpxchg(v, c, new);
9012 if (likely(old == c))
9013 break;
9014 c = old;
9015 @@ -240,6 +457,48 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9016 return c;
9017 }
9018
9019 +/**
9020 + * atomic_inc_not_zero_hint - increment if not null
9021 + * @v: pointer of type atomic_t
9022 + * @hint: probable value of the atomic before the increment
9023 + *
9024 + * This version of atomic_inc_not_zero() gives a hint of probable
9025 + * value of the atomic. This helps processor to not read the memory
9026 + * before doing the atomic read/modify/write cycle, lowering
9027 + * number of bus transactions on some arches.
9028 + *
9029 + * Returns: 0 if increment was not done, 1 otherwise.
9030 + */
9031 +#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
9032 +static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
9033 +{
9034 + int val, c = hint, new;
9035 +
9036 + /* sanity test, should be removed by compiler if hint is a constant */
9037 + if (!hint)
9038 + return __atomic_add_unless(v, 1, 0);
9039 +
9040 + do {
9041 + asm volatile("incl %0\n"
9042 +
9043 +#ifdef CONFIG_PAX_REFCOUNT
9044 + "jno 0f\n"
9045 + "decl %0\n"
9046 + "int $4\n0:\n"
9047 + _ASM_EXTABLE(0b, 0b)
9048 +#endif
9049 +
9050 + : "=r" (new)
9051 + : "0" (c));
9052 +
9053 + val = atomic_cmpxchg(v, c, new);
9054 + if (val == c)
9055 + return 1;
9056 + c = val;
9057 + } while (c);
9058 +
9059 + return 0;
9060 +}
9061
9062 /*
9063 * atomic_dec_if_positive - decrement by 1 if old value positive
9064 diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
9065 index fa13f0e..27c2e08 100644
9066 --- a/arch/x86/include/asm/atomic64_32.h
9067 +++ b/arch/x86/include/asm/atomic64_32.h
9068 @@ -12,6 +12,14 @@ typedef struct {
9069 u64 __aligned(8) counter;
9070 } atomic64_t;
9071
9072 +#ifdef CONFIG_PAX_REFCOUNT
9073 +typedef struct {
9074 + u64 __aligned(8) counter;
9075 +} atomic64_unchecked_t;
9076 +#else
9077 +typedef atomic64_t atomic64_unchecked_t;
9078 +#endif
9079 +
9080 #define ATOMIC64_INIT(val) { (val) }
9081
9082 #ifdef CONFIG_X86_CMPXCHG64
9083 @@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
9084 }
9085
9086 /**
9087 + * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
9088 + * @p: pointer to type atomic64_unchecked_t
9089 + * @o: expected value
9090 + * @n: new value
9091 + *
9092 + * Atomically sets @v to @n if it was equal to @o and returns
9093 + * the old value.
9094 + */
9095 +
9096 +static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
9097 +{
9098 + return cmpxchg64(&v->counter, o, n);
9099 +}
9100 +
9101 +/**
9102 * atomic64_xchg - xchg atomic64 variable
9103 * @v: pointer to type atomic64_t
9104 * @n: value to assign
9105 @@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64_t *v, long long i)
9106 }
9107
9108 /**
9109 + * atomic64_set_unchecked - set atomic64 variable
9110 + * @v: pointer to type atomic64_unchecked_t
9111 + * @n: value to assign
9112 + *
9113 + * Atomically sets the value of @v to @n.
9114 + */
9115 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
9116 +{
9117 + unsigned high = (unsigned)(i >> 32);
9118 + unsigned low = (unsigned)i;
9119 + asm volatile(ATOMIC64_ALTERNATIVE(set)
9120 + : "+b" (low), "+c" (high)
9121 + : "S" (v)
9122 + : "eax", "edx", "memory"
9123 + );
9124 +}
9125 +
9126 +/**
9127 * atomic64_read - read atomic64 variable
9128 * @v: pointer to type atomic64_t
9129 *
9130 @@ -93,6 +134,22 @@ static inline long long atomic64_read(const atomic64_t *v)
9131 }
9132
9133 /**
9134 + * atomic64_read_unchecked - read atomic64 variable
9135 + * @v: pointer to type atomic64_unchecked_t
9136 + *
9137 + * Atomically reads the value of @v and returns it.
9138 + */
9139 +static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
9140 +{
9141 + long long r;
9142 + asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
9143 + : "=A" (r), "+c" (v)
9144 + : : "memory"
9145 + );
9146 + return r;
9147 + }
9148 +
9149 +/**
9150 * atomic64_add_return - add and return
9151 * @i: integer value to add
9152 * @v: pointer to type atomic64_t
9153 @@ -108,6 +165,22 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
9154 return i;
9155 }
9156
9157 +/**
9158 + * atomic64_add_return_unchecked - add and return
9159 + * @i: integer value to add
9160 + * @v: pointer to type atomic64_unchecked_t
9161 + *
9162 + * Atomically adds @i to @v and returns @i + *@v
9163 + */
9164 +static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
9165 +{
9166 + asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
9167 + : "+A" (i), "+c" (v)
9168 + : : "memory"
9169 + );
9170 + return i;
9171 +}
9172 +
9173 /*
9174 * Other variants with different arithmetic operators:
9175 */
9176 @@ -131,6 +204,17 @@ static inline long long atomic64_inc_return(atomic64_t *v)
9177 return a;
9178 }
9179
9180 +static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9181 +{
9182 + long long a;
9183 + asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
9184 + : "=A" (a)
9185 + : "S" (v)
9186 + : "memory", "ecx"
9187 + );
9188 + return a;
9189 +}
9190 +
9191 static inline long long atomic64_dec_return(atomic64_t *v)
9192 {
9193 long long a;
9194 @@ -159,6 +243,22 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
9195 }
9196
9197 /**
9198 + * atomic64_add_unchecked - add integer to atomic64 variable
9199 + * @i: integer value to add
9200 + * @v: pointer to type atomic64_unchecked_t
9201 + *
9202 + * Atomically adds @i to @v.
9203 + */
9204 +static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
9205 +{
9206 + asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
9207 + : "+A" (i), "+c" (v)
9208 + : : "memory"
9209 + );
9210 + return i;
9211 +}
9212 +
9213 +/**
9214 * atomic64_sub - subtract the atomic64 variable
9215 * @i: integer value to subtract
9216 * @v: pointer to type atomic64_t
9217 diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
9218 index 0e1cbfc..5623683 100644
9219 --- a/arch/x86/include/asm/atomic64_64.h
9220 +++ b/arch/x86/include/asm/atomic64_64.h
9221 @@ -18,7 +18,19 @@
9222 */
9223 static inline long atomic64_read(const atomic64_t *v)
9224 {
9225 - return (*(volatile long *)&(v)->counter);
9226 + return (*(volatile const long *)&(v)->counter);
9227 +}
9228 +
9229 +/**
9230 + * atomic64_read_unchecked - read atomic64 variable
9231 + * @v: pointer of type atomic64_unchecked_t
9232 + *
9233 + * Atomically reads the value of @v.
9234 + * Doesn't imply a read memory barrier.
9235 + */
9236 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
9237 +{
9238 + return (*(volatile const long *)&(v)->counter);
9239 }
9240
9241 /**
9242 @@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
9243 }
9244
9245 /**
9246 + * atomic64_set_unchecked - set atomic64 variable
9247 + * @v: pointer to type atomic64_unchecked_t
9248 + * @i: required value
9249 + *
9250 + * Atomically sets the value of @v to @i.
9251 + */
9252 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
9253 +{
9254 + v->counter = i;
9255 +}
9256 +
9257 +/**
9258 * atomic64_add - add integer to atomic64 variable
9259 * @i: integer value to add
9260 * @v: pointer to type atomic64_t
9261 @@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
9262 */
9263 static inline void atomic64_add(long i, atomic64_t *v)
9264 {
9265 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
9266 +
9267 +#ifdef CONFIG_PAX_REFCOUNT
9268 + "jno 0f\n"
9269 + LOCK_PREFIX "subq %1,%0\n"
9270 + "int $4\n0:\n"
9271 + _ASM_EXTABLE(0b, 0b)
9272 +#endif
9273 +
9274 + : "=m" (v->counter)
9275 + : "er" (i), "m" (v->counter));
9276 +}
9277 +
9278 +/**
9279 + * atomic64_add_unchecked - add integer to atomic64 variable
9280 + * @i: integer value to add
9281 + * @v: pointer to type atomic64_unchecked_t
9282 + *
9283 + * Atomically adds @i to @v.
9284 + */
9285 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
9286 +{
9287 asm volatile(LOCK_PREFIX "addq %1,%0"
9288 : "=m" (v->counter)
9289 : "er" (i), "m" (v->counter));
9290 @@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
9291 */
9292 static inline void atomic64_sub(long i, atomic64_t *v)
9293 {
9294 - asm volatile(LOCK_PREFIX "subq %1,%0"
9295 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
9296 +
9297 +#ifdef CONFIG_PAX_REFCOUNT
9298 + "jno 0f\n"
9299 + LOCK_PREFIX "addq %1,%0\n"
9300 + "int $4\n0:\n"
9301 + _ASM_EXTABLE(0b, 0b)
9302 +#endif
9303 +
9304 + : "=m" (v->counter)
9305 + : "er" (i), "m" (v->counter));
9306 +}
9307 +
9308 +/**
9309 + * atomic64_sub_unchecked - subtract the atomic64 variable
9310 + * @i: integer value to subtract
9311 + * @v: pointer to type atomic64_unchecked_t
9312 + *
9313 + * Atomically subtracts @i from @v.
9314 + */
9315 +static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
9316 +{
9317 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
9318 : "=m" (v->counter)
9319 : "er" (i), "m" (v->counter));
9320 }
9321 @@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9322 {
9323 unsigned char c;
9324
9325 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
9326 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
9327 +
9328 +#ifdef CONFIG_PAX_REFCOUNT
9329 + "jno 0f\n"
9330 + LOCK_PREFIX "addq %2,%0\n"
9331 + "int $4\n0:\n"
9332 + _ASM_EXTABLE(0b, 0b)
9333 +#endif
9334 +
9335 + "sete %1\n"
9336 : "=m" (v->counter), "=qm" (c)
9337 : "er" (i), "m" (v->counter) : "memory");
9338 return c;
9339 @@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9340 */
9341 static inline void atomic64_inc(atomic64_t *v)
9342 {
9343 + asm volatile(LOCK_PREFIX "incq %0\n"
9344 +
9345 +#ifdef CONFIG_PAX_REFCOUNT
9346 + "jno 0f\n"
9347 + LOCK_PREFIX "decq %0\n"
9348 + "int $4\n0:\n"
9349 + _ASM_EXTABLE(0b, 0b)
9350 +#endif
9351 +
9352 + : "=m" (v->counter)
9353 + : "m" (v->counter));
9354 +}
9355 +
9356 +/**
9357 + * atomic64_inc_unchecked - increment atomic64 variable
9358 + * @v: pointer to type atomic64_unchecked_t
9359 + *
9360 + * Atomically increments @v by 1.
9361 + */
9362 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
9363 +{
9364 asm volatile(LOCK_PREFIX "incq %0"
9365 : "=m" (v->counter)
9366 : "m" (v->counter));
9367 @@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
9368 */
9369 static inline void atomic64_dec(atomic64_t *v)
9370 {
9371 - asm volatile(LOCK_PREFIX "decq %0"
9372 + asm volatile(LOCK_PREFIX "decq %0\n"
9373 +
9374 +#ifdef CONFIG_PAX_REFCOUNT
9375 + "jno 0f\n"
9376 + LOCK_PREFIX "incq %0\n"
9377 + "int $4\n0:\n"
9378 + _ASM_EXTABLE(0b, 0b)
9379 +#endif
9380 +
9381 + : "=m" (v->counter)
9382 + : "m" (v->counter));
9383 +}
9384 +
9385 +/**
9386 + * atomic64_dec_unchecked - decrement atomic64 variable
9387 + * @v: pointer to type atomic64_t
9388 + *
9389 + * Atomically decrements @v by 1.
9390 + */
9391 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
9392 +{
9393 + asm volatile(LOCK_PREFIX "decq %0\n"
9394 : "=m" (v->counter)
9395 : "m" (v->counter));
9396 }
9397 @@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
9398 {
9399 unsigned char c;
9400
9401 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
9402 + asm volatile(LOCK_PREFIX "decq %0\n"
9403 +
9404 +#ifdef CONFIG_PAX_REFCOUNT
9405 + "jno 0f\n"
9406 + LOCK_PREFIX "incq %0\n"
9407 + "int $4\n0:\n"
9408 + _ASM_EXTABLE(0b, 0b)
9409 +#endif
9410 +
9411 + "sete %1\n"
9412 : "=m" (v->counter), "=qm" (c)
9413 : "m" (v->counter) : "memory");
9414 return c != 0;
9415 @@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
9416 {
9417 unsigned char c;
9418
9419 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
9420 + asm volatile(LOCK_PREFIX "incq %0\n"
9421 +
9422 +#ifdef CONFIG_PAX_REFCOUNT
9423 + "jno 0f\n"
9424 + LOCK_PREFIX "decq %0\n"
9425 + "int $4\n0:\n"
9426 + _ASM_EXTABLE(0b, 0b)
9427 +#endif
9428 +
9429 + "sete %1\n"
9430 : "=m" (v->counter), "=qm" (c)
9431 : "m" (v->counter) : "memory");
9432 return c != 0;
9433 @@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9434 {
9435 unsigned char c;
9436
9437 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
9438 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
9439 +
9440 +#ifdef CONFIG_PAX_REFCOUNT
9441 + "jno 0f\n"
9442 + LOCK_PREFIX "subq %2,%0\n"
9443 + "int $4\n0:\n"
9444 + _ASM_EXTABLE(0b, 0b)
9445 +#endif
9446 +
9447 + "sets %1\n"
9448 : "=m" (v->counter), "=qm" (c)
9449 : "er" (i), "m" (v->counter) : "memory");
9450 return c;
9451 @@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9452 */
9453 static inline long atomic64_add_return(long i, atomic64_t *v)
9454 {
9455 + return i + xadd_check_overflow(&v->counter, i);
9456 +}
9457 +
9458 +/**
9459 + * atomic64_add_return_unchecked - add and return
9460 + * @i: integer value to add
9461 + * @v: pointer to type atomic64_unchecked_t
9462 + *
9463 + * Atomically adds @i to @v and returns @i + @v
9464 + */
9465 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
9466 +{
9467 return i + xadd(&v->counter, i);
9468 }
9469
9470 @@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
9471 }
9472
9473 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
9474 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9475 +{
9476 + return atomic64_add_return_unchecked(1, v);
9477 +}
9478 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
9479
9480 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
9481 @@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
9482 return cmpxchg(&v->counter, old, new);
9483 }
9484
9485 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
9486 +{
9487 + return cmpxchg(&v->counter, old, new);
9488 +}
9489 +
9490 static inline long atomic64_xchg(atomic64_t *v, long new)
9491 {
9492 return xchg(&v->counter, new);
9493 @@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
9494 */
9495 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
9496 {
9497 - long c, old;
9498 + long c, old, new;
9499 c = atomic64_read(v);
9500 for (;;) {
9501 - if (unlikely(c == (u)))
9502 + if (unlikely(c == u))
9503 break;
9504 - old = atomic64_cmpxchg((v), c, c + (a));
9505 +
9506 + asm volatile("add %2,%0\n"
9507 +
9508 +#ifdef CONFIG_PAX_REFCOUNT
9509 + "jno 0f\n"
9510 + "sub %2,%0\n"
9511 + "int $4\n0:\n"
9512 + _ASM_EXTABLE(0b, 0b)
9513 +#endif
9514 +
9515 + : "=r" (new)
9516 + : "0" (c), "ir" (a));
9517 +
9518 + old = atomic64_cmpxchg(v, c, new);
9519 if (likely(old == c))
9520 break;
9521 c = old;
9522 }
9523 - return c != (u);
9524 + return c != u;
9525 }
9526
9527 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
9528 diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
9529 index b97596e..9bd48b06 100644
9530 --- a/arch/x86/include/asm/bitops.h
9531 +++ b/arch/x86/include/asm/bitops.h
9532 @@ -38,7 +38,7 @@
9533 * a mask operation on a byte.
9534 */
9535 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
9536 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
9537 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
9538 #define CONST_MASK(nr) (1 << ((nr) & 7))
9539
9540 /**
9541 diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
9542 index 5e1a2ee..c9f9533 100644
9543 --- a/arch/x86/include/asm/boot.h
9544 +++ b/arch/x86/include/asm/boot.h
9545 @@ -11,10 +11,15 @@
9546 #include <asm/pgtable_types.h>
9547
9548 /* Physical address where kernel should be loaded. */
9549 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
9550 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
9551 + (CONFIG_PHYSICAL_ALIGN - 1)) \
9552 & ~(CONFIG_PHYSICAL_ALIGN - 1))
9553
9554 +#ifndef __ASSEMBLY__
9555 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
9556 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
9557 +#endif
9558 +
9559 /* Minimum kernel alignment, as a power of two */
9560 #ifdef CONFIG_X86_64
9561 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
9562 diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
9563 index 48f99f1..d78ebf9 100644
9564 --- a/arch/x86/include/asm/cache.h
9565 +++ b/arch/x86/include/asm/cache.h
9566 @@ -5,12 +5,13 @@
9567
9568 /* L1 cache line size */
9569 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
9570 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9571 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9572
9573 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
9574 +#define __read_only __attribute__((__section__(".data..read_only")))
9575
9576 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
9577 -#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
9578 +#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
9579
9580 #ifdef CONFIG_X86_VSMP
9581 #ifdef CONFIG_SMP
9582 diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
9583 index 4e12668..501d239 100644
9584 --- a/arch/x86/include/asm/cacheflush.h
9585 +++ b/arch/x86/include/asm/cacheflush.h
9586 @@ -26,7 +26,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
9587 unsigned long pg_flags = pg->flags & _PGMT_MASK;
9588
9589 if (pg_flags == _PGMT_DEFAULT)
9590 - return -1;
9591 + return ~0UL;
9592 else if (pg_flags == _PGMT_WC)
9593 return _PAGE_CACHE_WC;
9594 else if (pg_flags == _PGMT_UC_MINUS)
9595 diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
9596 index 46fc474..b02b0f9 100644
9597 --- a/arch/x86/include/asm/checksum_32.h
9598 +++ b/arch/x86/include/asm/checksum_32.h
9599 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
9600 int len, __wsum sum,
9601 int *src_err_ptr, int *dst_err_ptr);
9602
9603 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
9604 + int len, __wsum sum,
9605 + int *src_err_ptr, int *dst_err_ptr);
9606 +
9607 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
9608 + int len, __wsum sum,
9609 + int *src_err_ptr, int *dst_err_ptr);
9610 +
9611 /*
9612 * Note: when you get a NULL pointer exception here this means someone
9613 * passed in an incorrect kernel address to one of these functions.
9614 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
9615 int *err_ptr)
9616 {
9617 might_sleep();
9618 - return csum_partial_copy_generic((__force void *)src, dst,
9619 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
9620 len, sum, err_ptr, NULL);
9621 }
9622
9623 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
9624 {
9625 might_sleep();
9626 if (access_ok(VERIFY_WRITE, dst, len))
9627 - return csum_partial_copy_generic(src, (__force void *)dst,
9628 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
9629 len, sum, NULL, err_ptr);
9630
9631 if (len)
9632 diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
9633 index b3b7332..3935f40 100644
9634 --- a/arch/x86/include/asm/cmpxchg.h
9635 +++ b/arch/x86/include/asm/cmpxchg.h
9636 @@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void)
9637 __compiletime_error("Bad argument size for cmpxchg");
9638 extern void __xadd_wrong_size(void)
9639 __compiletime_error("Bad argument size for xadd");
9640 +extern void __xadd_check_overflow_wrong_size(void)
9641 + __compiletime_error("Bad argument size for xadd_check_overflow");
9642 extern void __add_wrong_size(void)
9643 __compiletime_error("Bad argument size for add");
9644 +extern void __add_check_overflow_wrong_size(void)
9645 + __compiletime_error("Bad argument size for add_check_overflow");
9646
9647 /*
9648 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
9649 @@ -67,6 +71,34 @@ extern void __add_wrong_size(void)
9650 __ret; \
9651 })
9652
9653 +#define __xchg_op_check_overflow(ptr, arg, op, lock) \
9654 + ({ \
9655 + __typeof__ (*(ptr)) __ret = (arg); \
9656 + switch (sizeof(*(ptr))) { \
9657 + case __X86_CASE_L: \
9658 + asm volatile (lock #op "l %0, %1\n" \
9659 + "jno 0f\n" \
9660 + "mov %0,%1\n" \
9661 + "int $4\n0:\n" \
9662 + _ASM_EXTABLE(0b, 0b) \
9663 + : "+r" (__ret), "+m" (*(ptr)) \
9664 + : : "memory", "cc"); \
9665 + break; \
9666 + case __X86_CASE_Q: \
9667 + asm volatile (lock #op "q %q0, %1\n" \
9668 + "jno 0f\n" \
9669 + "mov %0,%1\n" \
9670 + "int $4\n0:\n" \
9671 + _ASM_EXTABLE(0b, 0b) \
9672 + : "+r" (__ret), "+m" (*(ptr)) \
9673 + : : "memory", "cc"); \
9674 + break; \
9675 + default: \
9676 + __ ## op ## _check_overflow_wrong_size(); \
9677 + } \
9678 + __ret; \
9679 + })
9680 +
9681 /*
9682 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
9683 * Since this is generally used to protect other memory information, we
9684 @@ -167,6 +199,9 @@ extern void __add_wrong_size(void)
9685 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
9686 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
9687
9688 +#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
9689 +#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
9690 +
9691 #define __add(ptr, inc, lock) \
9692 ({ \
9693 __typeof__ (*(ptr)) __ret = (inc); \
9694 diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
9695 index 8d67d42..183d0eb 100644
9696 --- a/arch/x86/include/asm/cpufeature.h
9697 +++ b/arch/x86/include/asm/cpufeature.h
9698 @@ -367,7 +367,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
9699 ".section .discard,\"aw\",@progbits\n"
9700 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
9701 ".previous\n"
9702 - ".section .altinstr_replacement,\"ax\"\n"
9703 + ".section .altinstr_replacement,\"a\"\n"
9704 "3: movb $1,%0\n"
9705 "4:\n"
9706 ".previous\n"
9707 diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
9708 index e95822d..a90010e 100644
9709 --- a/arch/x86/include/asm/desc.h
9710 +++ b/arch/x86/include/asm/desc.h
9711 @@ -4,6 +4,7 @@
9712 #include <asm/desc_defs.h>
9713 #include <asm/ldt.h>
9714 #include <asm/mmu.h>
9715 +#include <asm/pgtable.h>
9716
9717 #include <linux/smp.h>
9718
9719 @@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
9720
9721 desc->type = (info->read_exec_only ^ 1) << 1;
9722 desc->type |= info->contents << 2;
9723 + desc->type |= info->seg_not_present ^ 1;
9724
9725 desc->s = 1;
9726 desc->dpl = 0x3;
9727 @@ -34,19 +36,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
9728 }
9729
9730 extern struct desc_ptr idt_descr;
9731 -extern gate_desc idt_table[];
9732 extern struct desc_ptr nmi_idt_descr;
9733 -extern gate_desc nmi_idt_table[];
9734 -
9735 -struct gdt_page {
9736 - struct desc_struct gdt[GDT_ENTRIES];
9737 -} __attribute__((aligned(PAGE_SIZE)));
9738 -
9739 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
9740 +extern gate_desc idt_table[256];
9741 +extern gate_desc nmi_idt_table[256];
9742
9743 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
9744 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
9745 {
9746 - return per_cpu(gdt_page, cpu).gdt;
9747 + return cpu_gdt_table[cpu];
9748 }
9749
9750 #ifdef CONFIG_X86_64
9751 @@ -71,8 +68,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
9752 unsigned long base, unsigned dpl, unsigned flags,
9753 unsigned short seg)
9754 {
9755 - gate->a = (seg << 16) | (base & 0xffff);
9756 - gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
9757 + gate->gate.offset_low = base;
9758 + gate->gate.seg = seg;
9759 + gate->gate.reserved = 0;
9760 + gate->gate.type = type;
9761 + gate->gate.s = 0;
9762 + gate->gate.dpl = dpl;
9763 + gate->gate.p = 1;
9764 + gate->gate.offset_high = base >> 16;
9765 }
9766
9767 #endif
9768 @@ -117,12 +120,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
9769
9770 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
9771 {
9772 + pax_open_kernel();
9773 memcpy(&idt[entry], gate, sizeof(*gate));
9774 + pax_close_kernel();
9775 }
9776
9777 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
9778 {
9779 + pax_open_kernel();
9780 memcpy(&ldt[entry], desc, 8);
9781 + pax_close_kernel();
9782 }
9783
9784 static inline void
9785 @@ -136,7 +143,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
9786 default: size = sizeof(*gdt); break;
9787 }
9788
9789 + pax_open_kernel();
9790 memcpy(&gdt[entry], desc, size);
9791 + pax_close_kernel();
9792 }
9793
9794 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
9795 @@ -209,7 +218,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
9796
9797 static inline void native_load_tr_desc(void)
9798 {
9799 + pax_open_kernel();
9800 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
9801 + pax_close_kernel();
9802 }
9803
9804 static inline void native_load_gdt(const struct desc_ptr *dtr)
9805 @@ -246,8 +257,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
9806 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
9807 unsigned int i;
9808
9809 + pax_open_kernel();
9810 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
9811 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
9812 + pax_close_kernel();
9813 }
9814
9815 #define _LDT_empty(info) \
9816 @@ -310,7 +323,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
9817 }
9818
9819 #ifdef CONFIG_X86_64
9820 -static inline void set_nmi_gate(int gate, void *addr)
9821 +static inline void set_nmi_gate(int gate, const void *addr)
9822 {
9823 gate_desc s;
9824
9825 @@ -319,7 +332,7 @@ static inline void set_nmi_gate(int gate, void *addr)
9826 }
9827 #endif
9828
9829 -static inline void _set_gate(int gate, unsigned type, void *addr,
9830 +static inline void _set_gate(int gate, unsigned type, const void *addr,
9831 unsigned dpl, unsigned ist, unsigned seg)
9832 {
9833 gate_desc s;
9834 @@ -338,7 +351,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
9835 * Pentium F0 0F bugfix can have resulted in the mapped
9836 * IDT being write-protected.
9837 */
9838 -static inline void set_intr_gate(unsigned int n, void *addr)
9839 +static inline void set_intr_gate(unsigned int n, const void *addr)
9840 {
9841 BUG_ON((unsigned)n > 0xFF);
9842 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
9843 @@ -368,19 +381,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
9844 /*
9845 * This routine sets up an interrupt gate at directory privilege level 3.
9846 */
9847 -static inline void set_system_intr_gate(unsigned int n, void *addr)
9848 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
9849 {
9850 BUG_ON((unsigned)n > 0xFF);
9851 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
9852 }
9853
9854 -static inline void set_system_trap_gate(unsigned int n, void *addr)
9855 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
9856 {
9857 BUG_ON((unsigned)n > 0xFF);
9858 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
9859 }
9860
9861 -static inline void set_trap_gate(unsigned int n, void *addr)
9862 +static inline void set_trap_gate(unsigned int n, const void *addr)
9863 {
9864 BUG_ON((unsigned)n > 0xFF);
9865 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
9866 @@ -389,19 +402,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
9867 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
9868 {
9869 BUG_ON((unsigned)n > 0xFF);
9870 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
9871 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
9872 }
9873
9874 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
9875 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
9876 {
9877 BUG_ON((unsigned)n > 0xFF);
9878 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
9879 }
9880
9881 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
9882 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
9883 {
9884 BUG_ON((unsigned)n > 0xFF);
9885 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
9886 }
9887
9888 +#ifdef CONFIG_X86_32
9889 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
9890 +{
9891 + struct desc_struct d;
9892 +
9893 + if (likely(limit))
9894 + limit = (limit - 1UL) >> PAGE_SHIFT;
9895 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
9896 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
9897 +}
9898 +#endif
9899 +
9900 #endif /* _ASM_X86_DESC_H */
9901 diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
9902 index 278441f..b95a174 100644
9903 --- a/arch/x86/include/asm/desc_defs.h
9904 +++ b/arch/x86/include/asm/desc_defs.h
9905 @@ -31,6 +31,12 @@ struct desc_struct {
9906 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
9907 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
9908 };
9909 + struct {
9910 + u16 offset_low;
9911 + u16 seg;
9912 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
9913 + unsigned offset_high: 16;
9914 + } gate;
9915 };
9916 } __attribute__((packed));
9917
9918 diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
9919 index 3778256..c5d4fce 100644
9920 --- a/arch/x86/include/asm/e820.h
9921 +++ b/arch/x86/include/asm/e820.h
9922 @@ -69,7 +69,7 @@ struct e820map {
9923 #define ISA_START_ADDRESS 0xa0000
9924 #define ISA_END_ADDRESS 0x100000
9925
9926 -#define BIOS_BEGIN 0x000a0000
9927 +#define BIOS_BEGIN 0x000c0000
9928 #define BIOS_END 0x00100000
9929
9930 #define BIOS_ROM_BASE 0xffe00000
9931 diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
9932 index 5f962df..7289f09 100644
9933 --- a/arch/x86/include/asm/elf.h
9934 +++ b/arch/x86/include/asm/elf.h
9935 @@ -238,7 +238,25 @@ extern int force_personality32;
9936 the loader. We need to make sure that it is out of the way of the program
9937 that it will "exec", and that there is sufficient room for the brk. */
9938
9939 +#ifdef CONFIG_PAX_SEGMEXEC
9940 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
9941 +#else
9942 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
9943 +#endif
9944 +
9945 +#ifdef CONFIG_PAX_ASLR
9946 +#ifdef CONFIG_X86_32
9947 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
9948 +
9949 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
9950 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
9951 +#else
9952 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
9953 +
9954 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
9955 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
9956 +#endif
9957 +#endif
9958
9959 /* This yields a mask that user programs can use to figure out what
9960 instruction set this CPU supports. This could be done in user space,
9961 @@ -291,9 +309,7 @@ do { \
9962
9963 #define ARCH_DLINFO \
9964 do { \
9965 - if (vdso_enabled) \
9966 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
9967 - (unsigned long)current->mm->context.vdso); \
9968 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
9969 } while (0)
9970
9971 #define AT_SYSINFO 32
9972 @@ -304,7 +320,7 @@ do { \
9973
9974 #endif /* !CONFIG_X86_32 */
9975
9976 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
9977 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
9978
9979 #define VDSO_ENTRY \
9980 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
9981 @@ -318,9 +334,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
9982 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
9983 #define compat_arch_setup_additional_pages syscall32_setup_pages
9984
9985 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
9986 -#define arch_randomize_brk arch_randomize_brk
9987 -
9988 /*
9989 * True on X86_32 or when emulating IA32 on X86_64
9990 */
9991 diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
9992 index cc70c1c..d96d011 100644
9993 --- a/arch/x86/include/asm/emergency-restart.h
9994 +++ b/arch/x86/include/asm/emergency-restart.h
9995 @@ -15,6 +15,6 @@ enum reboot_type {
9996
9997 extern enum reboot_type reboot_type;
9998
9999 -extern void machine_emergency_restart(void);
10000 +extern void machine_emergency_restart(void) __noreturn;
10001
10002 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
10003 diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
10004 index d09bb03..4ea4194 100644
10005 --- a/arch/x86/include/asm/futex.h
10006 +++ b/arch/x86/include/asm/futex.h
10007 @@ -12,16 +12,18 @@
10008 #include <asm/system.h>
10009
10010 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
10011 + typecheck(u32 __user *, uaddr); \
10012 asm volatile("1:\t" insn "\n" \
10013 "2:\t.section .fixup,\"ax\"\n" \
10014 "3:\tmov\t%3, %1\n" \
10015 "\tjmp\t2b\n" \
10016 "\t.previous\n" \
10017 _ASM_EXTABLE(1b, 3b) \
10018 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
10019 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
10020 : "i" (-EFAULT), "0" (oparg), "1" (0))
10021
10022 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
10023 + typecheck(u32 __user *, uaddr); \
10024 asm volatile("1:\tmovl %2, %0\n" \
10025 "\tmovl\t%0, %3\n" \
10026 "\t" insn "\n" \
10027 @@ -34,7 +36,7 @@
10028 _ASM_EXTABLE(1b, 4b) \
10029 _ASM_EXTABLE(2b, 4b) \
10030 : "=&a" (oldval), "=&r" (ret), \
10031 - "+m" (*uaddr), "=&r" (tem) \
10032 + "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
10033 : "r" (oparg), "i" (-EFAULT), "1" (0))
10034
10035 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10036 @@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10037
10038 switch (op) {
10039 case FUTEX_OP_SET:
10040 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
10041 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
10042 break;
10043 case FUTEX_OP_ADD:
10044 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
10045 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
10046 uaddr, oparg);
10047 break;
10048 case FUTEX_OP_OR:
10049 @@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
10050 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
10051 return -EFAULT;
10052
10053 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
10054 + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
10055 "2:\t.section .fixup, \"ax\"\n"
10056 "3:\tmov %3, %0\n"
10057 "\tjmp 2b\n"
10058 "\t.previous\n"
10059 _ASM_EXTABLE(1b, 3b)
10060 - : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
10061 + : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
10062 : "i" (-EFAULT), "r" (newval), "1" (oldval)
10063 : "memory"
10064 );
10065 diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
10066 index eb92a6e..b98b2f4 100644
10067 --- a/arch/x86/include/asm/hw_irq.h
10068 +++ b/arch/x86/include/asm/hw_irq.h
10069 @@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
10070 extern void enable_IO_APIC(void);
10071
10072 /* Statistics */
10073 -extern atomic_t irq_err_count;
10074 -extern atomic_t irq_mis_count;
10075 +extern atomic_unchecked_t irq_err_count;
10076 +extern atomic_unchecked_t irq_mis_count;
10077
10078 /* EISA */
10079 extern void eisa_set_level_irq(unsigned int irq);
10080 diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
10081 index 2479049..3fb9795 100644
10082 --- a/arch/x86/include/asm/i387.h
10083 +++ b/arch/x86/include/asm/i387.h
10084 @@ -93,6 +93,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
10085 {
10086 int err;
10087
10088 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10089 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10090 + fx = (struct i387_fxsave_struct __user *)((void *)fx + PAX_USER_SHADOW_BASE);
10091 +#endif
10092 +
10093 /* See comment in fxsave() below. */
10094 #ifdef CONFIG_AS_FXSAVEQ
10095 asm volatile("1: fxrstorq %[fx]\n\t"
10096 @@ -122,6 +127,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
10097 {
10098 int err;
10099
10100 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10101 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10102 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
10103 +#endif
10104 +
10105 /*
10106 * Clear the bytes not touched by the fxsave and reserved
10107 * for the SW usage.
10108 @@ -278,7 +288,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
10109 "emms\n\t" /* clear stack tags */
10110 "fildl %P[addr]", /* set F?P to defined value */
10111 X86_FEATURE_FXSAVE_LEAK,
10112 - [addr] "m" (tsk->thread.fpu.has_fpu));
10113 + [addr] "m" (init_tss[smp_processor_id()].x86_tss.sp0));
10114
10115 return fpu_restore_checking(&tsk->thread.fpu);
10116 }
10117 @@ -445,7 +455,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
10118 static inline bool interrupted_user_mode(void)
10119 {
10120 struct pt_regs *regs = get_irq_regs();
10121 - return regs && user_mode_vm(regs);
10122 + return regs && user_mode(regs);
10123 }
10124
10125 /*
10126 diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
10127 index d8e8eef..99f81ae 100644
10128 --- a/arch/x86/include/asm/io.h
10129 +++ b/arch/x86/include/asm/io.h
10130 @@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
10131
10132 #include <linux/vmalloc.h>
10133
10134 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
10135 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
10136 +{
10137 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10138 +}
10139 +
10140 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
10141 +{
10142 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10143 +}
10144 +
10145 /*
10146 * Convert a virtual cached pointer to an uncached pointer
10147 */
10148 diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
10149 index bba3cf8..06bc8da 100644
10150 --- a/arch/x86/include/asm/irqflags.h
10151 +++ b/arch/x86/include/asm/irqflags.h
10152 @@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
10153 sti; \
10154 sysexit
10155
10156 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
10157 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
10158 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
10159 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
10160 +
10161 #else
10162 #define INTERRUPT_RETURN iret
10163 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
10164 diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
10165 index 5478825..839e88c 100644
10166 --- a/arch/x86/include/asm/kprobes.h
10167 +++ b/arch/x86/include/asm/kprobes.h
10168 @@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
10169 #define RELATIVEJUMP_SIZE 5
10170 #define RELATIVECALL_OPCODE 0xe8
10171 #define RELATIVE_ADDR_SIZE 4
10172 -#define MAX_STACK_SIZE 64
10173 -#define MIN_STACK_SIZE(ADDR) \
10174 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
10175 - THREAD_SIZE - (unsigned long)(ADDR))) \
10176 - ? (MAX_STACK_SIZE) \
10177 - : (((unsigned long)current_thread_info()) + \
10178 - THREAD_SIZE - (unsigned long)(ADDR)))
10179 +#define MAX_STACK_SIZE 64UL
10180 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
10181
10182 #define flush_insn_slot(p) do { } while (0)
10183
10184 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
10185 index 52d6640..a013b87 100644
10186 --- a/arch/x86/include/asm/kvm_host.h
10187 +++ b/arch/x86/include/asm/kvm_host.h
10188 @@ -663,7 +663,7 @@ struct kvm_x86_ops {
10189 int (*check_intercept)(struct kvm_vcpu *vcpu,
10190 struct x86_instruction_info *info,
10191 enum x86_intercept_stage stage);
10192 -};
10193 +} __do_const;
10194
10195 struct kvm_arch_async_pf {
10196 u32 token;
10197 diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
10198 index 9cdae5d..300d20f 100644
10199 --- a/arch/x86/include/asm/local.h
10200 +++ b/arch/x86/include/asm/local.h
10201 @@ -18,26 +18,58 @@ typedef struct {
10202
10203 static inline void local_inc(local_t *l)
10204 {
10205 - asm volatile(_ASM_INC "%0"
10206 + asm volatile(_ASM_INC "%0\n"
10207 +
10208 +#ifdef CONFIG_PAX_REFCOUNT
10209 + "jno 0f\n"
10210 + _ASM_DEC "%0\n"
10211 + "int $4\n0:\n"
10212 + _ASM_EXTABLE(0b, 0b)
10213 +#endif
10214 +
10215 : "+m" (l->a.counter));
10216 }
10217
10218 static inline void local_dec(local_t *l)
10219 {
10220 - asm volatile(_ASM_DEC "%0"
10221 + asm volatile(_ASM_DEC "%0\n"
10222 +
10223 +#ifdef CONFIG_PAX_REFCOUNT
10224 + "jno 0f\n"
10225 + _ASM_INC "%0\n"
10226 + "int $4\n0:\n"
10227 + _ASM_EXTABLE(0b, 0b)
10228 +#endif
10229 +
10230 : "+m" (l->a.counter));
10231 }
10232
10233 static inline void local_add(long i, local_t *l)
10234 {
10235 - asm volatile(_ASM_ADD "%1,%0"
10236 + asm volatile(_ASM_ADD "%1,%0\n"
10237 +
10238 +#ifdef CONFIG_PAX_REFCOUNT
10239 + "jno 0f\n"
10240 + _ASM_SUB "%1,%0\n"
10241 + "int $4\n0:\n"
10242 + _ASM_EXTABLE(0b, 0b)
10243 +#endif
10244 +
10245 : "+m" (l->a.counter)
10246 : "ir" (i));
10247 }
10248
10249 static inline void local_sub(long i, local_t *l)
10250 {
10251 - asm volatile(_ASM_SUB "%1,%0"
10252 + asm volatile(_ASM_SUB "%1,%0\n"
10253 +
10254 +#ifdef CONFIG_PAX_REFCOUNT
10255 + "jno 0f\n"
10256 + _ASM_ADD "%1,%0\n"
10257 + "int $4\n0:\n"
10258 + _ASM_EXTABLE(0b, 0b)
10259 +#endif
10260 +
10261 : "+m" (l->a.counter)
10262 : "ir" (i));
10263 }
10264 @@ -55,7 +87,16 @@ static inline int local_sub_and_test(long i, local_t *l)
10265 {
10266 unsigned char c;
10267
10268 - asm volatile(_ASM_SUB "%2,%0; sete %1"
10269 + asm volatile(_ASM_SUB "%2,%0\n"
10270 +
10271 +#ifdef CONFIG_PAX_REFCOUNT
10272 + "jno 0f\n"
10273 + _ASM_ADD "%2,%0\n"
10274 + "int $4\n0:\n"
10275 + _ASM_EXTABLE(0b, 0b)
10276 +#endif
10277 +
10278 + "sete %1\n"
10279 : "+m" (l->a.counter), "=qm" (c)
10280 : "ir" (i) : "memory");
10281 return c;
10282 @@ -73,7 +114,16 @@ static inline int local_dec_and_test(local_t *l)
10283 {
10284 unsigned char c;
10285
10286 - asm volatile(_ASM_DEC "%0; sete %1"
10287 + asm volatile(_ASM_DEC "%0\n"
10288 +
10289 +#ifdef CONFIG_PAX_REFCOUNT
10290 + "jno 0f\n"
10291 + _ASM_INC "%0\n"
10292 + "int $4\n0:\n"
10293 + _ASM_EXTABLE(0b, 0b)
10294 +#endif
10295 +
10296 + "sete %1\n"
10297 : "+m" (l->a.counter), "=qm" (c)
10298 : : "memory");
10299 return c != 0;
10300 @@ -91,7 +141,16 @@ static inline int local_inc_and_test(local_t *l)
10301 {
10302 unsigned char c;
10303
10304 - asm volatile(_ASM_INC "%0; sete %1"
10305 + asm volatile(_ASM_INC "%0\n"
10306 +
10307 +#ifdef CONFIG_PAX_REFCOUNT
10308 + "jno 0f\n"
10309 + _ASM_DEC "%0\n"
10310 + "int $4\n0:\n"
10311 + _ASM_EXTABLE(0b, 0b)
10312 +#endif
10313 +
10314 + "sete %1\n"
10315 : "+m" (l->a.counter), "=qm" (c)
10316 : : "memory");
10317 return c != 0;
10318 @@ -110,7 +169,16 @@ static inline int local_add_negative(long i, local_t *l)
10319 {
10320 unsigned char c;
10321
10322 - asm volatile(_ASM_ADD "%2,%0; sets %1"
10323 + asm volatile(_ASM_ADD "%2,%0\n"
10324 +
10325 +#ifdef CONFIG_PAX_REFCOUNT
10326 + "jno 0f\n"
10327 + _ASM_SUB "%2,%0\n"
10328 + "int $4\n0:\n"
10329 + _ASM_EXTABLE(0b, 0b)
10330 +#endif
10331 +
10332 + "sets %1\n"
10333 : "+m" (l->a.counter), "=qm" (c)
10334 : "ir" (i) : "memory");
10335 return c;
10336 @@ -133,7 +201,15 @@ static inline long local_add_return(long i, local_t *l)
10337 #endif
10338 /* Modern 486+ processor */
10339 __i = i;
10340 - asm volatile(_ASM_XADD "%0, %1;"
10341 + asm volatile(_ASM_XADD "%0, %1\n"
10342 +
10343 +#ifdef CONFIG_PAX_REFCOUNT
10344 + "jno 0f\n"
10345 + _ASM_MOV "%0,%1\n"
10346 + "int $4\n0:\n"
10347 + _ASM_EXTABLE(0b, 0b)
10348 +#endif
10349 +
10350 : "+r" (i), "+m" (l->a.counter)
10351 : : "memory");
10352 return i + __i;
10353 diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
10354 index 593e51d..fa69c9a 100644
10355 --- a/arch/x86/include/asm/mman.h
10356 +++ b/arch/x86/include/asm/mman.h
10357 @@ -5,4 +5,14 @@
10358
10359 #include <asm-generic/mman.h>
10360
10361 +#ifdef __KERNEL__
10362 +#ifndef __ASSEMBLY__
10363 +#ifdef CONFIG_X86_32
10364 +#define arch_mmap_check i386_mmap_check
10365 +int i386_mmap_check(unsigned long addr, unsigned long len,
10366 + unsigned long flags);
10367 +#endif
10368 +#endif
10369 +#endif
10370 +
10371 #endif /* _ASM_X86_MMAN_H */
10372 diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
10373 index 5f55e69..e20bfb1 100644
10374 --- a/arch/x86/include/asm/mmu.h
10375 +++ b/arch/x86/include/asm/mmu.h
10376 @@ -9,7 +9,7 @@
10377 * we put the segment information here.
10378 */
10379 typedef struct {
10380 - void *ldt;
10381 + struct desc_struct *ldt;
10382 int size;
10383
10384 #ifdef CONFIG_X86_64
10385 @@ -18,7 +18,19 @@ typedef struct {
10386 #endif
10387
10388 struct mutex lock;
10389 - void *vdso;
10390 + unsigned long vdso;
10391 +
10392 +#ifdef CONFIG_X86_32
10393 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
10394 + unsigned long user_cs_base;
10395 + unsigned long user_cs_limit;
10396 +
10397 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10398 + cpumask_t cpu_user_cs_mask;
10399 +#endif
10400 +
10401 +#endif
10402 +#endif
10403 } mm_context_t;
10404
10405 #ifdef CONFIG_SMP
10406 diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
10407 index 6902152..399f3a2 100644
10408 --- a/arch/x86/include/asm/mmu_context.h
10409 +++ b/arch/x86/include/asm/mmu_context.h
10410 @@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
10411
10412 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
10413 {
10414 +
10415 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10416 + unsigned int i;
10417 + pgd_t *pgd;
10418 +
10419 + pax_open_kernel();
10420 + pgd = get_cpu_pgd(smp_processor_id());
10421 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
10422 + set_pgd_batched(pgd+i, native_make_pgd(0));
10423 + pax_close_kernel();
10424 +#endif
10425 +
10426 #ifdef CONFIG_SMP
10427 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
10428 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
10429 @@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10430 struct task_struct *tsk)
10431 {
10432 unsigned cpu = smp_processor_id();
10433 +#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10434 + int tlbstate = TLBSTATE_OK;
10435 +#endif
10436
10437 if (likely(prev != next)) {
10438 #ifdef CONFIG_SMP
10439 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10440 + tlbstate = percpu_read(cpu_tlbstate.state);
10441 +#endif
10442 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10443 percpu_write(cpu_tlbstate.active_mm, next);
10444 #endif
10445 cpumask_set_cpu(cpu, mm_cpumask(next));
10446
10447 /* Re-load page tables */
10448 +#ifdef CONFIG_PAX_PER_CPU_PGD
10449 + pax_open_kernel();
10450 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
10451 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
10452 + pax_close_kernel();
10453 + load_cr3(get_cpu_pgd(cpu));
10454 +#else
10455 load_cr3(next->pgd);
10456 +#endif
10457
10458 /* stop flush ipis for the previous mm */
10459 cpumask_clear_cpu(cpu, mm_cpumask(prev));
10460 @@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10461 */
10462 if (unlikely(prev->context.ldt != next->context.ldt))
10463 load_LDT_nolock(&next->context);
10464 - }
10465 +
10466 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10467 + if (!(__supported_pte_mask & _PAGE_NX)) {
10468 + smp_mb__before_clear_bit();
10469 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
10470 + smp_mb__after_clear_bit();
10471 + cpu_set(cpu, next->context.cpu_user_cs_mask);
10472 + }
10473 +#endif
10474 +
10475 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10476 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
10477 + prev->context.user_cs_limit != next->context.user_cs_limit))
10478 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10479 #ifdef CONFIG_SMP
10480 + else if (unlikely(tlbstate != TLBSTATE_OK))
10481 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10482 +#endif
10483 +#endif
10484 +
10485 + }
10486 else {
10487 +
10488 +#ifdef CONFIG_PAX_PER_CPU_PGD
10489 + pax_open_kernel();
10490 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
10491 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
10492 + pax_close_kernel();
10493 + load_cr3(get_cpu_pgd(cpu));
10494 +#endif
10495 +
10496 +#ifdef CONFIG_SMP
10497 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10498 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
10499
10500 @@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10501 * tlb flush IPI delivery. We must reload CR3
10502 * to make sure to use no freed page tables.
10503 */
10504 +
10505 +#ifndef CONFIG_PAX_PER_CPU_PGD
10506 load_cr3(next->pgd);
10507 +#endif
10508 +
10509 load_LDT_nolock(&next->context);
10510 +
10511 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
10512 + if (!(__supported_pte_mask & _PAGE_NX))
10513 + cpu_set(cpu, next->context.cpu_user_cs_mask);
10514 +#endif
10515 +
10516 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10517 +#ifdef CONFIG_PAX_PAGEEXEC
10518 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
10519 +#endif
10520 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10521 +#endif
10522 +
10523 }
10524 +#endif
10525 }
10526 -#endif
10527 }
10528
10529 #define activate_mm(prev, next) \
10530 diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
10531 index 9eae775..c914fea 100644
10532 --- a/arch/x86/include/asm/module.h
10533 +++ b/arch/x86/include/asm/module.h
10534 @@ -5,6 +5,7 @@
10535
10536 #ifdef CONFIG_X86_64
10537 /* X86_64 does not define MODULE_PROC_FAMILY */
10538 +#define MODULE_PROC_FAMILY ""
10539 #elif defined CONFIG_M386
10540 #define MODULE_PROC_FAMILY "386 "
10541 #elif defined CONFIG_M486
10542 @@ -59,8 +60,20 @@
10543 #error unknown processor family
10544 #endif
10545
10546 -#ifdef CONFIG_X86_32
10547 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
10548 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
10549 +#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
10550 +#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
10551 +#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
10552 +#else
10553 +#define MODULE_PAX_KERNEXEC ""
10554 #endif
10555
10556 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10557 +#define MODULE_PAX_UDEREF "UDEREF "
10558 +#else
10559 +#define MODULE_PAX_UDEREF ""
10560 +#endif
10561 +
10562 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
10563 +
10564 #endif /* _ASM_X86_MODULE_H */
10565 diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
10566 index 7639dbf..e08a58c 100644
10567 --- a/arch/x86/include/asm/page_64_types.h
10568 +++ b/arch/x86/include/asm/page_64_types.h
10569 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
10570
10571 /* duplicated to the one in bootmem.h */
10572 extern unsigned long max_pfn;
10573 -extern unsigned long phys_base;
10574 +extern const unsigned long phys_base;
10575
10576 extern unsigned long __phys_addr(unsigned long);
10577 #define __phys_reloc_hide(x) (x)
10578 diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
10579 index a7d2db9..edb023e 100644
10580 --- a/arch/x86/include/asm/paravirt.h
10581 +++ b/arch/x86/include/asm/paravirt.h
10582 @@ -667,6 +667,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
10583 val);
10584 }
10585
10586 +static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
10587 +{
10588 + pgdval_t val = native_pgd_val(pgd);
10589 +
10590 + if (sizeof(pgdval_t) > sizeof(long))
10591 + PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
10592 + val, (u64)val >> 32);
10593 + else
10594 + PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
10595 + val);
10596 +}
10597 +
10598 static inline void pgd_clear(pgd_t *pgdp)
10599 {
10600 set_pgd(pgdp, __pgd(0));
10601 @@ -748,6 +760,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
10602 pv_mmu_ops.set_fixmap(idx, phys, flags);
10603 }
10604
10605 +#ifdef CONFIG_PAX_KERNEXEC
10606 +static inline unsigned long pax_open_kernel(void)
10607 +{
10608 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
10609 +}
10610 +
10611 +static inline unsigned long pax_close_kernel(void)
10612 +{
10613 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
10614 +}
10615 +#else
10616 +static inline unsigned long pax_open_kernel(void) { return 0; }
10617 +static inline unsigned long pax_close_kernel(void) { return 0; }
10618 +#endif
10619 +
10620 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
10621
10622 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
10623 @@ -964,7 +991,7 @@ extern void default_banner(void);
10624
10625 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
10626 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
10627 -#define PARA_INDIRECT(addr) *%cs:addr
10628 +#define PARA_INDIRECT(addr) *%ss:addr
10629 #endif
10630
10631 #define INTERRUPT_RETURN \
10632 @@ -1041,6 +1068,21 @@ extern void default_banner(void);
10633 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
10634 CLBR_NONE, \
10635 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
10636 +
10637 +#define GET_CR0_INTO_RDI \
10638 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
10639 + mov %rax,%rdi
10640 +
10641 +#define SET_RDI_INTO_CR0 \
10642 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
10643 +
10644 +#define GET_CR3_INTO_RDI \
10645 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
10646 + mov %rax,%rdi
10647 +
10648 +#define SET_RDI_INTO_CR3 \
10649 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
10650 +
10651 #endif /* CONFIG_X86_32 */
10652
10653 #endif /* __ASSEMBLY__ */
10654 diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
10655 index 8e8b9a4..f07d725 100644
10656 --- a/arch/x86/include/asm/paravirt_types.h
10657 +++ b/arch/x86/include/asm/paravirt_types.h
10658 @@ -84,20 +84,20 @@ struct pv_init_ops {
10659 */
10660 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
10661 unsigned long addr, unsigned len);
10662 -};
10663 +} __no_const;
10664
10665
10666 struct pv_lazy_ops {
10667 /* Set deferred update mode, used for batching operations. */
10668 void (*enter)(void);
10669 void (*leave)(void);
10670 -};
10671 +} __no_const;
10672
10673 struct pv_time_ops {
10674 unsigned long long (*sched_clock)(void);
10675 unsigned long long (*steal_clock)(int cpu);
10676 unsigned long (*get_tsc_khz)(void);
10677 -};
10678 +} __no_const;
10679
10680 struct pv_cpu_ops {
10681 /* hooks for various privileged instructions */
10682 @@ -193,7 +193,7 @@ struct pv_cpu_ops {
10683
10684 void (*start_context_switch)(struct task_struct *prev);
10685 void (*end_context_switch)(struct task_struct *next);
10686 -};
10687 +} __no_const;
10688
10689 struct pv_irq_ops {
10690 /*
10691 @@ -224,7 +224,7 @@ struct pv_apic_ops {
10692 unsigned long start_eip,
10693 unsigned long start_esp);
10694 #endif
10695 -};
10696 +} __no_const;
10697
10698 struct pv_mmu_ops {
10699 unsigned long (*read_cr2)(void);
10700 @@ -313,6 +313,7 @@ struct pv_mmu_ops {
10701 struct paravirt_callee_save make_pud;
10702
10703 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
10704 + void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
10705 #endif /* PAGETABLE_LEVELS == 4 */
10706 #endif /* PAGETABLE_LEVELS >= 3 */
10707
10708 @@ -324,6 +325,12 @@ struct pv_mmu_ops {
10709 an mfn. We can tell which is which from the index. */
10710 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
10711 phys_addr_t phys, pgprot_t flags);
10712 +
10713 +#ifdef CONFIG_PAX_KERNEXEC
10714 + unsigned long (*pax_open_kernel)(void);
10715 + unsigned long (*pax_close_kernel)(void);
10716 +#endif
10717 +
10718 };
10719
10720 struct arch_spinlock;
10721 @@ -334,7 +341,7 @@ struct pv_lock_ops {
10722 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
10723 int (*spin_trylock)(struct arch_spinlock *lock);
10724 void (*spin_unlock)(struct arch_spinlock *lock);
10725 -};
10726 +} __no_const;
10727
10728 /* This contains all the paravirt structures: we get a convenient
10729 * number for each function using the offset which we use to indicate
10730 diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
10731 index b4389a4..b7ff22c 100644
10732 --- a/arch/x86/include/asm/pgalloc.h
10733 +++ b/arch/x86/include/asm/pgalloc.h
10734 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
10735 pmd_t *pmd, pte_t *pte)
10736 {
10737 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
10738 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
10739 +}
10740 +
10741 +static inline void pmd_populate_user(struct mm_struct *mm,
10742 + pmd_t *pmd, pte_t *pte)
10743 +{
10744 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
10745 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
10746 }
10747
10748 diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
10749 index 98391db..8f6984e 100644
10750 --- a/arch/x86/include/asm/pgtable-2level.h
10751 +++ b/arch/x86/include/asm/pgtable-2level.h
10752 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
10753
10754 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
10755 {
10756 + pax_open_kernel();
10757 *pmdp = pmd;
10758 + pax_close_kernel();
10759 }
10760
10761 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
10762 diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
10763 index effff47..f9e4035 100644
10764 --- a/arch/x86/include/asm/pgtable-3level.h
10765 +++ b/arch/x86/include/asm/pgtable-3level.h
10766 @@ -38,12 +38,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
10767
10768 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
10769 {
10770 + pax_open_kernel();
10771 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
10772 + pax_close_kernel();
10773 }
10774
10775 static inline void native_set_pud(pud_t *pudp, pud_t pud)
10776 {
10777 + pax_open_kernel();
10778 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
10779 + pax_close_kernel();
10780 }
10781
10782 /*
10783 diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
10784 index 49afb3f..ed14d07 100644
10785 --- a/arch/x86/include/asm/pgtable.h
10786 +++ b/arch/x86/include/asm/pgtable.h
10787 @@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
10788
10789 #ifndef __PAGETABLE_PUD_FOLDED
10790 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
10791 +#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
10792 #define pgd_clear(pgd) native_pgd_clear(pgd)
10793 #endif
10794
10795 @@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
10796
10797 #define arch_end_context_switch(prev) do {} while(0)
10798
10799 +#define pax_open_kernel() native_pax_open_kernel()
10800 +#define pax_close_kernel() native_pax_close_kernel()
10801 #endif /* CONFIG_PARAVIRT */
10802
10803 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
10804 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
10805 +
10806 +#ifdef CONFIG_PAX_KERNEXEC
10807 +static inline unsigned long native_pax_open_kernel(void)
10808 +{
10809 + unsigned long cr0;
10810 +
10811 + preempt_disable();
10812 + barrier();
10813 + cr0 = read_cr0() ^ X86_CR0_WP;
10814 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
10815 + write_cr0(cr0);
10816 + return cr0 ^ X86_CR0_WP;
10817 +}
10818 +
10819 +static inline unsigned long native_pax_close_kernel(void)
10820 +{
10821 + unsigned long cr0;
10822 +
10823 + cr0 = read_cr0() ^ X86_CR0_WP;
10824 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
10825 + write_cr0(cr0);
10826 + barrier();
10827 + preempt_enable_no_resched();
10828 + return cr0 ^ X86_CR0_WP;
10829 +}
10830 +#else
10831 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
10832 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
10833 +#endif
10834 +
10835 /*
10836 * The following only work if pte_present() is true.
10837 * Undefined behaviour if not..
10838 */
10839 +static inline int pte_user(pte_t pte)
10840 +{
10841 + return pte_val(pte) & _PAGE_USER;
10842 +}
10843 +
10844 static inline int pte_dirty(pte_t pte)
10845 {
10846 return pte_flags(pte) & _PAGE_DIRTY;
10847 @@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
10848 return pte_clear_flags(pte, _PAGE_RW);
10849 }
10850
10851 +static inline pte_t pte_mkread(pte_t pte)
10852 +{
10853 + return __pte(pte_val(pte) | _PAGE_USER);
10854 +}
10855 +
10856 static inline pte_t pte_mkexec(pte_t pte)
10857 {
10858 - return pte_clear_flags(pte, _PAGE_NX);
10859 +#ifdef CONFIG_X86_PAE
10860 + if (__supported_pte_mask & _PAGE_NX)
10861 + return pte_clear_flags(pte, _PAGE_NX);
10862 + else
10863 +#endif
10864 + return pte_set_flags(pte, _PAGE_USER);
10865 +}
10866 +
10867 +static inline pte_t pte_exprotect(pte_t pte)
10868 +{
10869 +#ifdef CONFIG_X86_PAE
10870 + if (__supported_pte_mask & _PAGE_NX)
10871 + return pte_set_flags(pte, _PAGE_NX);
10872 + else
10873 +#endif
10874 + return pte_clear_flags(pte, _PAGE_USER);
10875 }
10876
10877 static inline pte_t pte_mkdirty(pte_t pte)
10878 @@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
10879 #endif
10880
10881 #ifndef __ASSEMBLY__
10882 +
10883 +#ifdef CONFIG_PAX_PER_CPU_PGD
10884 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
10885 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
10886 +{
10887 + return cpu_pgd[cpu];
10888 +}
10889 +#endif
10890 +
10891 #include <linux/mm_types.h>
10892
10893 static inline int pte_none(pte_t pte)
10894 @@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
10895
10896 static inline int pgd_bad(pgd_t pgd)
10897 {
10898 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
10899 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
10900 }
10901
10902 static inline int pgd_none(pgd_t pgd)
10903 @@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
10904 * pgd_offset() returns a (pgd_t *)
10905 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
10906 */
10907 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
10908 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
10909 +
10910 +#ifdef CONFIG_PAX_PER_CPU_PGD
10911 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
10912 +#endif
10913 +
10914 /*
10915 * a shortcut which implies the use of the kernel's pgd, instead
10916 * of a process's
10917 @@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
10918 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
10919 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
10920
10921 +#ifdef CONFIG_X86_32
10922 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
10923 +#else
10924 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
10925 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
10926 +
10927 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10928 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
10929 +#else
10930 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
10931 +#endif
10932 +
10933 +#endif
10934 +
10935 #ifndef __ASSEMBLY__
10936
10937 extern int direct_gbpages;
10938 @@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
10939 * dst and src can be on the same page, but the range must not overlap,
10940 * and must not cross a page boundary.
10941 */
10942 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
10943 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
10944 {
10945 - memcpy(dst, src, count * sizeof(pgd_t));
10946 + pax_open_kernel();
10947 + while (count--)
10948 + *dst++ = *src++;
10949 + pax_close_kernel();
10950 }
10951
10952 +#ifdef CONFIG_PAX_PER_CPU_PGD
10953 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
10954 +#endif
10955 +
10956 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10957 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
10958 +#else
10959 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
10960 +#endif
10961
10962 #include <asm-generic/pgtable.h>
10963 #endif /* __ASSEMBLY__ */
10964 diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
10965 index 0c92113..34a77c6 100644
10966 --- a/arch/x86/include/asm/pgtable_32.h
10967 +++ b/arch/x86/include/asm/pgtable_32.h
10968 @@ -25,9 +25,6 @@
10969 struct mm_struct;
10970 struct vm_area_struct;
10971
10972 -extern pgd_t swapper_pg_dir[1024];
10973 -extern pgd_t initial_page_table[1024];
10974 -
10975 static inline void pgtable_cache_init(void) { }
10976 static inline void check_pgt_cache(void) { }
10977 void paging_init(void);
10978 @@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
10979 # include <asm/pgtable-2level.h>
10980 #endif
10981
10982 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
10983 +extern pgd_t initial_page_table[PTRS_PER_PGD];
10984 +#ifdef CONFIG_X86_PAE
10985 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
10986 +#endif
10987 +
10988 #if defined(CONFIG_HIGHPTE)
10989 #define pte_offset_map(dir, address) \
10990 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
10991 @@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
10992 /* Clear a kernel PTE and flush it from the TLB */
10993 #define kpte_clear_flush(ptep, vaddr) \
10994 do { \
10995 + pax_open_kernel(); \
10996 pte_clear(&init_mm, (vaddr), (ptep)); \
10997 + pax_close_kernel(); \
10998 __flush_tlb_one((vaddr)); \
10999 } while (0)
11000
11001 @@ -74,6 +79,9 @@ do { \
11002
11003 #endif /* !__ASSEMBLY__ */
11004
11005 +#define HAVE_ARCH_UNMAPPED_AREA
11006 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
11007 +
11008 /*
11009 * kern_addr_valid() is (1) for FLATMEM and (0) for
11010 * SPARSEMEM and DISCONTIGMEM
11011 diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
11012 index ed5903b..c7fe163 100644
11013 --- a/arch/x86/include/asm/pgtable_32_types.h
11014 +++ b/arch/x86/include/asm/pgtable_32_types.h
11015 @@ -8,7 +8,7 @@
11016 */
11017 #ifdef CONFIG_X86_PAE
11018 # include <asm/pgtable-3level_types.h>
11019 -# define PMD_SIZE (1UL << PMD_SHIFT)
11020 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
11021 # define PMD_MASK (~(PMD_SIZE - 1))
11022 #else
11023 # include <asm/pgtable-2level_types.h>
11024 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
11025 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
11026 #endif
11027
11028 +#ifdef CONFIG_PAX_KERNEXEC
11029 +#ifndef __ASSEMBLY__
11030 +extern unsigned char MODULES_EXEC_VADDR[];
11031 +extern unsigned char MODULES_EXEC_END[];
11032 +#endif
11033 +#include <asm/boot.h>
11034 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
11035 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
11036 +#else
11037 +#define ktla_ktva(addr) (addr)
11038 +#define ktva_ktla(addr) (addr)
11039 +#endif
11040 +
11041 #define MODULES_VADDR VMALLOC_START
11042 #define MODULES_END VMALLOC_END
11043 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
11044 diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
11045 index 975f709..107976d 100644
11046 --- a/arch/x86/include/asm/pgtable_64.h
11047 +++ b/arch/x86/include/asm/pgtable_64.h
11048 @@ -16,10 +16,14 @@
11049
11050 extern pud_t level3_kernel_pgt[512];
11051 extern pud_t level3_ident_pgt[512];
11052 +extern pud_t level3_vmalloc_start_pgt[512];
11053 +extern pud_t level3_vmalloc_end_pgt[512];
11054 +extern pud_t level3_vmemmap_pgt[512];
11055 +extern pud_t level2_vmemmap_pgt[512];
11056 extern pmd_t level2_kernel_pgt[512];
11057 extern pmd_t level2_fixmap_pgt[512];
11058 -extern pmd_t level2_ident_pgt[512];
11059 -extern pgd_t init_level4_pgt[];
11060 +extern pmd_t level2_ident_pgt[512*2];
11061 +extern pgd_t init_level4_pgt[512];
11062
11063 #define swapper_pg_dir init_level4_pgt
11064
11065 @@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11066
11067 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11068 {
11069 + pax_open_kernel();
11070 *pmdp = pmd;
11071 + pax_close_kernel();
11072 }
11073
11074 static inline void native_pmd_clear(pmd_t *pmd)
11075 @@ -107,6 +113,13 @@ static inline void native_pud_clear(pud_t *pud)
11076
11077 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
11078 {
11079 + pax_open_kernel();
11080 + *pgdp = pgd;
11081 + pax_close_kernel();
11082 +}
11083 +
11084 +static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
11085 +{
11086 *pgdp = pgd;
11087 }
11088
11089 diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
11090 index 766ea16..5b96cb3 100644
11091 --- a/arch/x86/include/asm/pgtable_64_types.h
11092 +++ b/arch/x86/include/asm/pgtable_64_types.h
11093 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
11094 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
11095 #define MODULES_END _AC(0xffffffffff000000, UL)
11096 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
11097 +#define MODULES_EXEC_VADDR MODULES_VADDR
11098 +#define MODULES_EXEC_END MODULES_END
11099 +
11100 +#define ktla_ktva(addr) (addr)
11101 +#define ktva_ktla(addr) (addr)
11102
11103 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
11104 diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
11105 index 013286a..8b42f4f 100644
11106 --- a/arch/x86/include/asm/pgtable_types.h
11107 +++ b/arch/x86/include/asm/pgtable_types.h
11108 @@ -16,13 +16,12 @@
11109 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
11110 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
11111 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
11112 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
11113 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
11114 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
11115 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
11116 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
11117 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
11118 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
11119 -#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
11120 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
11121 +#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
11122 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
11123
11124 /* If _PAGE_BIT_PRESENT is clear, we use these: */
11125 @@ -40,7 +39,6 @@
11126 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
11127 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
11128 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
11129 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
11130 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
11131 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
11132 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
11133 @@ -57,8 +55,10 @@
11134
11135 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
11136 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
11137 -#else
11138 +#elif defined(CONFIG_KMEMCHECK)
11139 #define _PAGE_NX (_AT(pteval_t, 0))
11140 +#else
11141 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
11142 #endif
11143
11144 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
11145 @@ -96,6 +96,9 @@
11146 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
11147 _PAGE_ACCESSED)
11148
11149 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
11150 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
11151 +
11152 #define __PAGE_KERNEL_EXEC \
11153 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
11154 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
11155 @@ -106,7 +109,7 @@
11156 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
11157 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
11158 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
11159 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
11160 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
11161 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
11162 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
11163 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
11164 @@ -168,8 +171,8 @@
11165 * bits are combined, this will alow user to access the high address mapped
11166 * VDSO in the presence of CONFIG_COMPAT_VDSO
11167 */
11168 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
11169 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
11170 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11171 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11172 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
11173 #endif
11174
11175 @@ -207,7 +210,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
11176 {
11177 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
11178 }
11179 +#endif
11180
11181 +#if PAGETABLE_LEVELS == 3
11182 +#include <asm-generic/pgtable-nopud.h>
11183 +#endif
11184 +
11185 +#if PAGETABLE_LEVELS == 2
11186 +#include <asm-generic/pgtable-nopmd.h>
11187 +#endif
11188 +
11189 +#ifndef __ASSEMBLY__
11190 #if PAGETABLE_LEVELS > 3
11191 typedef struct { pudval_t pud; } pud_t;
11192
11193 @@ -221,8 +234,6 @@ static inline pudval_t native_pud_val(pud_t pud)
11194 return pud.pud;
11195 }
11196 #else
11197 -#include <asm-generic/pgtable-nopud.h>
11198 -
11199 static inline pudval_t native_pud_val(pud_t pud)
11200 {
11201 return native_pgd_val(pud.pgd);
11202 @@ -242,8 +253,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
11203 return pmd.pmd;
11204 }
11205 #else
11206 -#include <asm-generic/pgtable-nopmd.h>
11207 -
11208 static inline pmdval_t native_pmd_val(pmd_t pmd)
11209 {
11210 return native_pgd_val(pmd.pud.pgd);
11211 @@ -283,7 +292,6 @@ typedef struct page *pgtable_t;
11212
11213 extern pteval_t __supported_pte_mask;
11214 extern void set_nx(void);
11215 -extern int nx_enabled;
11216
11217 #define pgprot_writecombine pgprot_writecombine
11218 extern pgprot_t pgprot_writecombine(pgprot_t prot);
11219 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
11220 index 58545c9..fe6fc38e 100644
11221 --- a/arch/x86/include/asm/processor.h
11222 +++ b/arch/x86/include/asm/processor.h
11223 @@ -266,7 +266,7 @@ struct tss_struct {
11224
11225 } ____cacheline_aligned;
11226
11227 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
11228 +extern struct tss_struct init_tss[NR_CPUS];
11229
11230 /*
11231 * Save the original ist values for checking stack pointers during debugging
11232 @@ -860,11 +860,18 @@ static inline void spin_lock_prefetch(const void *x)
11233 */
11234 #define TASK_SIZE PAGE_OFFSET
11235 #define TASK_SIZE_MAX TASK_SIZE
11236 +
11237 +#ifdef CONFIG_PAX_SEGMEXEC
11238 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
11239 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
11240 +#else
11241 #define STACK_TOP TASK_SIZE
11242 -#define STACK_TOP_MAX STACK_TOP
11243 +#endif
11244 +
11245 +#define STACK_TOP_MAX TASK_SIZE
11246
11247 #define INIT_THREAD { \
11248 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
11249 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11250 .vm86_info = NULL, \
11251 .sysenter_cs = __KERNEL_CS, \
11252 .io_bitmap_ptr = NULL, \
11253 @@ -878,7 +885,7 @@ static inline void spin_lock_prefetch(const void *x)
11254 */
11255 #define INIT_TSS { \
11256 .x86_tss = { \
11257 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
11258 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11259 .ss0 = __KERNEL_DS, \
11260 .ss1 = __KERNEL_CS, \
11261 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
11262 @@ -889,11 +896,7 @@ static inline void spin_lock_prefetch(const void *x)
11263 extern unsigned long thread_saved_pc(struct task_struct *tsk);
11264
11265 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
11266 -#define KSTK_TOP(info) \
11267 -({ \
11268 - unsigned long *__ptr = (unsigned long *)(info); \
11269 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
11270 -})
11271 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
11272
11273 /*
11274 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
11275 @@ -908,7 +911,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11276 #define task_pt_regs(task) \
11277 ({ \
11278 struct pt_regs *__regs__; \
11279 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
11280 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
11281 __regs__ - 1; \
11282 })
11283
11284 @@ -918,13 +921,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11285 /*
11286 * User space process size. 47bits minus one guard page.
11287 */
11288 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
11289 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
11290
11291 /* This decides where the kernel will search for a free chunk of vm
11292 * space during mmap's.
11293 */
11294 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
11295 - 0xc0000000 : 0xFFFFe000)
11296 + 0xc0000000 : 0xFFFFf000)
11297
11298 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
11299 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
11300 @@ -935,11 +938,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11301 #define STACK_TOP_MAX TASK_SIZE_MAX
11302
11303 #define INIT_THREAD { \
11304 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11305 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11306 }
11307
11308 #define INIT_TSS { \
11309 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11310 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11311 }
11312
11313 /*
11314 @@ -961,6 +964,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
11315 */
11316 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
11317
11318 +#ifdef CONFIG_PAX_SEGMEXEC
11319 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
11320 +#endif
11321 +
11322 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
11323
11324 /* Get/set a process' ability to use the timestamp counter instruction */
11325 diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
11326 index 3566454..4bdfb8c 100644
11327 --- a/arch/x86/include/asm/ptrace.h
11328 +++ b/arch/x86/include/asm/ptrace.h
11329 @@ -156,28 +156,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
11330 }
11331
11332 /*
11333 - * user_mode_vm(regs) determines whether a register set came from user mode.
11334 + * user_mode(regs) determines whether a register set came from user mode.
11335 * This is true if V8086 mode was enabled OR if the register set was from
11336 * protected mode with RPL-3 CS value. This tricky test checks that with
11337 * one comparison. Many places in the kernel can bypass this full check
11338 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
11339 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
11340 + * be used.
11341 */
11342 -static inline int user_mode(struct pt_regs *regs)
11343 +static inline int user_mode_novm(struct pt_regs *regs)
11344 {
11345 #ifdef CONFIG_X86_32
11346 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
11347 #else
11348 - return !!(regs->cs & 3);
11349 + return !!(regs->cs & SEGMENT_RPL_MASK);
11350 #endif
11351 }
11352
11353 -static inline int user_mode_vm(struct pt_regs *regs)
11354 +static inline int user_mode(struct pt_regs *regs)
11355 {
11356 #ifdef CONFIG_X86_32
11357 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
11358 USER_RPL;
11359 #else
11360 - return user_mode(regs);
11361 + return user_mode_novm(regs);
11362 #endif
11363 }
11364
11365 @@ -193,15 +194,16 @@ static inline int v8086_mode(struct pt_regs *regs)
11366 #ifdef CONFIG_X86_64
11367 static inline bool user_64bit_mode(struct pt_regs *regs)
11368 {
11369 + unsigned long cs = regs->cs & 0xffff;
11370 #ifndef CONFIG_PARAVIRT
11371 /*
11372 * On non-paravirt systems, this is the only long mode CPL 3
11373 * selector. We do not allow long mode selectors in the LDT.
11374 */
11375 - return regs->cs == __USER_CS;
11376 + return cs == __USER_CS;
11377 #else
11378 /* Headers are too twisted for this to go in paravirt.h. */
11379 - return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
11380 + return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
11381 #endif
11382 }
11383 #endif
11384 diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
11385 index 92f29706..a79cbbb 100644
11386 --- a/arch/x86/include/asm/reboot.h
11387 +++ b/arch/x86/include/asm/reboot.h
11388 @@ -6,19 +6,19 @@
11389 struct pt_regs;
11390
11391 struct machine_ops {
11392 - void (*restart)(char *cmd);
11393 - void (*halt)(void);
11394 - void (*power_off)(void);
11395 + void (* __noreturn restart)(char *cmd);
11396 + void (* __noreturn halt)(void);
11397 + void (* __noreturn power_off)(void);
11398 void (*shutdown)(void);
11399 void (*crash_shutdown)(struct pt_regs *);
11400 - void (*emergency_restart)(void);
11401 -};
11402 + void (* __noreturn emergency_restart)(void);
11403 +} __no_const;
11404
11405 extern struct machine_ops machine_ops;
11406
11407 void native_machine_crash_shutdown(struct pt_regs *regs);
11408 void native_machine_shutdown(void);
11409 -void machine_real_restart(unsigned int type);
11410 +void machine_real_restart(unsigned int type) __noreturn;
11411 /* These must match dispatch_table in reboot_32.S */
11412 #define MRR_BIOS 0
11413 #define MRR_APM 1
11414 diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
11415 index 2dbe4a7..ce1db00 100644
11416 --- a/arch/x86/include/asm/rwsem.h
11417 +++ b/arch/x86/include/asm/rwsem.h
11418 @@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
11419 {
11420 asm volatile("# beginning down_read\n\t"
11421 LOCK_PREFIX _ASM_INC "(%1)\n\t"
11422 +
11423 +#ifdef CONFIG_PAX_REFCOUNT
11424 + "jno 0f\n"
11425 + LOCK_PREFIX _ASM_DEC "(%1)\n"
11426 + "int $4\n0:\n"
11427 + _ASM_EXTABLE(0b, 0b)
11428 +#endif
11429 +
11430 /* adds 0x00000001 */
11431 " jns 1f\n"
11432 " call call_rwsem_down_read_failed\n"
11433 @@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
11434 "1:\n\t"
11435 " mov %1,%2\n\t"
11436 " add %3,%2\n\t"
11437 +
11438 +#ifdef CONFIG_PAX_REFCOUNT
11439 + "jno 0f\n"
11440 + "sub %3,%2\n"
11441 + "int $4\n0:\n"
11442 + _ASM_EXTABLE(0b, 0b)
11443 +#endif
11444 +
11445 " jle 2f\n\t"
11446 LOCK_PREFIX " cmpxchg %2,%0\n\t"
11447 " jnz 1b\n\t"
11448 @@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
11449 long tmp;
11450 asm volatile("# beginning down_write\n\t"
11451 LOCK_PREFIX " xadd %1,(%2)\n\t"
11452 +
11453 +#ifdef CONFIG_PAX_REFCOUNT
11454 + "jno 0f\n"
11455 + "mov %1,(%2)\n"
11456 + "int $4\n0:\n"
11457 + _ASM_EXTABLE(0b, 0b)
11458 +#endif
11459 +
11460 /* adds 0xffff0001, returns the old value */
11461 " test %1,%1\n\t"
11462 /* was the count 0 before? */
11463 @@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem)
11464 long tmp;
11465 asm volatile("# beginning __up_read\n\t"
11466 LOCK_PREFIX " xadd %1,(%2)\n\t"
11467 +
11468 +#ifdef CONFIG_PAX_REFCOUNT
11469 + "jno 0f\n"
11470 + "mov %1,(%2)\n"
11471 + "int $4\n0:\n"
11472 + _ASM_EXTABLE(0b, 0b)
11473 +#endif
11474 +
11475 /* subtracts 1, returns the old value */
11476 " jns 1f\n\t"
11477 " call call_rwsem_wake\n" /* expects old value in %edx */
11478 @@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem)
11479 long tmp;
11480 asm volatile("# beginning __up_write\n\t"
11481 LOCK_PREFIX " xadd %1,(%2)\n\t"
11482 +
11483 +#ifdef CONFIG_PAX_REFCOUNT
11484 + "jno 0f\n"
11485 + "mov %1,(%2)\n"
11486 + "int $4\n0:\n"
11487 + _ASM_EXTABLE(0b, 0b)
11488 +#endif
11489 +
11490 /* subtracts 0xffff0001, returns the old value */
11491 " jns 1f\n\t"
11492 " call call_rwsem_wake\n" /* expects old value in %edx */
11493 @@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
11494 {
11495 asm volatile("# beginning __downgrade_write\n\t"
11496 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
11497 +
11498 +#ifdef CONFIG_PAX_REFCOUNT
11499 + "jno 0f\n"
11500 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
11501 + "int $4\n0:\n"
11502 + _ASM_EXTABLE(0b, 0b)
11503 +#endif
11504 +
11505 /*
11506 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
11507 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
11508 @@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
11509 */
11510 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
11511 {
11512 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
11513 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
11514 +
11515 +#ifdef CONFIG_PAX_REFCOUNT
11516 + "jno 0f\n"
11517 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
11518 + "int $4\n0:\n"
11519 + _ASM_EXTABLE(0b, 0b)
11520 +#endif
11521 +
11522 : "+m" (sem->count)
11523 : "er" (delta));
11524 }
11525 @@ -204,7 +260,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
11526 */
11527 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
11528 {
11529 - return delta + xadd(&sem->count, delta);
11530 + return delta + xadd_check_overflow(&sem->count, delta);
11531 }
11532
11533 #endif /* __KERNEL__ */
11534 diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
11535 index 5e64171..f58957e 100644
11536 --- a/arch/x86/include/asm/segment.h
11537 +++ b/arch/x86/include/asm/segment.h
11538 @@ -64,10 +64,15 @@
11539 * 26 - ESPFIX small SS
11540 * 27 - per-cpu [ offset to per-cpu data area ]
11541 * 28 - stack_canary-20 [ for stack protector ]
11542 - * 29 - unused
11543 - * 30 - unused
11544 + * 29 - PCI BIOS CS
11545 + * 30 - PCI BIOS DS
11546 * 31 - TSS for double fault handler
11547 */
11548 +#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
11549 +#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
11550 +#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
11551 +#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
11552 +
11553 #define GDT_ENTRY_TLS_MIN 6
11554 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
11555
11556 @@ -79,6 +84,8 @@
11557
11558 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
11559
11560 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
11561 +
11562 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
11563
11564 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
11565 @@ -104,6 +111,12 @@
11566 #define __KERNEL_STACK_CANARY 0
11567 #endif
11568
11569 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
11570 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
11571 +
11572 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
11573 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
11574 +
11575 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
11576
11577 /*
11578 @@ -141,7 +154,7 @@
11579 */
11580
11581 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
11582 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
11583 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
11584
11585
11586 #else
11587 @@ -165,6 +178,8 @@
11588 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
11589 #define __USER32_DS __USER_DS
11590
11591 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
11592 +
11593 #define GDT_ENTRY_TSS 8 /* needs two entries */
11594 #define GDT_ENTRY_LDT 10 /* needs two entries */
11595 #define GDT_ENTRY_TLS_MIN 12
11596 @@ -185,6 +200,7 @@
11597 #endif
11598
11599 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
11600 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
11601 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
11602 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
11603 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
11604 diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
11605 index 0434c40..1714bf0 100644
11606 --- a/arch/x86/include/asm/smp.h
11607 +++ b/arch/x86/include/asm/smp.h
11608 @@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
11609 /* cpus sharing the last level cache: */
11610 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
11611 DECLARE_PER_CPU(u16, cpu_llc_id);
11612 -DECLARE_PER_CPU(int, cpu_number);
11613 +DECLARE_PER_CPU(unsigned int, cpu_number);
11614
11615 static inline struct cpumask *cpu_sibling_mask(int cpu)
11616 {
11617 @@ -77,7 +77,7 @@ struct smp_ops {
11618
11619 void (*send_call_func_ipi)(const struct cpumask *mask);
11620 void (*send_call_func_single_ipi)(int cpu);
11621 -};
11622 +} __no_const;
11623
11624 /* Globals due to paravirt */
11625 extern void set_cpu_sibling_map(int cpu);
11626 @@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitdata;
11627 extern int safe_smp_processor_id(void);
11628
11629 #elif defined(CONFIG_X86_64_SMP)
11630 -#define raw_smp_processor_id() (percpu_read(cpu_number))
11631 -
11632 -#define stack_smp_processor_id() \
11633 -({ \
11634 - struct thread_info *ti; \
11635 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
11636 - ti->cpu; \
11637 -})
11638 +#define raw_smp_processor_id() (percpu_read(cpu_number))
11639 +#define stack_smp_processor_id() raw_smp_processor_id()
11640 #define safe_smp_processor_id() smp_processor_id()
11641
11642 #endif
11643 diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
11644 index a82c2bf..2198f61 100644
11645 --- a/arch/x86/include/asm/spinlock.h
11646 +++ b/arch/x86/include/asm/spinlock.h
11647 @@ -175,6 +175,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
11648 static inline void arch_read_lock(arch_rwlock_t *rw)
11649 {
11650 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
11651 +
11652 +#ifdef CONFIG_PAX_REFCOUNT
11653 + "jno 0f\n"
11654 + LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
11655 + "int $4\n0:\n"
11656 + _ASM_EXTABLE(0b, 0b)
11657 +#endif
11658 +
11659 "jns 1f\n"
11660 "call __read_lock_failed\n\t"
11661 "1:\n"
11662 @@ -184,6 +192,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
11663 static inline void arch_write_lock(arch_rwlock_t *rw)
11664 {
11665 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
11666 +
11667 +#ifdef CONFIG_PAX_REFCOUNT
11668 + "jno 0f\n"
11669 + LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
11670 + "int $4\n0:\n"
11671 + _ASM_EXTABLE(0b, 0b)
11672 +#endif
11673 +
11674 "jz 1f\n"
11675 "call __write_lock_failed\n\t"
11676 "1:\n"
11677 @@ -213,13 +229,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
11678
11679 static inline void arch_read_unlock(arch_rwlock_t *rw)
11680 {
11681 - asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
11682 + asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
11683 +
11684 +#ifdef CONFIG_PAX_REFCOUNT
11685 + "jno 0f\n"
11686 + LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
11687 + "int $4\n0:\n"
11688 + _ASM_EXTABLE(0b, 0b)
11689 +#endif
11690 +
11691 :"+m" (rw->lock) : : "memory");
11692 }
11693
11694 static inline void arch_write_unlock(arch_rwlock_t *rw)
11695 {
11696 - asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
11697 + asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
11698 +
11699 +#ifdef CONFIG_PAX_REFCOUNT
11700 + "jno 0f\n"
11701 + LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
11702 + "int $4\n0:\n"
11703 + _ASM_EXTABLE(0b, 0b)
11704 +#endif
11705 +
11706 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
11707 }
11708
11709 diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
11710 index 1575177..cb23f52 100644
11711 --- a/arch/x86/include/asm/stackprotector.h
11712 +++ b/arch/x86/include/asm/stackprotector.h
11713 @@ -48,7 +48,7 @@
11714 * head_32 for boot CPU and setup_per_cpu_areas() for others.
11715 */
11716 #define GDT_STACK_CANARY_INIT \
11717 - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
11718 + [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
11719
11720 /*
11721 * Initialize the stackprotector canary value.
11722 @@ -113,7 +113,7 @@ static inline void setup_stack_canary_segment(int cpu)
11723
11724 static inline void load_stack_canary_segment(void)
11725 {
11726 -#ifdef CONFIG_X86_32
11727 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
11728 asm volatile ("mov %0, %%gs" : : "r" (0));
11729 #endif
11730 }
11731 diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
11732 index 70bbe39..4ae2bd4 100644
11733 --- a/arch/x86/include/asm/stacktrace.h
11734 +++ b/arch/x86/include/asm/stacktrace.h
11735 @@ -11,28 +11,20 @@
11736
11737 extern int kstack_depth_to_print;
11738
11739 -struct thread_info;
11740 +struct task_struct;
11741 struct stacktrace_ops;
11742
11743 -typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
11744 - unsigned long *stack,
11745 - unsigned long bp,
11746 - const struct stacktrace_ops *ops,
11747 - void *data,
11748 - unsigned long *end,
11749 - int *graph);
11750 +typedef unsigned long walk_stack_t(struct task_struct *task,
11751 + void *stack_start,
11752 + unsigned long *stack,
11753 + unsigned long bp,
11754 + const struct stacktrace_ops *ops,
11755 + void *data,
11756 + unsigned long *end,
11757 + int *graph);
11758
11759 -extern unsigned long
11760 -print_context_stack(struct thread_info *tinfo,
11761 - unsigned long *stack, unsigned long bp,
11762 - const struct stacktrace_ops *ops, void *data,
11763 - unsigned long *end, int *graph);
11764 -
11765 -extern unsigned long
11766 -print_context_stack_bp(struct thread_info *tinfo,
11767 - unsigned long *stack, unsigned long bp,
11768 - const struct stacktrace_ops *ops, void *data,
11769 - unsigned long *end, int *graph);
11770 +extern walk_stack_t print_context_stack;
11771 +extern walk_stack_t print_context_stack_bp;
11772
11773 /* Generic stack tracer with callbacks */
11774
11775 @@ -40,7 +32,7 @@ struct stacktrace_ops {
11776 void (*address)(void *data, unsigned long address, int reliable);
11777 /* On negative return stop dumping */
11778 int (*stack)(void *data, char *name);
11779 - walk_stack_t walk_stack;
11780 + walk_stack_t *walk_stack;
11781 };
11782
11783 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
11784 diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
11785 index cb23852..2dde194 100644
11786 --- a/arch/x86/include/asm/sys_ia32.h
11787 +++ b/arch/x86/include/asm/sys_ia32.h
11788 @@ -40,7 +40,7 @@ asmlinkage long sys32_rt_sigprocmask(int, compat_sigset_t __user *,
11789 compat_sigset_t __user *, unsigned int);
11790 asmlinkage long sys32_alarm(unsigned int);
11791
11792 -asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
11793 +asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
11794 asmlinkage long sys32_sysfs(int, u32, u32);
11795
11796 asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
11797 diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
11798 index 2d2f01c..f985723 100644
11799 --- a/arch/x86/include/asm/system.h
11800 +++ b/arch/x86/include/asm/system.h
11801 @@ -129,7 +129,7 @@ do { \
11802 "call __switch_to\n\t" \
11803 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
11804 __switch_canary \
11805 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
11806 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
11807 "movq %%rax,%%rdi\n\t" \
11808 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
11809 "jnz ret_from_fork\n\t" \
11810 @@ -140,7 +140,7 @@ do { \
11811 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
11812 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
11813 [_tif_fork] "i" (_TIF_FORK), \
11814 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
11815 + [thread_info] "m" (current_tinfo), \
11816 [current_task] "m" (current_task) \
11817 __switch_canary_iparam \
11818 : "memory", "cc" __EXTRA_CLOBBER)
11819 @@ -200,7 +200,7 @@ static inline unsigned long get_limit(unsigned long segment)
11820 {
11821 unsigned long __limit;
11822 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
11823 - return __limit + 1;
11824 + return __limit;
11825 }
11826
11827 static inline void native_clts(void)
11828 @@ -397,13 +397,13 @@ void enable_hlt(void);
11829
11830 void cpu_idle_wait(void);
11831
11832 -extern unsigned long arch_align_stack(unsigned long sp);
11833 +#define arch_align_stack(x) ((x) & ~0xfUL)
11834 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
11835
11836 void default_idle(void);
11837 bool set_pm_idle_to_default(void);
11838
11839 -void stop_this_cpu(void *dummy);
11840 +void stop_this_cpu(void *dummy) __noreturn;
11841
11842 /*
11843 * Force strict CPU ordering.
11844 diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
11845 index cfd8144..1b1127d 100644
11846 --- a/arch/x86/include/asm/thread_info.h
11847 +++ b/arch/x86/include/asm/thread_info.h
11848 @@ -10,6 +10,7 @@
11849 #include <linux/compiler.h>
11850 #include <asm/page.h>
11851 #include <asm/types.h>
11852 +#include <asm/percpu.h>
11853
11854 /*
11855 * low level task data that entry.S needs immediate access to
11856 @@ -24,7 +25,6 @@ struct exec_domain;
11857 #include <linux/atomic.h>
11858
11859 struct thread_info {
11860 - struct task_struct *task; /* main task structure */
11861 struct exec_domain *exec_domain; /* execution domain */
11862 __u32 flags; /* low level flags */
11863 __u32 status; /* thread synchronous flags */
11864 @@ -34,19 +34,13 @@ struct thread_info {
11865 mm_segment_t addr_limit;
11866 struct restart_block restart_block;
11867 void __user *sysenter_return;
11868 -#ifdef CONFIG_X86_32
11869 - unsigned long previous_esp; /* ESP of the previous stack in
11870 - case of nested (IRQ) stacks
11871 - */
11872 - __u8 supervisor_stack[0];
11873 -#endif
11874 + unsigned long lowest_stack;
11875 unsigned int sig_on_uaccess_error:1;
11876 unsigned int uaccess_err:1; /* uaccess failed */
11877 };
11878
11879 -#define INIT_THREAD_INFO(tsk) \
11880 +#define INIT_THREAD_INFO \
11881 { \
11882 - .task = &tsk, \
11883 .exec_domain = &default_exec_domain, \
11884 .flags = 0, \
11885 .cpu = 0, \
11886 @@ -57,7 +51,7 @@ struct thread_info {
11887 }, \
11888 }
11889
11890 -#define init_thread_info (init_thread_union.thread_info)
11891 +#define init_thread_info (init_thread_union.stack)
11892 #define init_stack (init_thread_union.stack)
11893
11894 #else /* !__ASSEMBLY__ */
11895 @@ -169,45 +163,40 @@ struct thread_info {
11896 ret; \
11897 })
11898
11899 -#ifdef CONFIG_X86_32
11900 -
11901 -#define STACK_WARN (THREAD_SIZE/8)
11902 -/*
11903 - * macros/functions for gaining access to the thread information structure
11904 - *
11905 - * preempt_count needs to be 1 initially, until the scheduler is functional.
11906 - */
11907 -#ifndef __ASSEMBLY__
11908 -
11909 -
11910 -/* how to get the current stack pointer from C */
11911 -register unsigned long current_stack_pointer asm("esp") __used;
11912 -
11913 -/* how to get the thread information struct from C */
11914 -static inline struct thread_info *current_thread_info(void)
11915 -{
11916 - return (struct thread_info *)
11917 - (current_stack_pointer & ~(THREAD_SIZE - 1));
11918 -}
11919 -
11920 -#else /* !__ASSEMBLY__ */
11921 -
11922 +#ifdef __ASSEMBLY__
11923 /* how to get the thread information struct from ASM */
11924 #define GET_THREAD_INFO(reg) \
11925 - movl $-THREAD_SIZE, reg; \
11926 - andl %esp, reg
11927 + mov PER_CPU_VAR(current_tinfo), reg
11928
11929 /* use this one if reg already contains %esp */
11930 -#define GET_THREAD_INFO_WITH_ESP(reg) \
11931 - andl $-THREAD_SIZE, reg
11932 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
11933 +#else
11934 +/* how to get the thread information struct from C */
11935 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
11936 +
11937 +static __always_inline struct thread_info *current_thread_info(void)
11938 +{
11939 + return percpu_read_stable(current_tinfo);
11940 +}
11941 +#endif
11942 +
11943 +#ifdef CONFIG_X86_32
11944 +
11945 +#define STACK_WARN (THREAD_SIZE/8)
11946 +/*
11947 + * macros/functions for gaining access to the thread information structure
11948 + *
11949 + * preempt_count needs to be 1 initially, until the scheduler is functional.
11950 + */
11951 +#ifndef __ASSEMBLY__
11952 +
11953 +/* how to get the current stack pointer from C */
11954 +register unsigned long current_stack_pointer asm("esp") __used;
11955
11956 #endif
11957
11958 #else /* X86_32 */
11959
11960 -#include <asm/percpu.h>
11961 -#define KERNEL_STACK_OFFSET (5*8)
11962 -
11963 /*
11964 * macros/functions for gaining access to the thread information structure
11965 * preempt_count needs to be 1 initially, until the scheduler is functional.
11966 @@ -215,27 +204,8 @@ static inline struct thread_info *current_thread_info(void)
11967 #ifndef __ASSEMBLY__
11968 DECLARE_PER_CPU(unsigned long, kernel_stack);
11969
11970 -static inline struct thread_info *current_thread_info(void)
11971 -{
11972 - struct thread_info *ti;
11973 - ti = (void *)(percpu_read_stable(kernel_stack) +
11974 - KERNEL_STACK_OFFSET - THREAD_SIZE);
11975 - return ti;
11976 -}
11977 -
11978 -#else /* !__ASSEMBLY__ */
11979 -
11980 -/* how to get the thread information struct from ASM */
11981 -#define GET_THREAD_INFO(reg) \
11982 - movq PER_CPU_VAR(kernel_stack),reg ; \
11983 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
11984 -
11985 -/*
11986 - * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
11987 - * a certain register (to be used in assembler memory operands).
11988 - */
11989 -#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
11990 -
11991 +/* how to get the current stack pointer from C */
11992 +register unsigned long current_stack_pointer asm("rsp") __used;
11993 #endif
11994
11995 #endif /* !X86_32 */
11996 @@ -269,5 +239,16 @@ extern void arch_task_cache_init(void);
11997 extern void free_thread_info(struct thread_info *ti);
11998 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
11999 #define arch_task_cache_init arch_task_cache_init
12000 +
12001 +#define __HAVE_THREAD_FUNCTIONS
12002 +#define task_thread_info(task) (&(task)->tinfo)
12003 +#define task_stack_page(task) ((task)->stack)
12004 +#define setup_thread_stack(p, org) do {} while (0)
12005 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
12006 +
12007 +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
12008 +extern struct task_struct *alloc_task_struct_node(int node);
12009 +extern void free_task_struct(struct task_struct *);
12010 +
12011 #endif
12012 #endif /* _ASM_X86_THREAD_INFO_H */
12013 diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
12014 index 8be5f54..7ae826d 100644
12015 --- a/arch/x86/include/asm/uaccess.h
12016 +++ b/arch/x86/include/asm/uaccess.h
12017 @@ -7,12 +7,15 @@
12018 #include <linux/compiler.h>
12019 #include <linux/thread_info.h>
12020 #include <linux/string.h>
12021 +#include <linux/sched.h>
12022 #include <asm/asm.h>
12023 #include <asm/page.h>
12024
12025 #define VERIFY_READ 0
12026 #define VERIFY_WRITE 1
12027
12028 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
12029 +
12030 /*
12031 * The fs value determines whether argument validity checking should be
12032 * performed or not. If get_fs() == USER_DS, checking is performed, with
12033 @@ -28,7 +31,12 @@
12034
12035 #define get_ds() (KERNEL_DS)
12036 #define get_fs() (current_thread_info()->addr_limit)
12037 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12038 +void __set_fs(mm_segment_t x);
12039 +void set_fs(mm_segment_t x);
12040 +#else
12041 #define set_fs(x) (current_thread_info()->addr_limit = (x))
12042 +#endif
12043
12044 #define segment_eq(a, b) ((a).seg == (b).seg)
12045
12046 @@ -76,7 +84,33 @@
12047 * checks that the pointer is in the user space range - after calling
12048 * this function, memory access functions may still return -EFAULT.
12049 */
12050 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12051 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12052 +#define access_ok(type, addr, size) \
12053 +({ \
12054 + long __size = size; \
12055 + unsigned long __addr = (unsigned long)addr; \
12056 + unsigned long __addr_ao = __addr & PAGE_MASK; \
12057 + unsigned long __end_ao = __addr + __size - 1; \
12058 + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
12059 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
12060 + while(__addr_ao <= __end_ao) { \
12061 + char __c_ao; \
12062 + __addr_ao += PAGE_SIZE; \
12063 + if (__size > PAGE_SIZE) \
12064 + cond_resched(); \
12065 + if (__get_user(__c_ao, (char __user *)__addr)) \
12066 + break; \
12067 + if (type != VERIFY_WRITE) { \
12068 + __addr = __addr_ao; \
12069 + continue; \
12070 + } \
12071 + if (__put_user(__c_ao, (char __user *)__addr)) \
12072 + break; \
12073 + __addr = __addr_ao; \
12074 + } \
12075 + } \
12076 + __ret_ao; \
12077 +})
12078
12079 /*
12080 * The exception table consists of pairs of addresses: the first is the
12081 @@ -182,12 +216,20 @@ extern int __get_user_bad(void);
12082 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
12083 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
12084
12085 -
12086 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12087 +#define __copyuser_seg "gs;"
12088 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
12089 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
12090 +#else
12091 +#define __copyuser_seg
12092 +#define __COPYUSER_SET_ES
12093 +#define __COPYUSER_RESTORE_ES
12094 +#endif
12095
12096 #ifdef CONFIG_X86_32
12097 #define __put_user_asm_u64(x, addr, err, errret) \
12098 - asm volatile("1: movl %%eax,0(%2)\n" \
12099 - "2: movl %%edx,4(%2)\n" \
12100 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
12101 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
12102 "3:\n" \
12103 ".section .fixup,\"ax\"\n" \
12104 "4: movl %3,%0\n" \
12105 @@ -199,8 +241,8 @@ extern int __get_user_bad(void);
12106 : "A" (x), "r" (addr), "i" (errret), "0" (err))
12107
12108 #define __put_user_asm_ex_u64(x, addr) \
12109 - asm volatile("1: movl %%eax,0(%1)\n" \
12110 - "2: movl %%edx,4(%1)\n" \
12111 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
12112 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
12113 "3:\n" \
12114 _ASM_EXTABLE(1b, 2b - 1b) \
12115 _ASM_EXTABLE(2b, 3b - 2b) \
12116 @@ -252,7 +294,7 @@ extern void __put_user_8(void);
12117 __typeof__(*(ptr)) __pu_val; \
12118 __chk_user_ptr(ptr); \
12119 might_fault(); \
12120 - __pu_val = x; \
12121 + __pu_val = (x); \
12122 switch (sizeof(*(ptr))) { \
12123 case 1: \
12124 __put_user_x(1, __pu_val, ptr, __ret_pu); \
12125 @@ -373,7 +415,7 @@ do { \
12126 } while (0)
12127
12128 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12129 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
12130 + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
12131 "2:\n" \
12132 ".section .fixup,\"ax\"\n" \
12133 "3: mov %3,%0\n" \
12134 @@ -381,7 +423,7 @@ do { \
12135 " jmp 2b\n" \
12136 ".previous\n" \
12137 _ASM_EXTABLE(1b, 3b) \
12138 - : "=r" (err), ltype(x) \
12139 + : "=r" (err), ltype (x) \
12140 : "m" (__m(addr)), "i" (errret), "0" (err))
12141
12142 #define __get_user_size_ex(x, ptr, size) \
12143 @@ -406,7 +448,7 @@ do { \
12144 } while (0)
12145
12146 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
12147 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
12148 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
12149 "2:\n" \
12150 _ASM_EXTABLE(1b, 2b - 1b) \
12151 : ltype(x) : "m" (__m(addr)))
12152 @@ -423,13 +465,24 @@ do { \
12153 int __gu_err; \
12154 unsigned long __gu_val; \
12155 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
12156 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
12157 + (x) = (__typeof__(*(ptr)))__gu_val; \
12158 __gu_err; \
12159 })
12160
12161 /* FIXME: this hack is definitely wrong -AK */
12162 struct __large_struct { unsigned long buf[100]; };
12163 -#define __m(x) (*(struct __large_struct __user *)(x))
12164 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12165 +#define ____m(x) \
12166 +({ \
12167 + unsigned long ____x = (unsigned long)(x); \
12168 + if (____x < PAX_USER_SHADOW_BASE) \
12169 + ____x += PAX_USER_SHADOW_BASE; \
12170 + (void __user *)____x; \
12171 +})
12172 +#else
12173 +#define ____m(x) (x)
12174 +#endif
12175 +#define __m(x) (*(struct __large_struct __user *)____m(x))
12176
12177 /*
12178 * Tell gcc we read from memory instead of writing: this is because
12179 @@ -437,7 +490,7 @@ struct __large_struct { unsigned long buf[100]; };
12180 * aliasing issues.
12181 */
12182 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12183 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
12184 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
12185 "2:\n" \
12186 ".section .fixup,\"ax\"\n" \
12187 "3: mov %3,%0\n" \
12188 @@ -445,10 +498,10 @@ struct __large_struct { unsigned long buf[100]; };
12189 ".previous\n" \
12190 _ASM_EXTABLE(1b, 3b) \
12191 : "=r"(err) \
12192 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
12193 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
12194
12195 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
12196 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
12197 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
12198 "2:\n" \
12199 _ASM_EXTABLE(1b, 2b - 1b) \
12200 : : ltype(x), "m" (__m(addr)))
12201 @@ -487,8 +540,12 @@ struct __large_struct { unsigned long buf[100]; };
12202 * On error, the variable @x is set to zero.
12203 */
12204
12205 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12206 +#define __get_user(x, ptr) get_user((x), (ptr))
12207 +#else
12208 #define __get_user(x, ptr) \
12209 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
12210 +#endif
12211
12212 /**
12213 * __put_user: - Write a simple value into user space, with less checking.
12214 @@ -510,8 +567,12 @@ struct __large_struct { unsigned long buf[100]; };
12215 * Returns zero on success, or -EFAULT on error.
12216 */
12217
12218 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12219 +#define __put_user(x, ptr) put_user((x), (ptr))
12220 +#else
12221 #define __put_user(x, ptr) \
12222 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
12223 +#endif
12224
12225 #define __get_user_unaligned __get_user
12226 #define __put_user_unaligned __put_user
12227 @@ -529,7 +590,7 @@ struct __large_struct { unsigned long buf[100]; };
12228 #define get_user_ex(x, ptr) do { \
12229 unsigned long __gue_val; \
12230 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
12231 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
12232 + (x) = (__typeof__(*(ptr)))__gue_val; \
12233 } while (0)
12234
12235 #ifdef CONFIG_X86_WP_WORKS_OK
12236 diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
12237 index 566e803..b9521e9 100644
12238 --- a/arch/x86/include/asm/uaccess_32.h
12239 +++ b/arch/x86/include/asm/uaccess_32.h
12240 @@ -43,6 +43,9 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
12241 static __always_inline unsigned long __must_check
12242 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12243 {
12244 + if ((long)n < 0)
12245 + return n;
12246 +
12247 if (__builtin_constant_p(n)) {
12248 unsigned long ret;
12249
12250 @@ -61,6 +64,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12251 return ret;
12252 }
12253 }
12254 + if (!__builtin_constant_p(n))
12255 + check_object_size(from, n, true);
12256 return __copy_to_user_ll(to, from, n);
12257 }
12258
12259 @@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
12260 __copy_to_user(void __user *to, const void *from, unsigned long n)
12261 {
12262 might_fault();
12263 +
12264 return __copy_to_user_inatomic(to, from, n);
12265 }
12266
12267 static __always_inline unsigned long
12268 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
12269 {
12270 + if ((long)n < 0)
12271 + return n;
12272 +
12273 /* Avoid zeroing the tail if the copy fails..
12274 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
12275 * but as the zeroing behaviour is only significant when n is not
12276 @@ -137,6 +146,10 @@ static __always_inline unsigned long
12277 __copy_from_user(void *to, const void __user *from, unsigned long n)
12278 {
12279 might_fault();
12280 +
12281 + if ((long)n < 0)
12282 + return n;
12283 +
12284 if (__builtin_constant_p(n)) {
12285 unsigned long ret;
12286
12287 @@ -152,6 +165,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
12288 return ret;
12289 }
12290 }
12291 + if (!__builtin_constant_p(n))
12292 + check_object_size(to, n, false);
12293 return __copy_from_user_ll(to, from, n);
12294 }
12295
12296 @@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
12297 const void __user *from, unsigned long n)
12298 {
12299 might_fault();
12300 +
12301 + if ((long)n < 0)
12302 + return n;
12303 +
12304 if (__builtin_constant_p(n)) {
12305 unsigned long ret;
12306
12307 @@ -181,15 +200,19 @@ static __always_inline unsigned long
12308 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
12309 unsigned long n)
12310 {
12311 - return __copy_from_user_ll_nocache_nozero(to, from, n);
12312 + if ((long)n < 0)
12313 + return n;
12314 +
12315 + return __copy_from_user_ll_nocache_nozero(to, from, n);
12316 }
12317
12318 -unsigned long __must_check copy_to_user(void __user *to,
12319 - const void *from, unsigned long n);
12320 -unsigned long __must_check _copy_from_user(void *to,
12321 - const void __user *from,
12322 - unsigned long n);
12323 -
12324 +extern void copy_to_user_overflow(void)
12325 +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
12326 + __compiletime_error("copy_to_user() buffer size is not provably correct")
12327 +#else
12328 + __compiletime_warning("copy_to_user() buffer size is not provably correct")
12329 +#endif
12330 +;
12331
12332 extern void copy_from_user_overflow(void)
12333 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
12334 @@ -199,17 +222,61 @@ extern void copy_from_user_overflow(void)
12335 #endif
12336 ;
12337
12338 -static inline unsigned long __must_check copy_from_user(void *to,
12339 - const void __user *from,
12340 - unsigned long n)
12341 +/**
12342 + * copy_to_user: - Copy a block of data into user space.
12343 + * @to: Destination address, in user space.
12344 + * @from: Source address, in kernel space.
12345 + * @n: Number of bytes to copy.
12346 + *
12347 + * Context: User context only. This function may sleep.
12348 + *
12349 + * Copy data from kernel space to user space.
12350 + *
12351 + * Returns number of bytes that could not be copied.
12352 + * On success, this will be zero.
12353 + */
12354 +static inline unsigned long __must_check
12355 +copy_to_user(void __user *to, const void *from, unsigned long n)
12356 +{
12357 + int sz = __compiletime_object_size(from);
12358 +
12359 + if (unlikely(sz != -1 && sz < n))
12360 + copy_to_user_overflow();
12361 + else if (access_ok(VERIFY_WRITE, to, n))
12362 + n = __copy_to_user(to, from, n);
12363 + return n;
12364 +}
12365 +
12366 +/**
12367 + * copy_from_user: - Copy a block of data from user space.
12368 + * @to: Destination address, in kernel space.
12369 + * @from: Source address, in user space.
12370 + * @n: Number of bytes to copy.
12371 + *
12372 + * Context: User context only. This function may sleep.
12373 + *
12374 + * Copy data from user space to kernel space.
12375 + *
12376 + * Returns number of bytes that could not be copied.
12377 + * On success, this will be zero.
12378 + *
12379 + * If some data could not be copied, this function will pad the copied
12380 + * data to the requested size using zero bytes.
12381 + */
12382 +static inline unsigned long __must_check
12383 +copy_from_user(void *to, const void __user *from, unsigned long n)
12384 {
12385 int sz = __compiletime_object_size(to);
12386
12387 - if (likely(sz == -1 || sz >= n))
12388 - n = _copy_from_user(to, from, n);
12389 - else
12390 + if (unlikely(sz != -1 && sz < n))
12391 copy_from_user_overflow();
12392 -
12393 + else if (access_ok(VERIFY_READ, from, n))
12394 + n = __copy_from_user(to, from, n);
12395 + else if ((long)n > 0) {
12396 + if (!__builtin_constant_p(n))
12397 + check_object_size(to, n, false);
12398 + memset(to, 0, n);
12399 + }
12400 return n;
12401 }
12402
12403 diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
12404 index 1c66d30..e66922c 100644
12405 --- a/arch/x86/include/asm/uaccess_64.h
12406 +++ b/arch/x86/include/asm/uaccess_64.h
12407 @@ -10,6 +10,9 @@
12408 #include <asm/alternative.h>
12409 #include <asm/cpufeature.h>
12410 #include <asm/page.h>
12411 +#include <asm/pgtable.h>
12412 +
12413 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
12414
12415 /*
12416 * Copy To/From Userspace
12417 @@ -17,12 +20,12 @@
12418
12419 /* Handles exceptions in both to and from, but doesn't do access_ok */
12420 __must_check unsigned long
12421 -copy_user_generic_string(void *to, const void *from, unsigned len);
12422 +copy_user_generic_string(void *to, const void *from, unsigned long len);
12423 __must_check unsigned long
12424 -copy_user_generic_unrolled(void *to, const void *from, unsigned len);
12425 +copy_user_generic_unrolled(void *to, const void *from, unsigned long len);
12426
12427 static __always_inline __must_check unsigned long
12428 -copy_user_generic(void *to, const void *from, unsigned len)
12429 +copy_user_generic(void *to, const void *from, unsigned long len)
12430 {
12431 unsigned ret;
12432
12433 @@ -32,142 +35,226 @@ copy_user_generic(void *to, const void *from, unsigned len)
12434 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
12435 "=d" (len)),
12436 "1" (to), "2" (from), "3" (len)
12437 - : "memory", "rcx", "r8", "r9", "r10", "r11");
12438 + : "memory", "rcx", "r8", "r9", "r11");
12439 return ret;
12440 }
12441
12442 +static __always_inline __must_check unsigned long
12443 +__copy_to_user(void __user *to, const void *from, unsigned long len);
12444 +static __always_inline __must_check unsigned long
12445 +__copy_from_user(void *to, const void __user *from, unsigned long len);
12446 __must_check unsigned long
12447 -_copy_to_user(void __user *to, const void *from, unsigned len);
12448 -__must_check unsigned long
12449 -_copy_from_user(void *to, const void __user *from, unsigned len);
12450 -__must_check unsigned long
12451 -copy_in_user(void __user *to, const void __user *from, unsigned len);
12452 +copy_in_user(void __user *to, const void __user *from, unsigned long len);
12453
12454 static inline unsigned long __must_check copy_from_user(void *to,
12455 const void __user *from,
12456 unsigned long n)
12457 {
12458 - int sz = __compiletime_object_size(to);
12459 -
12460 might_fault();
12461 - if (likely(sz == -1 || sz >= n))
12462 - n = _copy_from_user(to, from, n);
12463 -#ifdef CONFIG_DEBUG_VM
12464 - else
12465 - WARN(1, "Buffer overflow detected!\n");
12466 -#endif
12467 +
12468 + if (access_ok(VERIFY_READ, from, n))
12469 + n = __copy_from_user(to, from, n);
12470 + else if (n < INT_MAX) {
12471 + if (!__builtin_constant_p(n))
12472 + check_object_size(to, n, false);
12473 + memset(to, 0, n);
12474 + }
12475 return n;
12476 }
12477
12478 static __always_inline __must_check
12479 -int copy_to_user(void __user *dst, const void *src, unsigned size)
12480 +int copy_to_user(void __user *dst, const void *src, unsigned long size)
12481 {
12482 might_fault();
12483
12484 - return _copy_to_user(dst, src, size);
12485 + if (access_ok(VERIFY_WRITE, dst, size))
12486 + size = __copy_to_user(dst, src, size);
12487 + return size;
12488 }
12489
12490 static __always_inline __must_check
12491 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
12492 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
12493 {
12494 - int ret = 0;
12495 + int sz = __compiletime_object_size(dst);
12496 + unsigned ret = 0;
12497
12498 might_fault();
12499 - if (!__builtin_constant_p(size))
12500 - return copy_user_generic(dst, (__force void *)src, size);
12501 +
12502 + if (size > INT_MAX)
12503 + return size;
12504 +
12505 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12506 + if (!__access_ok(VERIFY_READ, src, size))
12507 + return size;
12508 +#endif
12509 +
12510 + if (unlikely(sz != -1 && sz < size)) {
12511 +#ifdef CONFIG_DEBUG_VM
12512 + WARN(1, "Buffer overflow detected!\n");
12513 +#endif
12514 + return size;
12515 + }
12516 +
12517 + if (!__builtin_constant_p(size)) {
12518 + check_object_size(dst, size, false);
12519 +
12520 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12521 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12522 + src += PAX_USER_SHADOW_BASE;
12523 +#endif
12524 +
12525 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
12526 + }
12527 switch (size) {
12528 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
12529 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
12530 ret, "b", "b", "=q", 1);
12531 return ret;
12532 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
12533 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
12534 ret, "w", "w", "=r", 2);
12535 return ret;
12536 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
12537 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
12538 ret, "l", "k", "=r", 4);
12539 return ret;
12540 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
12541 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12542 ret, "q", "", "=r", 8);
12543 return ret;
12544 case 10:
12545 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
12546 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12547 ret, "q", "", "=r", 10);
12548 if (unlikely(ret))
12549 return ret;
12550 __get_user_asm(*(u16 *)(8 + (char *)dst),
12551 - (u16 __user *)(8 + (char __user *)src),
12552 + (const u16 __user *)(8 + (const char __user *)src),
12553 ret, "w", "w", "=r", 2);
12554 return ret;
12555 case 16:
12556 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
12557 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12558 ret, "q", "", "=r", 16);
12559 if (unlikely(ret))
12560 return ret;
12561 __get_user_asm(*(u64 *)(8 + (char *)dst),
12562 - (u64 __user *)(8 + (char __user *)src),
12563 + (const u64 __user *)(8 + (const char __user *)src),
12564 ret, "q", "", "=r", 8);
12565 return ret;
12566 default:
12567 - return copy_user_generic(dst, (__force void *)src, size);
12568 +
12569 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12570 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12571 + src += PAX_USER_SHADOW_BASE;
12572 +#endif
12573 +
12574 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
12575 }
12576 }
12577
12578 static __always_inline __must_check
12579 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
12580 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
12581 {
12582 - int ret = 0;
12583 + int sz = __compiletime_object_size(src);
12584 + unsigned ret = 0;
12585
12586 might_fault();
12587 - if (!__builtin_constant_p(size))
12588 - return copy_user_generic((__force void *)dst, src, size);
12589 +
12590 + if (size > INT_MAX)
12591 + return size;
12592 +
12593 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12594 + if (!__access_ok(VERIFY_WRITE, dst, size))
12595 + return size;
12596 +#endif
12597 +
12598 + if (unlikely(sz != -1 && sz < size)) {
12599 +#ifdef CONFIG_DEBUG_VM
12600 + WARN(1, "Buffer overflow detected!\n");
12601 +#endif
12602 + return size;
12603 + }
12604 +
12605 + if (!__builtin_constant_p(size)) {
12606 + check_object_size(src, size, true);
12607 +
12608 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12609 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12610 + dst += PAX_USER_SHADOW_BASE;
12611 +#endif
12612 +
12613 + return copy_user_generic((__force_kernel void *)dst, src, size);
12614 + }
12615 switch (size) {
12616 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
12617 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
12618 ret, "b", "b", "iq", 1);
12619 return ret;
12620 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
12621 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
12622 ret, "w", "w", "ir", 2);
12623 return ret;
12624 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
12625 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
12626 ret, "l", "k", "ir", 4);
12627 return ret;
12628 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
12629 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12630 ret, "q", "", "er", 8);
12631 return ret;
12632 case 10:
12633 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
12634 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12635 ret, "q", "", "er", 10);
12636 if (unlikely(ret))
12637 return ret;
12638 asm("":::"memory");
12639 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
12640 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
12641 ret, "w", "w", "ir", 2);
12642 return ret;
12643 case 16:
12644 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
12645 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12646 ret, "q", "", "er", 16);
12647 if (unlikely(ret))
12648 return ret;
12649 asm("":::"memory");
12650 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
12651 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
12652 ret, "q", "", "er", 8);
12653 return ret;
12654 default:
12655 - return copy_user_generic((__force void *)dst, src, size);
12656 +
12657 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12658 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12659 + dst += PAX_USER_SHADOW_BASE;
12660 +#endif
12661 +
12662 + return copy_user_generic((__force_kernel void *)dst, src, size);
12663 }
12664 }
12665
12666 static __always_inline __must_check
12667 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12668 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
12669 {
12670 - int ret = 0;
12671 + unsigned ret = 0;
12672
12673 might_fault();
12674 - if (!__builtin_constant_p(size))
12675 - return copy_user_generic((__force void *)dst,
12676 - (__force void *)src, size);
12677 +
12678 + if (size > INT_MAX)
12679 + return size;
12680 +
12681 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12682 + if (!__access_ok(VERIFY_READ, src, size))
12683 + return size;
12684 + if (!__access_ok(VERIFY_WRITE, dst, size))
12685 + return size;
12686 +#endif
12687 +
12688 + if (!__builtin_constant_p(size)) {
12689 +
12690 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12691 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12692 + src += PAX_USER_SHADOW_BASE;
12693 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12694 + dst += PAX_USER_SHADOW_BASE;
12695 +#endif
12696 +
12697 + return copy_user_generic((__force_kernel void *)dst,
12698 + (__force_kernel const void *)src, size);
12699 + }
12700 switch (size) {
12701 case 1: {
12702 u8 tmp;
12703 - __get_user_asm(tmp, (u8 __user *)src,
12704 + __get_user_asm(tmp, (const u8 __user *)src,
12705 ret, "b", "b", "=q", 1);
12706 if (likely(!ret))
12707 __put_user_asm(tmp, (u8 __user *)dst,
12708 @@ -176,7 +263,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12709 }
12710 case 2: {
12711 u16 tmp;
12712 - __get_user_asm(tmp, (u16 __user *)src,
12713 + __get_user_asm(tmp, (const u16 __user *)src,
12714 ret, "w", "w", "=r", 2);
12715 if (likely(!ret))
12716 __put_user_asm(tmp, (u16 __user *)dst,
12717 @@ -186,7 +273,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12718
12719 case 4: {
12720 u32 tmp;
12721 - __get_user_asm(tmp, (u32 __user *)src,
12722 + __get_user_asm(tmp, (const u32 __user *)src,
12723 ret, "l", "k", "=r", 4);
12724 if (likely(!ret))
12725 __put_user_asm(tmp, (u32 __user *)dst,
12726 @@ -195,7 +282,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12727 }
12728 case 8: {
12729 u64 tmp;
12730 - __get_user_asm(tmp, (u64 __user *)src,
12731 + __get_user_asm(tmp, (const u64 __user *)src,
12732 ret, "q", "", "=r", 8);
12733 if (likely(!ret))
12734 __put_user_asm(tmp, (u64 __user *)dst,
12735 @@ -203,8 +290,16 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12736 return ret;
12737 }
12738 default:
12739 - return copy_user_generic((__force void *)dst,
12740 - (__force void *)src, size);
12741 +
12742 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12743 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12744 + src += PAX_USER_SHADOW_BASE;
12745 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12746 + dst += PAX_USER_SHADOW_BASE;
12747 +#endif
12748 +
12749 + return copy_user_generic((__force_kernel void *)dst,
12750 + (__force_kernel const void *)src, size);
12751 }
12752 }
12753
12754 @@ -219,35 +314,72 @@ __must_check unsigned long clear_user(void __user *mem, unsigned long len);
12755 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
12756
12757 static __must_check __always_inline int
12758 -__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
12759 +__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
12760 {
12761 - return copy_user_generic(dst, (__force const void *)src, size);
12762 + if (size > INT_MAX)
12763 + return size;
12764 +
12765 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12766 + if (!__access_ok(VERIFY_READ, src, size))
12767 + return size;
12768 +
12769 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12770 + src += PAX_USER_SHADOW_BASE;
12771 +#endif
12772 +
12773 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
12774 }
12775
12776 -static __must_check __always_inline int
12777 -__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
12778 +static __must_check __always_inline unsigned long
12779 +__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
12780 {
12781 - return copy_user_generic((__force void *)dst, src, size);
12782 + if (size > INT_MAX)
12783 + return size;
12784 +
12785 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12786 + if (!__access_ok(VERIFY_WRITE, dst, size))
12787 + return size;
12788 +
12789 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12790 + dst += PAX_USER_SHADOW_BASE;
12791 +#endif
12792 +
12793 + return copy_user_generic((__force_kernel void *)dst, src, size);
12794 }
12795
12796 -extern long __copy_user_nocache(void *dst, const void __user *src,
12797 - unsigned size, int zerorest);
12798 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
12799 + unsigned long size, int zerorest);
12800
12801 -static inline int
12802 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
12803 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
12804 {
12805 might_sleep();
12806 +
12807 + if (size > INT_MAX)
12808 + return size;
12809 +
12810 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12811 + if (!__access_ok(VERIFY_READ, src, size))
12812 + return size;
12813 +#endif
12814 +
12815 return __copy_user_nocache(dst, src, size, 1);
12816 }
12817
12818 -static inline int
12819 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
12820 - unsigned size)
12821 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
12822 + unsigned long size)
12823 {
12824 + if (size > INT_MAX)
12825 + return size;
12826 +
12827 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12828 + if (!__access_ok(VERIFY_READ, src, size))
12829 + return size;
12830 +#endif
12831 +
12832 return __copy_user_nocache(dst, src, size, 0);
12833 }
12834
12835 -unsigned long
12836 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
12837 +extern unsigned long
12838 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest);
12839
12840 #endif /* _ASM_X86_UACCESS_64_H */
12841 diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
12842 index bb05228..d763d5b 100644
12843 --- a/arch/x86/include/asm/vdso.h
12844 +++ b/arch/x86/include/asm/vdso.h
12845 @@ -11,7 +11,7 @@ extern const char VDSO32_PRELINK[];
12846 #define VDSO32_SYMBOL(base, name) \
12847 ({ \
12848 extern const char VDSO32_##name[]; \
12849 - (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
12850 + (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
12851 })
12852 #endif
12853
12854 diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
12855 index 517d476..a1cb4d9 100644
12856 --- a/arch/x86/include/asm/x86_init.h
12857 +++ b/arch/x86/include/asm/x86_init.h
12858 @@ -29,7 +29,7 @@ struct x86_init_mpparse {
12859 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
12860 void (*find_smp_config)(void);
12861 void (*get_smp_config)(unsigned int early);
12862 -};
12863 +} __no_const;
12864
12865 /**
12866 * struct x86_init_resources - platform specific resource related ops
12867 @@ -43,7 +43,7 @@ struct x86_init_resources {
12868 void (*probe_roms)(void);
12869 void (*reserve_resources)(void);
12870 char *(*memory_setup)(void);
12871 -};
12872 +} __no_const;
12873
12874 /**
12875 * struct x86_init_irqs - platform specific interrupt setup
12876 @@ -56,7 +56,7 @@ struct x86_init_irqs {
12877 void (*pre_vector_init)(void);
12878 void (*intr_init)(void);
12879 void (*trap_init)(void);
12880 -};
12881 +} __no_const;
12882
12883 /**
12884 * struct x86_init_oem - oem platform specific customizing functions
12885 @@ -66,7 +66,7 @@ struct x86_init_irqs {
12886 struct x86_init_oem {
12887 void (*arch_setup)(void);
12888 void (*banner)(void);
12889 -};
12890 +} __no_const;
12891
12892 /**
12893 * struct x86_init_mapping - platform specific initial kernel pagetable setup
12894 @@ -77,7 +77,7 @@ struct x86_init_oem {
12895 */
12896 struct x86_init_mapping {
12897 void (*pagetable_reserve)(u64 start, u64 end);
12898 -};
12899 +} __no_const;
12900
12901 /**
12902 * struct x86_init_paging - platform specific paging functions
12903 @@ -87,7 +87,7 @@ struct x86_init_mapping {
12904 struct x86_init_paging {
12905 void (*pagetable_setup_start)(pgd_t *base);
12906 void (*pagetable_setup_done)(pgd_t *base);
12907 -};
12908 +} __no_const;
12909
12910 /**
12911 * struct x86_init_timers - platform specific timer setup
12912 @@ -102,7 +102,7 @@ struct x86_init_timers {
12913 void (*tsc_pre_init)(void);
12914 void (*timer_init)(void);
12915 void (*wallclock_init)(void);
12916 -};
12917 +} __no_const;
12918
12919 /**
12920 * struct x86_init_iommu - platform specific iommu setup
12921 @@ -110,7 +110,7 @@ struct x86_init_timers {
12922 */
12923 struct x86_init_iommu {
12924 int (*iommu_init)(void);
12925 -};
12926 +} __no_const;
12927
12928 /**
12929 * struct x86_init_pci - platform specific pci init functions
12930 @@ -124,7 +124,7 @@ struct x86_init_pci {
12931 int (*init)(void);
12932 void (*init_irq)(void);
12933 void (*fixup_irqs)(void);
12934 -};
12935 +} __no_const;
12936
12937 /**
12938 * struct x86_init_ops - functions for platform specific setup
12939 @@ -140,7 +140,7 @@ struct x86_init_ops {
12940 struct x86_init_timers timers;
12941 struct x86_init_iommu iommu;
12942 struct x86_init_pci pci;
12943 -};
12944 +} __no_const;
12945
12946 /**
12947 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
12948 @@ -149,7 +149,7 @@ struct x86_init_ops {
12949 struct x86_cpuinit_ops {
12950 void (*setup_percpu_clockev)(void);
12951 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
12952 -};
12953 +} __no_const;
12954
12955 /**
12956 * struct x86_platform_ops - platform specific runtime functions
12957 @@ -171,7 +171,7 @@ struct x86_platform_ops {
12958 void (*nmi_init)(void);
12959 unsigned char (*get_nmi_reason)(void);
12960 int (*i8042_detect)(void);
12961 -};
12962 +} __no_const;
12963
12964 struct pci_dev;
12965
12966 @@ -180,7 +180,7 @@ struct x86_msi_ops {
12967 void (*teardown_msi_irq)(unsigned int irq);
12968 void (*teardown_msi_irqs)(struct pci_dev *dev);
12969 void (*restore_msi_irqs)(struct pci_dev *dev, int irq);
12970 -};
12971 +} __no_const;
12972
12973 extern struct x86_init_ops x86_init;
12974 extern struct x86_cpuinit_ops x86_cpuinit;
12975 diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
12976 index c6ce245..ffbdab7 100644
12977 --- a/arch/x86/include/asm/xsave.h
12978 +++ b/arch/x86/include/asm/xsave.h
12979 @@ -65,6 +65,11 @@ static inline int xsave_user(struct xsave_struct __user *buf)
12980 {
12981 int err;
12982
12983 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12984 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
12985 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
12986 +#endif
12987 +
12988 /*
12989 * Clear the xsave header first, so that reserved fields are
12990 * initialized to zero.
12991 @@ -96,10 +101,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
12992 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
12993 {
12994 int err;
12995 - struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
12996 + struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
12997 u32 lmask = mask;
12998 u32 hmask = mask >> 32;
12999
13000 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13001 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
13002 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
13003 +#endif
13004 +
13005 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
13006 "2:\n"
13007 ".section .fixup,\"ax\"\n"
13008 diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
13009 index 6a564ac..9b1340c 100644
13010 --- a/arch/x86/kernel/acpi/realmode/Makefile
13011 +++ b/arch/x86/kernel/acpi/realmode/Makefile
13012 @@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
13013 $(call cc-option, -fno-stack-protector) \
13014 $(call cc-option, -mpreferred-stack-boundary=2)
13015 KBUILD_CFLAGS += $(call cc-option, -m32)
13016 +ifdef CONSTIFY_PLUGIN
13017 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
13018 +endif
13019 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
13020 GCOV_PROFILE := n
13021
13022 diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
13023 index b4fd836..4358fe3 100644
13024 --- a/arch/x86/kernel/acpi/realmode/wakeup.S
13025 +++ b/arch/x86/kernel/acpi/realmode/wakeup.S
13026 @@ -108,6 +108,9 @@ wakeup_code:
13027 /* Do any other stuff... */
13028
13029 #ifndef CONFIG_64BIT
13030 + /* Recheck NX bit overrides (64bit path does this in trampoline */
13031 + call verify_cpu
13032 +
13033 /* This could also be done in C code... */
13034 movl pmode_cr3, %eax
13035 movl %eax, %cr3
13036 @@ -131,6 +134,7 @@ wakeup_code:
13037 movl pmode_cr0, %eax
13038 movl %eax, %cr0
13039 jmp pmode_return
13040 +# include "../../verify_cpu.S"
13041 #else
13042 pushw $0
13043 pushw trampoline_segment
13044 diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
13045 index 103b6ab..2004d0a 100644
13046 --- a/arch/x86/kernel/acpi/sleep.c
13047 +++ b/arch/x86/kernel/acpi/sleep.c
13048 @@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
13049 header->trampoline_segment = trampoline_address() >> 4;
13050 #ifdef CONFIG_SMP
13051 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
13052 +
13053 + pax_open_kernel();
13054 early_gdt_descr.address =
13055 (unsigned long)get_cpu_gdt_table(smp_processor_id());
13056 + pax_close_kernel();
13057 +
13058 initial_gs = per_cpu_offset(smp_processor_id());
13059 #endif
13060 initial_code = (unsigned long)wakeup_long64;
13061 diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
13062 index 13ab720..95d5442 100644
13063 --- a/arch/x86/kernel/acpi/wakeup_32.S
13064 +++ b/arch/x86/kernel/acpi/wakeup_32.S
13065 @@ -30,13 +30,11 @@ wakeup_pmode_return:
13066 # and restore the stack ... but you need gdt for this to work
13067 movl saved_context_esp, %esp
13068
13069 - movl %cs:saved_magic, %eax
13070 - cmpl $0x12345678, %eax
13071 + cmpl $0x12345678, saved_magic
13072 jne bogus_magic
13073
13074 # jump to place where we left off
13075 - movl saved_eip, %eax
13076 - jmp *%eax
13077 + jmp *(saved_eip)
13078
13079 bogus_magic:
13080 jmp bogus_magic
13081 diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
13082 index 1f84794..e23f862 100644
13083 --- a/arch/x86/kernel/alternative.c
13084 +++ b/arch/x86/kernel/alternative.c
13085 @@ -276,6 +276,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
13086 */
13087 for (a = start; a < end; a++) {
13088 instr = (u8 *)&a->instr_offset + a->instr_offset;
13089 +
13090 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13091 + instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13092 + if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
13093 + instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13094 +#endif
13095 +
13096 replacement = (u8 *)&a->repl_offset + a->repl_offset;
13097 BUG_ON(a->replacementlen > a->instrlen);
13098 BUG_ON(a->instrlen > sizeof(insnbuf));
13099 @@ -307,10 +314,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
13100 for (poff = start; poff < end; poff++) {
13101 u8 *ptr = (u8 *)poff + *poff;
13102
13103 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13104 + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13105 + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
13106 + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13107 +#endif
13108 +
13109 if (!*poff || ptr < text || ptr >= text_end)
13110 continue;
13111 /* turn DS segment override prefix into lock prefix */
13112 - if (*ptr == 0x3e)
13113 + if (*ktla_ktva(ptr) == 0x3e)
13114 text_poke(ptr, ((unsigned char []){0xf0}), 1);
13115 };
13116 mutex_unlock(&text_mutex);
13117 @@ -328,10 +341,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
13118 for (poff = start; poff < end; poff++) {
13119 u8 *ptr = (u8 *)poff + *poff;
13120
13121 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13122 + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13123 + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
13124 + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13125 +#endif
13126 +
13127 if (!*poff || ptr < text || ptr >= text_end)
13128 continue;
13129 /* turn lock prefix into DS segment override prefix */
13130 - if (*ptr == 0xf0)
13131 + if (*ktla_ktva(ptr) == 0xf0)
13132 text_poke(ptr, ((unsigned char []){0x3E}), 1);
13133 };
13134 mutex_unlock(&text_mutex);
13135 @@ -500,7 +519,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
13136
13137 BUG_ON(p->len > MAX_PATCH_LEN);
13138 /* prep the buffer with the original instructions */
13139 - memcpy(insnbuf, p->instr, p->len);
13140 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
13141 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
13142 (unsigned long)p->instr, p->len);
13143
13144 @@ -568,7 +587,7 @@ void __init alternative_instructions(void)
13145 if (smp_alt_once)
13146 free_init_pages("SMP alternatives",
13147 (unsigned long)__smp_locks,
13148 - (unsigned long)__smp_locks_end);
13149 + PAGE_ALIGN((unsigned long)__smp_locks_end));
13150
13151 restart_nmi();
13152 }
13153 @@ -585,13 +604,17 @@ void __init alternative_instructions(void)
13154 * instructions. And on the local CPU you need to be protected again NMI or MCE
13155 * handlers seeing an inconsistent instruction while you patch.
13156 */
13157 -void *__init_or_module text_poke_early(void *addr, const void *opcode,
13158 +void *__kprobes text_poke_early(void *addr, const void *opcode,
13159 size_t len)
13160 {
13161 unsigned long flags;
13162 local_irq_save(flags);
13163 - memcpy(addr, opcode, len);
13164 +
13165 + pax_open_kernel();
13166 + memcpy(ktla_ktva(addr), opcode, len);
13167 sync_core();
13168 + pax_close_kernel();
13169 +
13170 local_irq_restore(flags);
13171 /* Could also do a CLFLUSH here to speed up CPU recovery; but
13172 that causes hangs on some VIA CPUs. */
13173 @@ -613,36 +636,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
13174 */
13175 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
13176 {
13177 - unsigned long flags;
13178 - char *vaddr;
13179 + unsigned char *vaddr = ktla_ktva(addr);
13180 struct page *pages[2];
13181 - int i;
13182 + size_t i;
13183
13184 if (!core_kernel_text((unsigned long)addr)) {
13185 - pages[0] = vmalloc_to_page(addr);
13186 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
13187 + pages[0] = vmalloc_to_page(vaddr);
13188 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
13189 } else {
13190 - pages[0] = virt_to_page(addr);
13191 + pages[0] = virt_to_page(vaddr);
13192 WARN_ON(!PageReserved(pages[0]));
13193 - pages[1] = virt_to_page(addr + PAGE_SIZE);
13194 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
13195 }
13196 BUG_ON(!pages[0]);
13197 - local_irq_save(flags);
13198 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
13199 - if (pages[1])
13200 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
13201 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
13202 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
13203 - clear_fixmap(FIX_TEXT_POKE0);
13204 - if (pages[1])
13205 - clear_fixmap(FIX_TEXT_POKE1);
13206 - local_flush_tlb();
13207 - sync_core();
13208 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
13209 - that causes hangs on some VIA CPUs. */
13210 + text_poke_early(addr, opcode, len);
13211 for (i = 0; i < len; i++)
13212 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
13213 - local_irq_restore(flags);
13214 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
13215 return addr;
13216 }
13217
13218 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
13219 index 2eec05b..fef012b 100644
13220 --- a/arch/x86/kernel/apic/apic.c
13221 +++ b/arch/x86/kernel/apic/apic.c
13222 @@ -184,7 +184,7 @@ int first_system_vector = 0xfe;
13223 /*
13224 * Debug level, exported for io_apic.c
13225 */
13226 -unsigned int apic_verbosity;
13227 +int apic_verbosity;
13228
13229 int pic_mode;
13230
13231 @@ -1908,7 +1908,7 @@ void smp_error_interrupt(struct pt_regs *regs)
13232 apic_write(APIC_ESR, 0);
13233 v1 = apic_read(APIC_ESR);
13234 ack_APIC_irq();
13235 - atomic_inc(&irq_err_count);
13236 + atomic_inc_unchecked(&irq_err_count);
13237
13238 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
13239 smp_processor_id(), v0 , v1);
13240 diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
13241 index fb07275..e06bb59 100644
13242 --- a/arch/x86/kernel/apic/io_apic.c
13243 +++ b/arch/x86/kernel/apic/io_apic.c
13244 @@ -1096,7 +1096,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
13245 }
13246 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
13247
13248 -void lock_vector_lock(void)
13249 +void lock_vector_lock(void) __acquires(vector_lock)
13250 {
13251 /* Used to the online set of cpus does not change
13252 * during assign_irq_vector.
13253 @@ -1104,7 +1104,7 @@ void lock_vector_lock(void)
13254 raw_spin_lock(&vector_lock);
13255 }
13256
13257 -void unlock_vector_lock(void)
13258 +void unlock_vector_lock(void) __releases(vector_lock)
13259 {
13260 raw_spin_unlock(&vector_lock);
13261 }
13262 @@ -2510,7 +2510,7 @@ static void ack_apic_edge(struct irq_data *data)
13263 ack_APIC_irq();
13264 }
13265
13266 -atomic_t irq_mis_count;
13267 +atomic_unchecked_t irq_mis_count;
13268
13269 static void ack_apic_level(struct irq_data *data)
13270 {
13271 @@ -2576,7 +2576,7 @@ static void ack_apic_level(struct irq_data *data)
13272 * at the cpu.
13273 */
13274 if (!(v & (1 << (i & 0x1f)))) {
13275 - atomic_inc(&irq_mis_count);
13276 + atomic_inc_unchecked(&irq_mis_count);
13277
13278 eoi_ioapic_irq(irq, cfg);
13279 }
13280 diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
13281 index f76623c..aab694f 100644
13282 --- a/arch/x86/kernel/apm_32.c
13283 +++ b/arch/x86/kernel/apm_32.c
13284 @@ -411,7 +411,7 @@ static DEFINE_MUTEX(apm_mutex);
13285 * This is for buggy BIOS's that refer to (real mode) segment 0x40
13286 * even though they are called in protected mode.
13287 */
13288 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
13289 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
13290 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
13291
13292 static const char driver_version[] = "1.16ac"; /* no spaces */
13293 @@ -589,7 +589,10 @@ static long __apm_bios_call(void *_call)
13294 BUG_ON(cpu != 0);
13295 gdt = get_cpu_gdt_table(cpu);
13296 save_desc_40 = gdt[0x40 / 8];
13297 +
13298 + pax_open_kernel();
13299 gdt[0x40 / 8] = bad_bios_desc;
13300 + pax_close_kernel();
13301
13302 apm_irq_save(flags);
13303 APM_DO_SAVE_SEGS;
13304 @@ -598,7 +601,11 @@ static long __apm_bios_call(void *_call)
13305 &call->esi);
13306 APM_DO_RESTORE_SEGS;
13307 apm_irq_restore(flags);
13308 +
13309 + pax_open_kernel();
13310 gdt[0x40 / 8] = save_desc_40;
13311 + pax_close_kernel();
13312 +
13313 put_cpu();
13314
13315 return call->eax & 0xff;
13316 @@ -665,7 +672,10 @@ static long __apm_bios_call_simple(void *_call)
13317 BUG_ON(cpu != 0);
13318 gdt = get_cpu_gdt_table(cpu);
13319 save_desc_40 = gdt[0x40 / 8];
13320 +
13321 + pax_open_kernel();
13322 gdt[0x40 / 8] = bad_bios_desc;
13323 + pax_close_kernel();
13324
13325 apm_irq_save(flags);
13326 APM_DO_SAVE_SEGS;
13327 @@ -673,7 +683,11 @@ static long __apm_bios_call_simple(void *_call)
13328 &call->eax);
13329 APM_DO_RESTORE_SEGS;
13330 apm_irq_restore(flags);
13331 +
13332 + pax_open_kernel();
13333 gdt[0x40 / 8] = save_desc_40;
13334 + pax_close_kernel();
13335 +
13336 put_cpu();
13337 return error;
13338 }
13339 @@ -2347,12 +2361,15 @@ static int __init apm_init(void)
13340 * code to that CPU.
13341 */
13342 gdt = get_cpu_gdt_table(0);
13343 +
13344 + pax_open_kernel();
13345 set_desc_base(&gdt[APM_CS >> 3],
13346 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
13347 set_desc_base(&gdt[APM_CS_16 >> 3],
13348 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
13349 set_desc_base(&gdt[APM_DS >> 3],
13350 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
13351 + pax_close_kernel();
13352
13353 proc_create("apm", 0, NULL, &apm_file_ops);
13354
13355 diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
13356 index 68de2dc..1f3c720 100644
13357 --- a/arch/x86/kernel/asm-offsets.c
13358 +++ b/arch/x86/kernel/asm-offsets.c
13359 @@ -33,6 +33,8 @@ void common(void) {
13360 OFFSET(TI_status, thread_info, status);
13361 OFFSET(TI_addr_limit, thread_info, addr_limit);
13362 OFFSET(TI_preempt_count, thread_info, preempt_count);
13363 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
13364 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
13365
13366 BLANK();
13367 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
13368 @@ -53,8 +55,26 @@ void common(void) {
13369 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
13370 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
13371 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
13372 +
13373 +#ifdef CONFIG_PAX_KERNEXEC
13374 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
13375 #endif
13376
13377 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13378 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
13379 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
13380 +#ifdef CONFIG_X86_64
13381 + OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
13382 +#endif
13383 +#endif
13384 +
13385 +#endif
13386 +
13387 + BLANK();
13388 + DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
13389 + DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
13390 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
13391 +
13392 #ifdef CONFIG_XEN
13393 BLANK();
13394 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
13395 diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
13396 index 834e897..dacddc8 100644
13397 --- a/arch/x86/kernel/asm-offsets_64.c
13398 +++ b/arch/x86/kernel/asm-offsets_64.c
13399 @@ -70,6 +70,7 @@ int main(void)
13400 BLANK();
13401 #undef ENTRY
13402
13403 + DEFINE(TSS_size, sizeof(struct tss_struct));
13404 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
13405 BLANK();
13406
13407 diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
13408 index 25f24dc..4094a7f 100644
13409 --- a/arch/x86/kernel/cpu/Makefile
13410 +++ b/arch/x86/kernel/cpu/Makefile
13411 @@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
13412 CFLAGS_REMOVE_perf_event.o = -pg
13413 endif
13414
13415 -# Make sure load_percpu_segment has no stackprotector
13416 -nostackp := $(call cc-option, -fno-stack-protector)
13417 -CFLAGS_common.o := $(nostackp)
13418 -
13419 obj-y := intel_cacheinfo.o scattered.o topology.o
13420 obj-y += proc.o capflags.o powerflags.o common.o
13421 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
13422 diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
13423 index f4773f4..b3fb13c 100644
13424 --- a/arch/x86/kernel/cpu/amd.c
13425 +++ b/arch/x86/kernel/cpu/amd.c
13426 @@ -669,7 +669,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
13427 unsigned int size)
13428 {
13429 /* AMD errata T13 (order #21922) */
13430 - if ((c->x86 == 6)) {
13431 + if (c->x86 == 6) {
13432 /* Duron Rev A0 */
13433 if (c->x86_model == 3 && c->x86_mask == 0)
13434 size = 64;
13435 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
13436 index c0f7d68..aa418f9 100644
13437 --- a/arch/x86/kernel/cpu/common.c
13438 +++ b/arch/x86/kernel/cpu/common.c
13439 @@ -84,60 +84,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
13440
13441 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
13442
13443 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
13444 -#ifdef CONFIG_X86_64
13445 - /*
13446 - * We need valid kernel segments for data and code in long mode too
13447 - * IRET will check the segment types kkeil 2000/10/28
13448 - * Also sysret mandates a special GDT layout
13449 - *
13450 - * TLS descriptors are currently at a different place compared to i386.
13451 - * Hopefully nobody expects them at a fixed place (Wine?)
13452 - */
13453 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
13454 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
13455 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
13456 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
13457 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
13458 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
13459 -#else
13460 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
13461 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13462 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
13463 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
13464 - /*
13465 - * Segments used for calling PnP BIOS have byte granularity.
13466 - * They code segments and data segments have fixed 64k limits,
13467 - * the transfer segment sizes are set at run time.
13468 - */
13469 - /* 32-bit code */
13470 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
13471 - /* 16-bit code */
13472 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
13473 - /* 16-bit data */
13474 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
13475 - /* 16-bit data */
13476 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
13477 - /* 16-bit data */
13478 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
13479 - /*
13480 - * The APM segments have byte granularity and their bases
13481 - * are set at run time. All have 64k limits.
13482 - */
13483 - /* 32-bit code */
13484 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
13485 - /* 16-bit code */
13486 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
13487 - /* data */
13488 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
13489 -
13490 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13491 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13492 - GDT_STACK_CANARY_INIT
13493 -#endif
13494 -} };
13495 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
13496 -
13497 static int __init x86_xsave_setup(char *s)
13498 {
13499 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
13500 @@ -372,7 +318,7 @@ void switch_to_new_gdt(int cpu)
13501 {
13502 struct desc_ptr gdt_descr;
13503
13504 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
13505 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
13506 gdt_descr.size = GDT_SIZE - 1;
13507 load_gdt(&gdt_descr);
13508 /* Reload the per-cpu base */
13509 @@ -839,6 +785,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
13510 /* Filter out anything that depends on CPUID levels we don't have */
13511 filter_cpuid_features(c, true);
13512
13513 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
13514 + setup_clear_cpu_cap(X86_FEATURE_SEP);
13515 +#endif
13516 +
13517 /* If the model name is still unset, do table lookup. */
13518 if (!c->x86_model_id[0]) {
13519 const char *p;
13520 @@ -1019,10 +969,12 @@ static __init int setup_disablecpuid(char *arg)
13521 }
13522 __setup("clearcpuid=", setup_disablecpuid);
13523
13524 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
13525 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
13526 +
13527 #ifdef CONFIG_X86_64
13528 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
13529 -struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1,
13530 - (unsigned long) nmi_idt_table };
13531 +struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) nmi_idt_table };
13532
13533 DEFINE_PER_CPU_FIRST(union irq_stack_union,
13534 irq_stack_union) __aligned(PAGE_SIZE);
13535 @@ -1036,7 +988,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
13536 EXPORT_PER_CPU_SYMBOL(current_task);
13537
13538 DEFINE_PER_CPU(unsigned long, kernel_stack) =
13539 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
13540 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
13541 EXPORT_PER_CPU_SYMBOL(kernel_stack);
13542
13543 DEFINE_PER_CPU(char *, irq_stack_ptr) =
13544 @@ -1126,7 +1078,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
13545 {
13546 memset(regs, 0, sizeof(struct pt_regs));
13547 regs->fs = __KERNEL_PERCPU;
13548 - regs->gs = __KERNEL_STACK_CANARY;
13549 + savesegment(gs, regs->gs);
13550
13551 return regs;
13552 }
13553 @@ -1190,7 +1142,7 @@ void __cpuinit cpu_init(void)
13554 int i;
13555
13556 cpu = stack_smp_processor_id();
13557 - t = &per_cpu(init_tss, cpu);
13558 + t = init_tss + cpu;
13559 oist = &per_cpu(orig_ist, cpu);
13560
13561 #ifdef CONFIG_NUMA
13562 @@ -1216,7 +1168,7 @@ void __cpuinit cpu_init(void)
13563 switch_to_new_gdt(cpu);
13564 loadsegment(fs, 0);
13565
13566 - load_idt((const struct desc_ptr *)&idt_descr);
13567 + load_idt(&idt_descr);
13568
13569 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
13570 syscall_init();
13571 @@ -1225,7 +1177,6 @@ void __cpuinit cpu_init(void)
13572 wrmsrl(MSR_KERNEL_GS_BASE, 0);
13573 barrier();
13574
13575 - x86_configure_nx();
13576 if (cpu != 0)
13577 enable_x2apic();
13578
13579 @@ -1281,7 +1232,7 @@ void __cpuinit cpu_init(void)
13580 {
13581 int cpu = smp_processor_id();
13582 struct task_struct *curr = current;
13583 - struct tss_struct *t = &per_cpu(init_tss, cpu);
13584 + struct tss_struct *t = init_tss + cpu;
13585 struct thread_struct *thread = &curr->thread;
13586
13587 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
13588 diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
13589 index 3e6ff6c..54b4992 100644
13590 --- a/arch/x86/kernel/cpu/intel.c
13591 +++ b/arch/x86/kernel/cpu/intel.c
13592 @@ -174,7 +174,7 @@ static void __cpuinit trap_init_f00f_bug(void)
13593 * Update the IDT descriptor and reload the IDT so that
13594 * it uses the read-only mapped virtual address.
13595 */
13596 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
13597 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
13598 load_idt(&idt_descr);
13599 }
13600 #endif
13601 diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
13602 index 5a11ae2..a1a1c8a 100644
13603 --- a/arch/x86/kernel/cpu/mcheck/mce.c
13604 +++ b/arch/x86/kernel/cpu/mcheck/mce.c
13605 @@ -42,6 +42,7 @@
13606 #include <asm/processor.h>
13607 #include <asm/mce.h>
13608 #include <asm/msr.h>
13609 +#include <asm/local.h>
13610
13611 #include "mce-internal.h"
13612
13613 @@ -250,7 +251,7 @@ static void print_mce(struct mce *m)
13614 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
13615 m->cs, m->ip);
13616
13617 - if (m->cs == __KERNEL_CS)
13618 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
13619 print_symbol("{%s}", m->ip);
13620 pr_cont("\n");
13621 }
13622 @@ -283,10 +284,10 @@ static void print_mce(struct mce *m)
13623
13624 #define PANIC_TIMEOUT 5 /* 5 seconds */
13625
13626 -static atomic_t mce_paniced;
13627 +static atomic_unchecked_t mce_paniced;
13628
13629 static int fake_panic;
13630 -static atomic_t mce_fake_paniced;
13631 +static atomic_unchecked_t mce_fake_paniced;
13632
13633 /* Panic in progress. Enable interrupts and wait for final IPI */
13634 static void wait_for_panic(void)
13635 @@ -310,7 +311,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
13636 /*
13637 * Make sure only one CPU runs in machine check panic
13638 */
13639 - if (atomic_inc_return(&mce_paniced) > 1)
13640 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
13641 wait_for_panic();
13642 barrier();
13643
13644 @@ -318,7 +319,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
13645 console_verbose();
13646 } else {
13647 /* Don't log too much for fake panic */
13648 - if (atomic_inc_return(&mce_fake_paniced) > 1)
13649 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
13650 return;
13651 }
13652 /* First print corrected ones that are still unlogged */
13653 @@ -658,7 +659,7 @@ static int mce_timed_out(u64 *t)
13654 * might have been modified by someone else.
13655 */
13656 rmb();
13657 - if (atomic_read(&mce_paniced))
13658 + if (atomic_read_unchecked(&mce_paniced))
13659 wait_for_panic();
13660 if (!monarch_timeout)
13661 goto out;
13662 @@ -1446,7 +1447,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
13663 }
13664
13665 /* Call the installed machine check handler for this CPU setup. */
13666 -void (*machine_check_vector)(struct pt_regs *, long error_code) =
13667 +void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
13668 unexpected_machine_check;
13669
13670 /*
13671 @@ -1469,7 +1470,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
13672 return;
13673 }
13674
13675 + pax_open_kernel();
13676 machine_check_vector = do_machine_check;
13677 + pax_close_kernel();
13678
13679 __mcheck_cpu_init_generic();
13680 __mcheck_cpu_init_vendor(c);
13681 @@ -1483,7 +1486,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
13682 */
13683
13684 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
13685 -static int mce_chrdev_open_count; /* #times opened */
13686 +static local_t mce_chrdev_open_count; /* #times opened */
13687 static int mce_chrdev_open_exclu; /* already open exclusive? */
13688
13689 static int mce_chrdev_open(struct inode *inode, struct file *file)
13690 @@ -1491,7 +1494,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
13691 spin_lock(&mce_chrdev_state_lock);
13692
13693 if (mce_chrdev_open_exclu ||
13694 - (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
13695 + (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
13696 spin_unlock(&mce_chrdev_state_lock);
13697
13698 return -EBUSY;
13699 @@ -1499,7 +1502,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
13700
13701 if (file->f_flags & O_EXCL)
13702 mce_chrdev_open_exclu = 1;
13703 - mce_chrdev_open_count++;
13704 + local_inc(&mce_chrdev_open_count);
13705
13706 spin_unlock(&mce_chrdev_state_lock);
13707
13708 @@ -1510,7 +1513,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
13709 {
13710 spin_lock(&mce_chrdev_state_lock);
13711
13712 - mce_chrdev_open_count--;
13713 + local_dec(&mce_chrdev_open_count);
13714 mce_chrdev_open_exclu = 0;
13715
13716 spin_unlock(&mce_chrdev_state_lock);
13717 @@ -2229,7 +2232,7 @@ struct dentry *mce_get_debugfs_dir(void)
13718 static void mce_reset(void)
13719 {
13720 cpu_missing = 0;
13721 - atomic_set(&mce_fake_paniced, 0);
13722 + atomic_set_unchecked(&mce_fake_paniced, 0);
13723 atomic_set(&mce_executing, 0);
13724 atomic_set(&mce_callin, 0);
13725 atomic_set(&global_nwo, 0);
13726 diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
13727 index 5c0e653..0882b0a 100644
13728 --- a/arch/x86/kernel/cpu/mcheck/p5.c
13729 +++ b/arch/x86/kernel/cpu/mcheck/p5.c
13730 @@ -12,6 +12,7 @@
13731 #include <asm/system.h>
13732 #include <asm/mce.h>
13733 #include <asm/msr.h>
13734 +#include <asm/pgtable.h>
13735
13736 /* By default disabled */
13737 int mce_p5_enabled __read_mostly;
13738 @@ -50,7 +51,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
13739 if (!cpu_has(c, X86_FEATURE_MCE))
13740 return;
13741
13742 + pax_open_kernel();
13743 machine_check_vector = pentium_machine_check;
13744 + pax_close_kernel();
13745 /* Make sure the vector pointer is visible before we enable MCEs: */
13746 wmb();
13747
13748 diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
13749 index 54060f5..c1a7577 100644
13750 --- a/arch/x86/kernel/cpu/mcheck/winchip.c
13751 +++ b/arch/x86/kernel/cpu/mcheck/winchip.c
13752 @@ -11,6 +11,7 @@
13753 #include <asm/system.h>
13754 #include <asm/mce.h>
13755 #include <asm/msr.h>
13756 +#include <asm/pgtable.h>
13757
13758 /* Machine check handler for WinChip C6: */
13759 static void winchip_machine_check(struct pt_regs *regs, long error_code)
13760 @@ -24,7 +25,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
13761 {
13762 u32 lo, hi;
13763
13764 + pax_open_kernel();
13765 machine_check_vector = winchip_machine_check;
13766 + pax_close_kernel();
13767 /* Make sure the vector pointer is visible before we enable MCEs: */
13768 wmb();
13769
13770 diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
13771 index 6b96110..0da73eb 100644
13772 --- a/arch/x86/kernel/cpu/mtrr/main.c
13773 +++ b/arch/x86/kernel/cpu/mtrr/main.c
13774 @@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
13775 u64 size_or_mask, size_and_mask;
13776 static bool mtrr_aps_delayed_init;
13777
13778 -static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
13779 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
13780
13781 const struct mtrr_ops *mtrr_if;
13782
13783 diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
13784 index df5e41f..816c719 100644
13785 --- a/arch/x86/kernel/cpu/mtrr/mtrr.h
13786 +++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
13787 @@ -25,7 +25,7 @@ struct mtrr_ops {
13788 int (*validate_add_page)(unsigned long base, unsigned long size,
13789 unsigned int type);
13790 int (*have_wrcomb)(void);
13791 -};
13792 +} __do_const;
13793
13794 extern int generic_get_free_region(unsigned long base, unsigned long size,
13795 int replace_reg);
13796 diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
13797 index 5adce10..99284ec 100644
13798 --- a/arch/x86/kernel/cpu/perf_event.c
13799 +++ b/arch/x86/kernel/cpu/perf_event.c
13800 @@ -1665,7 +1665,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
13801 break;
13802
13803 perf_callchain_store(entry, frame.return_address);
13804 - fp = frame.next_frame;
13805 + fp = (const void __force_user *)frame.next_frame;
13806 }
13807 }
13808
13809 diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
13810 index 13ad899..f642b9a 100644
13811 --- a/arch/x86/kernel/crash.c
13812 +++ b/arch/x86/kernel/crash.c
13813 @@ -36,10 +36,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
13814 {
13815 #ifdef CONFIG_X86_32
13816 struct pt_regs fixed_regs;
13817 -#endif
13818
13819 -#ifdef CONFIG_X86_32
13820 - if (!user_mode_vm(regs)) {
13821 + if (!user_mode(regs)) {
13822 crash_fixup_ss_esp(&fixed_regs, regs);
13823 regs = &fixed_regs;
13824 }
13825 diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
13826 index 37250fe..bf2ec74 100644
13827 --- a/arch/x86/kernel/doublefault_32.c
13828 +++ b/arch/x86/kernel/doublefault_32.c
13829 @@ -11,7 +11,7 @@
13830
13831 #define DOUBLEFAULT_STACKSIZE (1024)
13832 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
13833 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
13834 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
13835
13836 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
13837
13838 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
13839 unsigned long gdt, tss;
13840
13841 store_gdt(&gdt_desc);
13842 - gdt = gdt_desc.address;
13843 + gdt = (unsigned long)gdt_desc.address;
13844
13845 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
13846
13847 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
13848 /* 0x2 bit is always set */
13849 .flags = X86_EFLAGS_SF | 0x2,
13850 .sp = STACK_START,
13851 - .es = __USER_DS,
13852 + .es = __KERNEL_DS,
13853 .cs = __KERNEL_CS,
13854 .ss = __KERNEL_DS,
13855 - .ds = __USER_DS,
13856 + .ds = __KERNEL_DS,
13857 .fs = __KERNEL_PERCPU,
13858
13859 .__cr3 = __pa_nodebug(swapper_pg_dir),
13860 diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
13861 index 4025fe4..d8451c6 100644
13862 --- a/arch/x86/kernel/dumpstack.c
13863 +++ b/arch/x86/kernel/dumpstack.c
13864 @@ -2,6 +2,9 @@
13865 * Copyright (C) 1991, 1992 Linus Torvalds
13866 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
13867 */
13868 +#ifdef CONFIG_GRKERNSEC_HIDESYM
13869 +#define __INCLUDED_BY_HIDESYM 1
13870 +#endif
13871 #include <linux/kallsyms.h>
13872 #include <linux/kprobes.h>
13873 #include <linux/uaccess.h>
13874 @@ -35,9 +38,8 @@ void printk_address(unsigned long address, int reliable)
13875 static void
13876 print_ftrace_graph_addr(unsigned long addr, void *data,
13877 const struct stacktrace_ops *ops,
13878 - struct thread_info *tinfo, int *graph)
13879 + struct task_struct *task, int *graph)
13880 {
13881 - struct task_struct *task = tinfo->task;
13882 unsigned long ret_addr;
13883 int index = task->curr_ret_stack;
13884
13885 @@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
13886 static inline void
13887 print_ftrace_graph_addr(unsigned long addr, void *data,
13888 const struct stacktrace_ops *ops,
13889 - struct thread_info *tinfo, int *graph)
13890 + struct task_struct *task, int *graph)
13891 { }
13892 #endif
13893
13894 @@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
13895 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
13896 */
13897
13898 -static inline int valid_stack_ptr(struct thread_info *tinfo,
13899 - void *p, unsigned int size, void *end)
13900 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
13901 {
13902 - void *t = tinfo;
13903 if (end) {
13904 if (p < end && p >= (end-THREAD_SIZE))
13905 return 1;
13906 @@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
13907 }
13908
13909 unsigned long
13910 -print_context_stack(struct thread_info *tinfo,
13911 +print_context_stack(struct task_struct *task, void *stack_start,
13912 unsigned long *stack, unsigned long bp,
13913 const struct stacktrace_ops *ops, void *data,
13914 unsigned long *end, int *graph)
13915 {
13916 struct stack_frame *frame = (struct stack_frame *)bp;
13917
13918 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
13919 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
13920 unsigned long addr;
13921
13922 addr = *stack;
13923 @@ -102,7 +102,7 @@ print_context_stack(struct thread_info *tinfo,
13924 } else {
13925 ops->address(data, addr, 0);
13926 }
13927 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
13928 + print_ftrace_graph_addr(addr, data, ops, task, graph);
13929 }
13930 stack++;
13931 }
13932 @@ -111,7 +111,7 @@ print_context_stack(struct thread_info *tinfo,
13933 EXPORT_SYMBOL_GPL(print_context_stack);
13934
13935 unsigned long
13936 -print_context_stack_bp(struct thread_info *tinfo,
13937 +print_context_stack_bp(struct task_struct *task, void *stack_start,
13938 unsigned long *stack, unsigned long bp,
13939 const struct stacktrace_ops *ops, void *data,
13940 unsigned long *end, int *graph)
13941 @@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_info *tinfo,
13942 struct stack_frame *frame = (struct stack_frame *)bp;
13943 unsigned long *ret_addr = &frame->return_address;
13944
13945 - while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
13946 + while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
13947 unsigned long addr = *ret_addr;
13948
13949 if (!__kernel_text_address(addr))
13950 @@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_info *tinfo,
13951 ops->address(data, addr, 1);
13952 frame = frame->next_frame;
13953 ret_addr = &frame->return_address;
13954 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
13955 + print_ftrace_graph_addr(addr, data, ops, task, graph);
13956 }
13957
13958 return (unsigned long)frame;
13959 @@ -186,7 +186,7 @@ void dump_stack(void)
13960
13961 bp = stack_frame(current, NULL);
13962 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
13963 - current->pid, current->comm, print_tainted(),
13964 + task_pid_nr(current), current->comm, print_tainted(),
13965 init_utsname()->release,
13966 (int)strcspn(init_utsname()->version, " "),
13967 init_utsname()->version);
13968 @@ -222,6 +222,8 @@ unsigned __kprobes long oops_begin(void)
13969 }
13970 EXPORT_SYMBOL_GPL(oops_begin);
13971
13972 +extern void gr_handle_kernel_exploit(void);
13973 +
13974 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
13975 {
13976 if (regs && kexec_should_crash(current))
13977 @@ -243,7 +245,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
13978 panic("Fatal exception in interrupt");
13979 if (panic_on_oops)
13980 panic("Fatal exception");
13981 - do_exit(signr);
13982 +
13983 + gr_handle_kernel_exploit();
13984 +
13985 + do_group_exit(signr);
13986 }
13987
13988 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
13989 @@ -270,7 +275,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
13990
13991 show_registers(regs);
13992 #ifdef CONFIG_X86_32
13993 - if (user_mode_vm(regs)) {
13994 + if (user_mode(regs)) {
13995 sp = regs->sp;
13996 ss = regs->ss & 0xffff;
13997 } else {
13998 @@ -298,7 +303,7 @@ void die(const char *str, struct pt_regs *regs, long err)
13999 unsigned long flags = oops_begin();
14000 int sig = SIGSEGV;
14001
14002 - if (!user_mode_vm(regs))
14003 + if (!user_mode(regs))
14004 report_bug(regs->ip, regs);
14005
14006 if (__die(str, regs, err))
14007 diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
14008 index c99f9ed..2a15d80 100644
14009 --- a/arch/x86/kernel/dumpstack_32.c
14010 +++ b/arch/x86/kernel/dumpstack_32.c
14011 @@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14012 bp = stack_frame(task, regs);
14013
14014 for (;;) {
14015 - struct thread_info *context;
14016 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14017
14018 - context = (struct thread_info *)
14019 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
14020 - bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
14021 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14022
14023 - stack = (unsigned long *)context->previous_esp;
14024 - if (!stack)
14025 + if (stack_start == task_stack_page(task))
14026 break;
14027 + stack = *(unsigned long **)stack_start;
14028 if (ops->stack(data, "IRQ") < 0)
14029 break;
14030 touch_nmi_watchdog();
14031 @@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs)
14032 * When in-kernel, we also print out the stack and code at the
14033 * time of the fault..
14034 */
14035 - if (!user_mode_vm(regs)) {
14036 + if (!user_mode(regs)) {
14037 unsigned int code_prologue = code_bytes * 43 / 64;
14038 unsigned int code_len = code_bytes;
14039 unsigned char c;
14040 u8 *ip;
14041 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
14042
14043 printk(KERN_EMERG "Stack:\n");
14044 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
14045
14046 printk(KERN_EMERG "Code: ");
14047
14048 - ip = (u8 *)regs->ip - code_prologue;
14049 + ip = (u8 *)regs->ip - code_prologue + cs_base;
14050 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
14051 /* try starting at IP */
14052 - ip = (u8 *)regs->ip;
14053 + ip = (u8 *)regs->ip + cs_base;
14054 code_len = code_len - code_prologue + 1;
14055 }
14056 for (i = 0; i < code_len; i++, ip++) {
14057 @@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs)
14058 printk(KERN_CONT " Bad EIP value.");
14059 break;
14060 }
14061 - if (ip == (u8 *)regs->ip)
14062 + if (ip == (u8 *)regs->ip + cs_base)
14063 printk(KERN_CONT "<%02x> ", c);
14064 else
14065 printk(KERN_CONT "%02x ", c);
14066 @@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
14067 {
14068 unsigned short ud2;
14069
14070 + ip = ktla_ktva(ip);
14071 if (ip < PAGE_OFFSET)
14072 return 0;
14073 if (probe_kernel_address((unsigned short *)ip, ud2))
14074 @@ -139,3 +139,15 @@ int is_valid_bugaddr(unsigned long ip)
14075
14076 return ud2 == 0x0b0f;
14077 }
14078 +
14079 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14080 +void pax_check_alloca(unsigned long size)
14081 +{
14082 + unsigned long sp = (unsigned long)&sp, stack_left;
14083 +
14084 + /* all kernel stacks are of the same size */
14085 + stack_left = sp & (THREAD_SIZE - 1);
14086 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14087 +}
14088 +EXPORT_SYMBOL(pax_check_alloca);
14089 +#endif
14090 diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
14091 index 17107bd..b2deecf 100644
14092 --- a/arch/x86/kernel/dumpstack_64.c
14093 +++ b/arch/x86/kernel/dumpstack_64.c
14094 @@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14095 unsigned long *irq_stack_end =
14096 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
14097 unsigned used = 0;
14098 - struct thread_info *tinfo;
14099 int graph = 0;
14100 unsigned long dummy;
14101 + void *stack_start;
14102
14103 if (!task)
14104 task = current;
14105 @@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14106 * current stack address. If the stacks consist of nested
14107 * exceptions
14108 */
14109 - tinfo = task_thread_info(task);
14110 for (;;) {
14111 char *id;
14112 unsigned long *estack_end;
14113 +
14114 estack_end = in_exception_stack(cpu, (unsigned long)stack,
14115 &used, &id);
14116
14117 @@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14118 if (ops->stack(data, id) < 0)
14119 break;
14120
14121 - bp = ops->walk_stack(tinfo, stack, bp, ops,
14122 + bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
14123 data, estack_end, &graph);
14124 ops->stack(data, "<EOE>");
14125 /*
14126 @@ -172,7 +172,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14127 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
14128 if (ops->stack(data, "IRQ") < 0)
14129 break;
14130 - bp = ops->walk_stack(tinfo, stack, bp,
14131 + bp = ops->walk_stack(task, irq_stack, stack, bp,
14132 ops, data, irq_stack_end, &graph);
14133 /*
14134 * We link to the next stack (which would be
14135 @@ -191,7 +191,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14136 /*
14137 * This handles the process stack:
14138 */
14139 - bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
14140 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14141 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14142 put_cpu();
14143 }
14144 EXPORT_SYMBOL(dump_trace);
14145 @@ -305,3 +306,50 @@ int is_valid_bugaddr(unsigned long ip)
14146
14147 return ud2 == 0x0b0f;
14148 }
14149 +
14150 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14151 +void pax_check_alloca(unsigned long size)
14152 +{
14153 + unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
14154 + unsigned cpu, used;
14155 + char *id;
14156 +
14157 + /* check the process stack first */
14158 + stack_start = (unsigned long)task_stack_page(current);
14159 + stack_end = stack_start + THREAD_SIZE;
14160 + if (likely(stack_start <= sp && sp < stack_end)) {
14161 + unsigned long stack_left = sp & (THREAD_SIZE - 1);
14162 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14163 + return;
14164 + }
14165 +
14166 + cpu = get_cpu();
14167 +
14168 + /* check the irq stacks */
14169 + stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
14170 + stack_start = stack_end - IRQ_STACK_SIZE;
14171 + if (stack_start <= sp && sp < stack_end) {
14172 + unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
14173 + put_cpu();
14174 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14175 + return;
14176 + }
14177 +
14178 + /* check the exception stacks */
14179 + used = 0;
14180 + stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
14181 + stack_start = stack_end - EXCEPTION_STKSZ;
14182 + if (stack_end && stack_start <= sp && sp < stack_end) {
14183 + unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
14184 + put_cpu();
14185 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14186 + return;
14187 + }
14188 +
14189 + put_cpu();
14190 +
14191 + /* unknown stack */
14192 + BUG();
14193 +}
14194 +EXPORT_SYMBOL(pax_check_alloca);
14195 +#endif
14196 diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
14197 index 9b9f18b..9fcaa04 100644
14198 --- a/arch/x86/kernel/early_printk.c
14199 +++ b/arch/x86/kernel/early_printk.c
14200 @@ -7,6 +7,7 @@
14201 #include <linux/pci_regs.h>
14202 #include <linux/pci_ids.h>
14203 #include <linux/errno.h>
14204 +#include <linux/sched.h>
14205 #include <asm/io.h>
14206 #include <asm/processor.h>
14207 #include <asm/fcntl.h>
14208 diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
14209 index 79d97e6..76aaad7 100644
14210 --- a/arch/x86/kernel/entry_32.S
14211 +++ b/arch/x86/kernel/entry_32.S
14212 @@ -98,12 +98,6 @@
14213 #endif
14214 .endm
14215
14216 -#ifdef CONFIG_VM86
14217 -#define resume_userspace_sig check_userspace
14218 -#else
14219 -#define resume_userspace_sig resume_userspace
14220 -#endif
14221 -
14222 /*
14223 * User gs save/restore
14224 *
14225 @@ -185,13 +179,146 @@
14226 /*CFI_REL_OFFSET gs, PT_GS*/
14227 .endm
14228 .macro SET_KERNEL_GS reg
14229 +
14230 +#ifdef CONFIG_CC_STACKPROTECTOR
14231 movl $(__KERNEL_STACK_CANARY), \reg
14232 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14233 + movl $(__USER_DS), \reg
14234 +#else
14235 + xorl \reg, \reg
14236 +#endif
14237 +
14238 movl \reg, %gs
14239 .endm
14240
14241 #endif /* CONFIG_X86_32_LAZY_GS */
14242
14243 -.macro SAVE_ALL
14244 +.macro pax_enter_kernel
14245 +#ifdef CONFIG_PAX_KERNEXEC
14246 + call pax_enter_kernel
14247 +#endif
14248 +.endm
14249 +
14250 +.macro pax_exit_kernel
14251 +#ifdef CONFIG_PAX_KERNEXEC
14252 + call pax_exit_kernel
14253 +#endif
14254 +.endm
14255 +
14256 +#ifdef CONFIG_PAX_KERNEXEC
14257 +ENTRY(pax_enter_kernel)
14258 +#ifdef CONFIG_PARAVIRT
14259 + pushl %eax
14260 + pushl %ecx
14261 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
14262 + mov %eax, %esi
14263 +#else
14264 + mov %cr0, %esi
14265 +#endif
14266 + bts $16, %esi
14267 + jnc 1f
14268 + mov %cs, %esi
14269 + cmp $__KERNEL_CS, %esi
14270 + jz 3f
14271 + ljmp $__KERNEL_CS, $3f
14272 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
14273 +2:
14274 +#ifdef CONFIG_PARAVIRT
14275 + mov %esi, %eax
14276 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
14277 +#else
14278 + mov %esi, %cr0
14279 +#endif
14280 +3:
14281 +#ifdef CONFIG_PARAVIRT
14282 + popl %ecx
14283 + popl %eax
14284 +#endif
14285 + ret
14286 +ENDPROC(pax_enter_kernel)
14287 +
14288 +ENTRY(pax_exit_kernel)
14289 +#ifdef CONFIG_PARAVIRT
14290 + pushl %eax
14291 + pushl %ecx
14292 +#endif
14293 + mov %cs, %esi
14294 + cmp $__KERNEXEC_KERNEL_CS, %esi
14295 + jnz 2f
14296 +#ifdef CONFIG_PARAVIRT
14297 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
14298 + mov %eax, %esi
14299 +#else
14300 + mov %cr0, %esi
14301 +#endif
14302 + btr $16, %esi
14303 + ljmp $__KERNEL_CS, $1f
14304 +1:
14305 +#ifdef CONFIG_PARAVIRT
14306 + mov %esi, %eax
14307 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
14308 +#else
14309 + mov %esi, %cr0
14310 +#endif
14311 +2:
14312 +#ifdef CONFIG_PARAVIRT
14313 + popl %ecx
14314 + popl %eax
14315 +#endif
14316 + ret
14317 +ENDPROC(pax_exit_kernel)
14318 +#endif
14319 +
14320 +.macro pax_erase_kstack
14321 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14322 + call pax_erase_kstack
14323 +#endif
14324 +.endm
14325 +
14326 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14327 +/*
14328 + * ebp: thread_info
14329 + * ecx, edx: can be clobbered
14330 + */
14331 +ENTRY(pax_erase_kstack)
14332 + pushl %edi
14333 + pushl %eax
14334 +
14335 + mov TI_lowest_stack(%ebp), %edi
14336 + mov $-0xBEEF, %eax
14337 + std
14338 +
14339 +1: mov %edi, %ecx
14340 + and $THREAD_SIZE_asm - 1, %ecx
14341 + shr $2, %ecx
14342 + repne scasl
14343 + jecxz 2f
14344 +
14345 + cmp $2*16, %ecx
14346 + jc 2f
14347 +
14348 + mov $2*16, %ecx
14349 + repe scasl
14350 + jecxz 2f
14351 + jne 1b
14352 +
14353 +2: cld
14354 + mov %esp, %ecx
14355 + sub %edi, %ecx
14356 + shr $2, %ecx
14357 + rep stosl
14358 +
14359 + mov TI_task_thread_sp0(%ebp), %edi
14360 + sub $128, %edi
14361 + mov %edi, TI_lowest_stack(%ebp)
14362 +
14363 + popl %eax
14364 + popl %edi
14365 + ret
14366 +ENDPROC(pax_erase_kstack)
14367 +#endif
14368 +
14369 +.macro __SAVE_ALL _DS
14370 cld
14371 PUSH_GS
14372 pushl_cfi %fs
14373 @@ -214,7 +341,7 @@
14374 CFI_REL_OFFSET ecx, 0
14375 pushl_cfi %ebx
14376 CFI_REL_OFFSET ebx, 0
14377 - movl $(__USER_DS), %edx
14378 + movl $\_DS, %edx
14379 movl %edx, %ds
14380 movl %edx, %es
14381 movl $(__KERNEL_PERCPU), %edx
14382 @@ -222,6 +349,15 @@
14383 SET_KERNEL_GS %edx
14384 .endm
14385
14386 +.macro SAVE_ALL
14387 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
14388 + __SAVE_ALL __KERNEL_DS
14389 + pax_enter_kernel
14390 +#else
14391 + __SAVE_ALL __USER_DS
14392 +#endif
14393 +.endm
14394 +
14395 .macro RESTORE_INT_REGS
14396 popl_cfi %ebx
14397 CFI_RESTORE ebx
14398 @@ -307,7 +443,7 @@ ENTRY(ret_from_fork)
14399 popfl_cfi
14400 jmp syscall_exit
14401 CFI_ENDPROC
14402 -END(ret_from_fork)
14403 +ENDPROC(ret_from_fork)
14404
14405 /*
14406 * Interrupt exit functions should be protected against kprobes
14407 @@ -327,12 +463,29 @@ ret_from_exception:
14408 preempt_stop(CLBR_ANY)
14409 ret_from_intr:
14410 GET_THREAD_INFO(%ebp)
14411 -check_userspace:
14412 +resume_userspace_sig:
14413 +#ifdef CONFIG_VM86
14414 movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS
14415 movb PT_CS(%esp), %al
14416 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
14417 +#else
14418 + /*
14419 + * We can be coming here from a syscall done in the kernel space,
14420 + * e.g. a failed kernel_execve().
14421 + */
14422 + movl PT_CS(%esp), %eax
14423 + andl $SEGMENT_RPL_MASK, %eax
14424 +#endif
14425 cmpl $USER_RPL, %eax
14426 +
14427 +#ifdef CONFIG_PAX_KERNEXEC
14428 + jae resume_userspace
14429 +
14430 + PAX_EXIT_KERNEL
14431 + jmp resume_kernel
14432 +#else
14433 jb resume_kernel # not returning to v8086 or userspace
14434 +#endif
14435
14436 ENTRY(resume_userspace)
14437 LOCKDEP_SYS_EXIT
14438 @@ -344,8 +497,8 @@ ENTRY(resume_userspace)
14439 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
14440 # int/exception return?
14441 jne work_pending
14442 - jmp restore_all
14443 -END(ret_from_exception)
14444 + jmp restore_all_pax
14445 +ENDPROC(ret_from_exception)
14446
14447 #ifdef CONFIG_PREEMPT
14448 ENTRY(resume_kernel)
14449 @@ -360,7 +513,7 @@ need_resched:
14450 jz restore_all
14451 call preempt_schedule_irq
14452 jmp need_resched
14453 -END(resume_kernel)
14454 +ENDPROC(resume_kernel)
14455 #endif
14456 CFI_ENDPROC
14457 /*
14458 @@ -394,23 +547,34 @@ sysenter_past_esp:
14459 /*CFI_REL_OFFSET cs, 0*/
14460 /*
14461 * Push current_thread_info()->sysenter_return to the stack.
14462 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
14463 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
14464 */
14465 - pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
14466 + pushl_cfi $0
14467 CFI_REL_OFFSET eip, 0
14468
14469 pushl_cfi %eax
14470 SAVE_ALL
14471 + GET_THREAD_INFO(%ebp)
14472 + movl TI_sysenter_return(%ebp),%ebp
14473 + movl %ebp,PT_EIP(%esp)
14474 ENABLE_INTERRUPTS(CLBR_NONE)
14475
14476 /*
14477 * Load the potential sixth argument from user stack.
14478 * Careful about security.
14479 */
14480 + movl PT_OLDESP(%esp),%ebp
14481 +
14482 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14483 + mov PT_OLDSS(%esp),%ds
14484 +1: movl %ds:(%ebp),%ebp
14485 + push %ss
14486 + pop %ds
14487 +#else
14488 cmpl $__PAGE_OFFSET-3,%ebp
14489 jae syscall_fault
14490 1: movl (%ebp),%ebp
14491 +#endif
14492 +
14493 movl %ebp,PT_EBP(%esp)
14494 .section __ex_table,"a"
14495 .align 4
14496 @@ -433,12 +597,24 @@ sysenter_do_call:
14497 testl $_TIF_ALLWORK_MASK, %ecx
14498 jne sysexit_audit
14499 sysenter_exit:
14500 +
14501 +#ifdef CONFIG_PAX_RANDKSTACK
14502 + pushl_cfi %eax
14503 + movl %esp, %eax
14504 + call pax_randomize_kstack
14505 + popl_cfi %eax
14506 +#endif
14507 +
14508 + pax_erase_kstack
14509 +
14510 /* if something modifies registers it must also disable sysexit */
14511 movl PT_EIP(%esp), %edx
14512 movl PT_OLDESP(%esp), %ecx
14513 xorl %ebp,%ebp
14514 TRACE_IRQS_ON
14515 1: mov PT_FS(%esp), %fs
14516 +2: mov PT_DS(%esp), %ds
14517 +3: mov PT_ES(%esp), %es
14518 PTGS_TO_GS
14519 ENABLE_INTERRUPTS_SYSEXIT
14520
14521 @@ -455,6 +631,9 @@ sysenter_audit:
14522 movl %eax,%edx /* 2nd arg: syscall number */
14523 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
14524 call __audit_syscall_entry
14525 +
14526 + pax_erase_kstack
14527 +
14528 pushl_cfi %ebx
14529 movl PT_EAX(%esp),%eax /* reload syscall number */
14530 jmp sysenter_do_call
14531 @@ -480,11 +659,17 @@ sysexit_audit:
14532
14533 CFI_ENDPROC
14534 .pushsection .fixup,"ax"
14535 -2: movl $0,PT_FS(%esp)
14536 +4: movl $0,PT_FS(%esp)
14537 + jmp 1b
14538 +5: movl $0,PT_DS(%esp)
14539 + jmp 1b
14540 +6: movl $0,PT_ES(%esp)
14541 jmp 1b
14542 .section __ex_table,"a"
14543 .align 4
14544 - .long 1b,2b
14545 + .long 1b,4b
14546 + .long 2b,5b
14547 + .long 3b,6b
14548 .popsection
14549 PTGS_TO_GS_EX
14550 ENDPROC(ia32_sysenter_target)
14551 @@ -517,6 +702,15 @@ syscall_exit:
14552 testl $_TIF_ALLWORK_MASK, %ecx # current->work
14553 jne syscall_exit_work
14554
14555 +restore_all_pax:
14556 +
14557 +#ifdef CONFIG_PAX_RANDKSTACK
14558 + movl %esp, %eax
14559 + call pax_randomize_kstack
14560 +#endif
14561 +
14562 + pax_erase_kstack
14563 +
14564 restore_all:
14565 TRACE_IRQS_IRET
14566 restore_all_notrace:
14567 @@ -576,14 +770,34 @@ ldt_ss:
14568 * compensating for the offset by changing to the ESPFIX segment with
14569 * a base address that matches for the difference.
14570 */
14571 -#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
14572 +#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
14573 mov %esp, %edx /* load kernel esp */
14574 mov PT_OLDESP(%esp), %eax /* load userspace esp */
14575 mov %dx, %ax /* eax: new kernel esp */
14576 sub %eax, %edx /* offset (low word is 0) */
14577 +#ifdef CONFIG_SMP
14578 + movl PER_CPU_VAR(cpu_number), %ebx
14579 + shll $PAGE_SHIFT_asm, %ebx
14580 + addl $cpu_gdt_table, %ebx
14581 +#else
14582 + movl $cpu_gdt_table, %ebx
14583 +#endif
14584 shr $16, %edx
14585 - mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
14586 - mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
14587 +
14588 +#ifdef CONFIG_PAX_KERNEXEC
14589 + mov %cr0, %esi
14590 + btr $16, %esi
14591 + mov %esi, %cr0
14592 +#endif
14593 +
14594 + mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
14595 + mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
14596 +
14597 +#ifdef CONFIG_PAX_KERNEXEC
14598 + bts $16, %esi
14599 + mov %esi, %cr0
14600 +#endif
14601 +
14602 pushl_cfi $__ESPFIX_SS
14603 pushl_cfi %eax /* new kernel esp */
14604 /* Disable interrupts, but do not irqtrace this section: we
14605 @@ -612,38 +826,30 @@ work_resched:
14606 movl TI_flags(%ebp), %ecx
14607 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
14608 # than syscall tracing?
14609 - jz restore_all
14610 + jz restore_all_pax
14611 testb $_TIF_NEED_RESCHED, %cl
14612 jnz work_resched
14613
14614 work_notifysig: # deal with pending signals and
14615 # notify-resume requests
14616 + movl %esp, %eax
14617 #ifdef CONFIG_VM86
14618 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
14619 - movl %esp, %eax
14620 - jne work_notifysig_v86 # returning to kernel-space or
14621 + jz 1f # returning to kernel-space or
14622 # vm86-space
14623 - TRACE_IRQS_ON
14624 - ENABLE_INTERRUPTS(CLBR_NONE)
14625 - xorl %edx, %edx
14626 - call do_notify_resume
14627 - jmp resume_userspace_sig
14628
14629 - ALIGN
14630 -work_notifysig_v86:
14631 pushl_cfi %ecx # save ti_flags for do_notify_resume
14632 call save_v86_state # %eax contains pt_regs pointer
14633 popl_cfi %ecx
14634 movl %eax, %esp
14635 -#else
14636 - movl %esp, %eax
14637 +1:
14638 #endif
14639 TRACE_IRQS_ON
14640 ENABLE_INTERRUPTS(CLBR_NONE)
14641 xorl %edx, %edx
14642 call do_notify_resume
14643 jmp resume_userspace_sig
14644 -END(work_pending)
14645 +ENDPROC(work_pending)
14646
14647 # perform syscall exit tracing
14648 ALIGN
14649 @@ -651,11 +857,14 @@ syscall_trace_entry:
14650 movl $-ENOSYS,PT_EAX(%esp)
14651 movl %esp, %eax
14652 call syscall_trace_enter
14653 +
14654 + pax_erase_kstack
14655 +
14656 /* What it returned is what we'll actually use. */
14657 cmpl $(NR_syscalls), %eax
14658 jnae syscall_call
14659 jmp syscall_exit
14660 -END(syscall_trace_entry)
14661 +ENDPROC(syscall_trace_entry)
14662
14663 # perform syscall exit tracing
14664 ALIGN
14665 @@ -668,20 +877,24 @@ syscall_exit_work:
14666 movl %esp, %eax
14667 call syscall_trace_leave
14668 jmp resume_userspace
14669 -END(syscall_exit_work)
14670 +ENDPROC(syscall_exit_work)
14671 CFI_ENDPROC
14672
14673 RING0_INT_FRAME # can't unwind into user space anyway
14674 syscall_fault:
14675 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14676 + push %ss
14677 + pop %ds
14678 +#endif
14679 GET_THREAD_INFO(%ebp)
14680 movl $-EFAULT,PT_EAX(%esp)
14681 jmp resume_userspace
14682 -END(syscall_fault)
14683 +ENDPROC(syscall_fault)
14684
14685 syscall_badsys:
14686 movl $-ENOSYS,PT_EAX(%esp)
14687 jmp resume_userspace
14688 -END(syscall_badsys)
14689 +ENDPROC(syscall_badsys)
14690 CFI_ENDPROC
14691 /*
14692 * End of kprobes section
14693 @@ -753,6 +966,36 @@ ENTRY(ptregs_clone)
14694 CFI_ENDPROC
14695 ENDPROC(ptregs_clone)
14696
14697 + ALIGN;
14698 +ENTRY(kernel_execve)
14699 + CFI_STARTPROC
14700 + pushl_cfi %ebp
14701 + sub $PT_OLDSS+4,%esp
14702 + pushl_cfi %edi
14703 + pushl_cfi %ecx
14704 + pushl_cfi %eax
14705 + lea 3*4(%esp),%edi
14706 + mov $PT_OLDSS/4+1,%ecx
14707 + xorl %eax,%eax
14708 + rep stosl
14709 + popl_cfi %eax
14710 + popl_cfi %ecx
14711 + popl_cfi %edi
14712 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
14713 + pushl_cfi %esp
14714 + call sys_execve
14715 + add $4,%esp
14716 + CFI_ADJUST_CFA_OFFSET -4
14717 + GET_THREAD_INFO(%ebp)
14718 + test %eax,%eax
14719 + jz syscall_exit
14720 + add $PT_OLDSS+4,%esp
14721 + CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
14722 + popl_cfi %ebp
14723 + ret
14724 + CFI_ENDPROC
14725 +ENDPROC(kernel_execve)
14726 +
14727 .macro FIXUP_ESPFIX_STACK
14728 /*
14729 * Switch back for ESPFIX stack to the normal zerobased stack
14730 @@ -762,8 +1005,15 @@ ENDPROC(ptregs_clone)
14731 * normal stack and adjusts ESP with the matching offset.
14732 */
14733 /* fixup the stack */
14734 - mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
14735 - mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
14736 +#ifdef CONFIG_SMP
14737 + movl PER_CPU_VAR(cpu_number), %ebx
14738 + shll $PAGE_SHIFT_asm, %ebx
14739 + addl $cpu_gdt_table, %ebx
14740 +#else
14741 + movl $cpu_gdt_table, %ebx
14742 +#endif
14743 + mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
14744 + mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
14745 shl $16, %eax
14746 addl %esp, %eax /* the adjusted stack pointer */
14747 pushl_cfi $__KERNEL_DS
14748 @@ -816,7 +1066,7 @@ vector=vector+1
14749 .endr
14750 2: jmp common_interrupt
14751 .endr
14752 -END(irq_entries_start)
14753 +ENDPROC(irq_entries_start)
14754
14755 .previous
14756 END(interrupt)
14757 @@ -864,7 +1114,7 @@ ENTRY(coprocessor_error)
14758 pushl_cfi $do_coprocessor_error
14759 jmp error_code
14760 CFI_ENDPROC
14761 -END(coprocessor_error)
14762 +ENDPROC(coprocessor_error)
14763
14764 ENTRY(simd_coprocessor_error)
14765 RING0_INT_FRAME
14766 @@ -885,7 +1135,7 @@ ENTRY(simd_coprocessor_error)
14767 #endif
14768 jmp error_code
14769 CFI_ENDPROC
14770 -END(simd_coprocessor_error)
14771 +ENDPROC(simd_coprocessor_error)
14772
14773 ENTRY(device_not_available)
14774 RING0_INT_FRAME
14775 @@ -893,7 +1143,7 @@ ENTRY(device_not_available)
14776 pushl_cfi $do_device_not_available
14777 jmp error_code
14778 CFI_ENDPROC
14779 -END(device_not_available)
14780 +ENDPROC(device_not_available)
14781
14782 #ifdef CONFIG_PARAVIRT
14783 ENTRY(native_iret)
14784 @@ -902,12 +1152,12 @@ ENTRY(native_iret)
14785 .align 4
14786 .long native_iret, iret_exc
14787 .previous
14788 -END(native_iret)
14789 +ENDPROC(native_iret)
14790
14791 ENTRY(native_irq_enable_sysexit)
14792 sti
14793 sysexit
14794 -END(native_irq_enable_sysexit)
14795 +ENDPROC(native_irq_enable_sysexit)
14796 #endif
14797
14798 ENTRY(overflow)
14799 @@ -916,7 +1166,7 @@ ENTRY(overflow)
14800 pushl_cfi $do_overflow
14801 jmp error_code
14802 CFI_ENDPROC
14803 -END(overflow)
14804 +ENDPROC(overflow)
14805
14806 ENTRY(bounds)
14807 RING0_INT_FRAME
14808 @@ -924,7 +1174,7 @@ ENTRY(bounds)
14809 pushl_cfi $do_bounds
14810 jmp error_code
14811 CFI_ENDPROC
14812 -END(bounds)
14813 +ENDPROC(bounds)
14814
14815 ENTRY(invalid_op)
14816 RING0_INT_FRAME
14817 @@ -932,7 +1182,7 @@ ENTRY(invalid_op)
14818 pushl_cfi $do_invalid_op
14819 jmp error_code
14820 CFI_ENDPROC
14821 -END(invalid_op)
14822 +ENDPROC(invalid_op)
14823
14824 ENTRY(coprocessor_segment_overrun)
14825 RING0_INT_FRAME
14826 @@ -940,35 +1190,35 @@ ENTRY(coprocessor_segment_overrun)
14827 pushl_cfi $do_coprocessor_segment_overrun
14828 jmp error_code
14829 CFI_ENDPROC
14830 -END(coprocessor_segment_overrun)
14831 +ENDPROC(coprocessor_segment_overrun)
14832
14833 ENTRY(invalid_TSS)
14834 RING0_EC_FRAME
14835 pushl_cfi $do_invalid_TSS
14836 jmp error_code
14837 CFI_ENDPROC
14838 -END(invalid_TSS)
14839 +ENDPROC(invalid_TSS)
14840
14841 ENTRY(segment_not_present)
14842 RING0_EC_FRAME
14843 pushl_cfi $do_segment_not_present
14844 jmp error_code
14845 CFI_ENDPROC
14846 -END(segment_not_present)
14847 +ENDPROC(segment_not_present)
14848
14849 ENTRY(stack_segment)
14850 RING0_EC_FRAME
14851 pushl_cfi $do_stack_segment
14852 jmp error_code
14853 CFI_ENDPROC
14854 -END(stack_segment)
14855 +ENDPROC(stack_segment)
14856
14857 ENTRY(alignment_check)
14858 RING0_EC_FRAME
14859 pushl_cfi $do_alignment_check
14860 jmp error_code
14861 CFI_ENDPROC
14862 -END(alignment_check)
14863 +ENDPROC(alignment_check)
14864
14865 ENTRY(divide_error)
14866 RING0_INT_FRAME
14867 @@ -976,7 +1226,7 @@ ENTRY(divide_error)
14868 pushl_cfi $do_divide_error
14869 jmp error_code
14870 CFI_ENDPROC
14871 -END(divide_error)
14872 +ENDPROC(divide_error)
14873
14874 #ifdef CONFIG_X86_MCE
14875 ENTRY(machine_check)
14876 @@ -985,7 +1235,7 @@ ENTRY(machine_check)
14877 pushl_cfi machine_check_vector
14878 jmp error_code
14879 CFI_ENDPROC
14880 -END(machine_check)
14881 +ENDPROC(machine_check)
14882 #endif
14883
14884 ENTRY(spurious_interrupt_bug)
14885 @@ -994,7 +1244,7 @@ ENTRY(spurious_interrupt_bug)
14886 pushl_cfi $do_spurious_interrupt_bug
14887 jmp error_code
14888 CFI_ENDPROC
14889 -END(spurious_interrupt_bug)
14890 +ENDPROC(spurious_interrupt_bug)
14891 /*
14892 * End of kprobes section
14893 */
14894 @@ -1109,7 +1359,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
14895
14896 ENTRY(mcount)
14897 ret
14898 -END(mcount)
14899 +ENDPROC(mcount)
14900
14901 ENTRY(ftrace_caller)
14902 cmpl $0, function_trace_stop
14903 @@ -1138,7 +1388,7 @@ ftrace_graph_call:
14904 .globl ftrace_stub
14905 ftrace_stub:
14906 ret
14907 -END(ftrace_caller)
14908 +ENDPROC(ftrace_caller)
14909
14910 #else /* ! CONFIG_DYNAMIC_FTRACE */
14911
14912 @@ -1174,7 +1424,7 @@ trace:
14913 popl %ecx
14914 popl %eax
14915 jmp ftrace_stub
14916 -END(mcount)
14917 +ENDPROC(mcount)
14918 #endif /* CONFIG_DYNAMIC_FTRACE */
14919 #endif /* CONFIG_FUNCTION_TRACER */
14920
14921 @@ -1195,7 +1445,7 @@ ENTRY(ftrace_graph_caller)
14922 popl %ecx
14923 popl %eax
14924 ret
14925 -END(ftrace_graph_caller)
14926 +ENDPROC(ftrace_graph_caller)
14927
14928 .globl return_to_handler
14929 return_to_handler:
14930 @@ -1250,15 +1500,18 @@ error_code:
14931 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
14932 REG_TO_PTGS %ecx
14933 SET_KERNEL_GS %ecx
14934 - movl $(__USER_DS), %ecx
14935 + movl $(__KERNEL_DS), %ecx
14936 movl %ecx, %ds
14937 movl %ecx, %es
14938 +
14939 + pax_enter_kernel
14940 +
14941 TRACE_IRQS_OFF
14942 movl %esp,%eax # pt_regs pointer
14943 call *%edi
14944 jmp ret_from_exception
14945 CFI_ENDPROC
14946 -END(page_fault)
14947 +ENDPROC(page_fault)
14948
14949 /*
14950 * Debug traps and NMI can happen at the one SYSENTER instruction
14951 @@ -1300,7 +1553,7 @@ debug_stack_correct:
14952 call do_debug
14953 jmp ret_from_exception
14954 CFI_ENDPROC
14955 -END(debug)
14956 +ENDPROC(debug)
14957
14958 /*
14959 * NMI is doubly nasty. It can happen _while_ we're handling
14960 @@ -1337,6 +1590,9 @@ nmi_stack_correct:
14961 xorl %edx,%edx # zero error code
14962 movl %esp,%eax # pt_regs pointer
14963 call do_nmi
14964 +
14965 + pax_exit_kernel
14966 +
14967 jmp restore_all_notrace
14968 CFI_ENDPROC
14969
14970 @@ -1373,12 +1629,15 @@ nmi_espfix_stack:
14971 FIXUP_ESPFIX_STACK # %eax == %esp
14972 xorl %edx,%edx # zero error code
14973 call do_nmi
14974 +
14975 + pax_exit_kernel
14976 +
14977 RESTORE_REGS
14978 lss 12+4(%esp), %esp # back to espfix stack
14979 CFI_ADJUST_CFA_OFFSET -24
14980 jmp irq_return
14981 CFI_ENDPROC
14982 -END(nmi)
14983 +ENDPROC(nmi)
14984
14985 ENTRY(int3)
14986 RING0_INT_FRAME
14987 @@ -1390,14 +1649,14 @@ ENTRY(int3)
14988 call do_int3
14989 jmp ret_from_exception
14990 CFI_ENDPROC
14991 -END(int3)
14992 +ENDPROC(int3)
14993
14994 ENTRY(general_protection)
14995 RING0_EC_FRAME
14996 pushl_cfi $do_general_protection
14997 jmp error_code
14998 CFI_ENDPROC
14999 -END(general_protection)
15000 +ENDPROC(general_protection)
15001
15002 #ifdef CONFIG_KVM_GUEST
15003 ENTRY(async_page_fault)
15004 @@ -1405,7 +1664,7 @@ ENTRY(async_page_fault)
15005 pushl_cfi $do_async_page_fault
15006 jmp error_code
15007 CFI_ENDPROC
15008 -END(async_page_fault)
15009 +ENDPROC(async_page_fault)
15010 #endif
15011
15012 /*
15013 diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
15014 index 1333d98..b340ca2 100644
15015 --- a/arch/x86/kernel/entry_64.S
15016 +++ b/arch/x86/kernel/entry_64.S
15017 @@ -56,6 +56,8 @@
15018 #include <asm/ftrace.h>
15019 #include <asm/percpu.h>
15020 #include <linux/err.h>
15021 +#include <asm/pgtable.h>
15022 +#include <asm/alternative-asm.h>
15023
15024 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
15025 #include <linux/elf-em.h>
15026 @@ -69,8 +71,9 @@
15027 #ifdef CONFIG_FUNCTION_TRACER
15028 #ifdef CONFIG_DYNAMIC_FTRACE
15029 ENTRY(mcount)
15030 + pax_force_retaddr
15031 retq
15032 -END(mcount)
15033 +ENDPROC(mcount)
15034
15035 ENTRY(ftrace_caller)
15036 cmpl $0, function_trace_stop
15037 @@ -93,8 +96,9 @@ GLOBAL(ftrace_graph_call)
15038 #endif
15039
15040 GLOBAL(ftrace_stub)
15041 + pax_force_retaddr
15042 retq
15043 -END(ftrace_caller)
15044 +ENDPROC(ftrace_caller)
15045
15046 #else /* ! CONFIG_DYNAMIC_FTRACE */
15047 ENTRY(mcount)
15048 @@ -113,6 +117,7 @@ ENTRY(mcount)
15049 #endif
15050
15051 GLOBAL(ftrace_stub)
15052 + pax_force_retaddr
15053 retq
15054
15055 trace:
15056 @@ -122,12 +127,13 @@ trace:
15057 movq 8(%rbp), %rsi
15058 subq $MCOUNT_INSN_SIZE, %rdi
15059
15060 + pax_force_fptr ftrace_trace_function
15061 call *ftrace_trace_function
15062
15063 MCOUNT_RESTORE_FRAME
15064
15065 jmp ftrace_stub
15066 -END(mcount)
15067 +ENDPROC(mcount)
15068 #endif /* CONFIG_DYNAMIC_FTRACE */
15069 #endif /* CONFIG_FUNCTION_TRACER */
15070
15071 @@ -147,8 +153,9 @@ ENTRY(ftrace_graph_caller)
15072
15073 MCOUNT_RESTORE_FRAME
15074
15075 + pax_force_retaddr
15076 retq
15077 -END(ftrace_graph_caller)
15078 +ENDPROC(ftrace_graph_caller)
15079
15080 GLOBAL(return_to_handler)
15081 subq $24, %rsp
15082 @@ -164,6 +171,7 @@ GLOBAL(return_to_handler)
15083 movq 8(%rsp), %rdx
15084 movq (%rsp), %rax
15085 addq $24, %rsp
15086 + pax_force_fptr %rdi
15087 jmp *%rdi
15088 #endif
15089
15090 @@ -179,6 +187,282 @@ ENTRY(native_usergs_sysret64)
15091 ENDPROC(native_usergs_sysret64)
15092 #endif /* CONFIG_PARAVIRT */
15093
15094 + .macro ljmpq sel, off
15095 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
15096 + .byte 0x48; ljmp *1234f(%rip)
15097 + .pushsection .rodata
15098 + .align 16
15099 + 1234: .quad \off; .word \sel
15100 + .popsection
15101 +#else
15102 + pushq $\sel
15103 + pushq $\off
15104 + lretq
15105 +#endif
15106 + .endm
15107 +
15108 + .macro pax_enter_kernel
15109 + pax_set_fptr_mask
15110 +#ifdef CONFIG_PAX_KERNEXEC
15111 + call pax_enter_kernel
15112 +#endif
15113 + .endm
15114 +
15115 + .macro pax_exit_kernel
15116 +#ifdef CONFIG_PAX_KERNEXEC
15117 + call pax_exit_kernel
15118 +#endif
15119 + .endm
15120 +
15121 +#ifdef CONFIG_PAX_KERNEXEC
15122 +ENTRY(pax_enter_kernel)
15123 + pushq %rdi
15124 +
15125 +#ifdef CONFIG_PARAVIRT
15126 + PV_SAVE_REGS(CLBR_RDI)
15127 +#endif
15128 +
15129 + GET_CR0_INTO_RDI
15130 + bts $16,%rdi
15131 + jnc 3f
15132 + mov %cs,%edi
15133 + cmp $__KERNEL_CS,%edi
15134 + jnz 2f
15135 +1:
15136 +
15137 +#ifdef CONFIG_PARAVIRT
15138 + PV_RESTORE_REGS(CLBR_RDI)
15139 +#endif
15140 +
15141 + popq %rdi
15142 + pax_force_retaddr
15143 + retq
15144 +
15145 +2: ljmpq __KERNEL_CS,1f
15146 +3: ljmpq __KERNEXEC_KERNEL_CS,4f
15147 +4: SET_RDI_INTO_CR0
15148 + jmp 1b
15149 +ENDPROC(pax_enter_kernel)
15150 +
15151 +ENTRY(pax_exit_kernel)
15152 + pushq %rdi
15153 +
15154 +#ifdef CONFIG_PARAVIRT
15155 + PV_SAVE_REGS(CLBR_RDI)
15156 +#endif
15157 +
15158 + mov %cs,%rdi
15159 + cmp $__KERNEXEC_KERNEL_CS,%edi
15160 + jz 2f
15161 +1:
15162 +
15163 +#ifdef CONFIG_PARAVIRT
15164 + PV_RESTORE_REGS(CLBR_RDI);
15165 +#endif
15166 +
15167 + popq %rdi
15168 + pax_force_retaddr
15169 + retq
15170 +
15171 +2: GET_CR0_INTO_RDI
15172 + btr $16,%rdi
15173 + ljmpq __KERNEL_CS,3f
15174 +3: SET_RDI_INTO_CR0
15175 + jmp 1b
15176 +#ifdef CONFIG_PARAVIRT
15177 + PV_RESTORE_REGS(CLBR_RDI);
15178 +#endif
15179 +
15180 + popq %rdi
15181 + pax_force_retaddr
15182 + retq
15183 +ENDPROC(pax_exit_kernel)
15184 +#endif
15185 +
15186 + .macro pax_enter_kernel_user
15187 + pax_set_fptr_mask
15188 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15189 + call pax_enter_kernel_user
15190 +#endif
15191 + .endm
15192 +
15193 + .macro pax_exit_kernel_user
15194 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15195 + call pax_exit_kernel_user
15196 +#endif
15197 +#ifdef CONFIG_PAX_RANDKSTACK
15198 + pushq %rax
15199 + call pax_randomize_kstack
15200 + popq %rax
15201 +#endif
15202 + .endm
15203 +
15204 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15205 +ENTRY(pax_enter_kernel_user)
15206 + pushq %rdi
15207 + pushq %rbx
15208 +
15209 +#ifdef CONFIG_PARAVIRT
15210 + PV_SAVE_REGS(CLBR_RDI)
15211 +#endif
15212 +
15213 + GET_CR3_INTO_RDI
15214 + mov %rdi,%rbx
15215 + add $__START_KERNEL_map,%rbx
15216 + sub phys_base(%rip),%rbx
15217 +
15218 +#ifdef CONFIG_PARAVIRT
15219 + pushq %rdi
15220 + cmpl $0, pv_info+PARAVIRT_enabled
15221 + jz 1f
15222 + i = 0
15223 + .rept USER_PGD_PTRS
15224 + mov i*8(%rbx),%rsi
15225 + mov $0,%sil
15226 + lea i*8(%rbx),%rdi
15227 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
15228 + i = i + 1
15229 + .endr
15230 + jmp 2f
15231 +1:
15232 +#endif
15233 +
15234 + i = 0
15235 + .rept USER_PGD_PTRS
15236 + movb $0,i*8(%rbx)
15237 + i = i + 1
15238 + .endr
15239 +
15240 +#ifdef CONFIG_PARAVIRT
15241 +2: popq %rdi
15242 +#endif
15243 + SET_RDI_INTO_CR3
15244 +
15245 +#ifdef CONFIG_PAX_KERNEXEC
15246 + GET_CR0_INTO_RDI
15247 + bts $16,%rdi
15248 + SET_RDI_INTO_CR0
15249 +#endif
15250 +
15251 +#ifdef CONFIG_PARAVIRT
15252 + PV_RESTORE_REGS(CLBR_RDI)
15253 +#endif
15254 +
15255 + popq %rbx
15256 + popq %rdi
15257 + pax_force_retaddr
15258 + retq
15259 +ENDPROC(pax_enter_kernel_user)
15260 +
15261 +ENTRY(pax_exit_kernel_user)
15262 + push %rdi
15263 +
15264 +#ifdef CONFIG_PARAVIRT
15265 + pushq %rbx
15266 + PV_SAVE_REGS(CLBR_RDI)
15267 +#endif
15268 +
15269 +#ifdef CONFIG_PAX_KERNEXEC
15270 + GET_CR0_INTO_RDI
15271 + btr $16,%rdi
15272 + SET_RDI_INTO_CR0
15273 +#endif
15274 +
15275 + GET_CR3_INTO_RDI
15276 + add $__START_KERNEL_map,%rdi
15277 + sub phys_base(%rip),%rdi
15278 +
15279 +#ifdef CONFIG_PARAVIRT
15280 + cmpl $0, pv_info+PARAVIRT_enabled
15281 + jz 1f
15282 + mov %rdi,%rbx
15283 + i = 0
15284 + .rept USER_PGD_PTRS
15285 + mov i*8(%rbx),%rsi
15286 + mov $0x67,%sil
15287 + lea i*8(%rbx),%rdi
15288 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
15289 + i = i + 1
15290 + .endr
15291 + jmp 2f
15292 +1:
15293 +#endif
15294 +
15295 + i = 0
15296 + .rept USER_PGD_PTRS
15297 + movb $0x67,i*8(%rdi)
15298 + i = i + 1
15299 + .endr
15300 +
15301 +#ifdef CONFIG_PARAVIRT
15302 +2: PV_RESTORE_REGS(CLBR_RDI)
15303 + popq %rbx
15304 +#endif
15305 +
15306 + popq %rdi
15307 + pax_force_retaddr
15308 + retq
15309 +ENDPROC(pax_exit_kernel_user)
15310 +#endif
15311 +
15312 +.macro pax_erase_kstack
15313 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15314 + call pax_erase_kstack
15315 +#endif
15316 +.endm
15317 +
15318 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15319 +/*
15320 + * r11: thread_info
15321 + * rcx, rdx: can be clobbered
15322 + */
15323 +ENTRY(pax_erase_kstack)
15324 + pushq %rdi
15325 + pushq %rax
15326 + pushq %r11
15327 +
15328 + GET_THREAD_INFO(%r11)
15329 + mov TI_lowest_stack(%r11), %rdi
15330 + mov $-0xBEEF, %rax
15331 + std
15332 +
15333 +1: mov %edi, %ecx
15334 + and $THREAD_SIZE_asm - 1, %ecx
15335 + shr $3, %ecx
15336 + repne scasq
15337 + jecxz 2f
15338 +
15339 + cmp $2*8, %ecx
15340 + jc 2f
15341 +
15342 + mov $2*8, %ecx
15343 + repe scasq
15344 + jecxz 2f
15345 + jne 1b
15346 +
15347 +2: cld
15348 + mov %esp, %ecx
15349 + sub %edi, %ecx
15350 +
15351 + cmp $THREAD_SIZE_asm, %rcx
15352 + jb 3f
15353 + ud2
15354 +3:
15355 +
15356 + shr $3, %ecx
15357 + rep stosq
15358 +
15359 + mov TI_task_thread_sp0(%r11), %rdi
15360 + sub $256, %rdi
15361 + mov %rdi, TI_lowest_stack(%r11)
15362 +
15363 + popq %r11
15364 + popq %rax
15365 + popq %rdi
15366 + pax_force_retaddr
15367 + ret
15368 +ENDPROC(pax_erase_kstack)
15369 +#endif
15370
15371 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
15372 #ifdef CONFIG_TRACE_IRQFLAGS
15373 @@ -232,8 +516,8 @@ ENDPROC(native_usergs_sysret64)
15374 .endm
15375
15376 .macro UNFAKE_STACK_FRAME
15377 - addq $8*6, %rsp
15378 - CFI_ADJUST_CFA_OFFSET -(6*8)
15379 + addq $8*6 + ARG_SKIP, %rsp
15380 + CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
15381 .endm
15382
15383 /*
15384 @@ -320,7 +604,7 @@ ENDPROC(native_usergs_sysret64)
15385 movq %rsp, %rsi
15386
15387 leaq -RBP(%rsp),%rdi /* arg1 for handler */
15388 - testl $3, CS(%rdi)
15389 + testb $3, CS(%rdi)
15390 je 1f
15391 SWAPGS
15392 /*
15393 @@ -356,9 +640,10 @@ ENTRY(save_rest)
15394 movq_cfi r15, R15+16
15395 movq %r11, 8(%rsp) /* return address */
15396 FIXUP_TOP_OF_STACK %r11, 16
15397 + pax_force_retaddr
15398 ret
15399 CFI_ENDPROC
15400 -END(save_rest)
15401 +ENDPROC(save_rest)
15402
15403 /* save complete stack frame */
15404 .pushsection .kprobes.text, "ax"
15405 @@ -387,9 +672,10 @@ ENTRY(save_paranoid)
15406 js 1f /* negative -> in kernel */
15407 SWAPGS
15408 xorl %ebx,%ebx
15409 -1: ret
15410 +1: pax_force_retaddr_bts
15411 + ret
15412 CFI_ENDPROC
15413 -END(save_paranoid)
15414 +ENDPROC(save_paranoid)
15415 .popsection
15416
15417 /*
15418 @@ -411,7 +697,7 @@ ENTRY(ret_from_fork)
15419
15420 RESTORE_REST
15421
15422 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
15423 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
15424 jz retint_restore_args
15425
15426 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
15427 @@ -421,7 +707,7 @@ ENTRY(ret_from_fork)
15428 jmp ret_from_sys_call # go to the SYSRET fastpath
15429
15430 CFI_ENDPROC
15431 -END(ret_from_fork)
15432 +ENDPROC(ret_from_fork)
15433
15434 /*
15435 * System call entry. Up to 6 arguments in registers are supported.
15436 @@ -457,7 +743,7 @@ END(ret_from_fork)
15437 ENTRY(system_call)
15438 CFI_STARTPROC simple
15439 CFI_SIGNAL_FRAME
15440 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
15441 + CFI_DEF_CFA rsp,0
15442 CFI_REGISTER rip,rcx
15443 /*CFI_REGISTER rflags,r11*/
15444 SWAPGS_UNSAFE_STACK
15445 @@ -470,21 +756,23 @@ GLOBAL(system_call_after_swapgs)
15446
15447 movq %rsp,PER_CPU_VAR(old_rsp)
15448 movq PER_CPU_VAR(kernel_stack),%rsp
15449 + SAVE_ARGS 8*6,0
15450 + pax_enter_kernel_user
15451 /*
15452 * No need to follow this irqs off/on section - it's straight
15453 * and short:
15454 */
15455 ENABLE_INTERRUPTS(CLBR_NONE)
15456 - SAVE_ARGS 8,0
15457 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
15458 movq %rcx,RIP-ARGOFFSET(%rsp)
15459 CFI_REL_OFFSET rip,RIP-ARGOFFSET
15460 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15461 + GET_THREAD_INFO(%rcx)
15462 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
15463 jnz tracesys
15464 system_call_fastpath:
15465 cmpq $__NR_syscall_max,%rax
15466 ja badsys
15467 - movq %r10,%rcx
15468 + movq R10-ARGOFFSET(%rsp),%rcx
15469 call *sys_call_table(,%rax,8) # XXX: rip relative
15470 movq %rax,RAX-ARGOFFSET(%rsp)
15471 /*
15472 @@ -498,10 +786,13 @@ sysret_check:
15473 LOCKDEP_SYS_EXIT
15474 DISABLE_INTERRUPTS(CLBR_NONE)
15475 TRACE_IRQS_OFF
15476 - movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
15477 + GET_THREAD_INFO(%rcx)
15478 + movl TI_flags(%rcx),%edx
15479 andl %edi,%edx
15480 jnz sysret_careful
15481 CFI_REMEMBER_STATE
15482 + pax_exit_kernel_user
15483 + pax_erase_kstack
15484 /*
15485 * sysretq will re-enable interrupts:
15486 */
15487 @@ -553,14 +844,18 @@ badsys:
15488 * jump back to the normal fast path.
15489 */
15490 auditsys:
15491 - movq %r10,%r9 /* 6th arg: 4th syscall arg */
15492 + movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
15493 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
15494 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
15495 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
15496 movq %rax,%rsi /* 2nd arg: syscall number */
15497 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
15498 call __audit_syscall_entry
15499 +
15500 + pax_erase_kstack
15501 +
15502 LOAD_ARGS 0 /* reload call-clobbered registers */
15503 + pax_set_fptr_mask
15504 jmp system_call_fastpath
15505
15506 /*
15507 @@ -581,7 +876,7 @@ sysret_audit:
15508 /* Do syscall tracing */
15509 tracesys:
15510 #ifdef CONFIG_AUDITSYSCALL
15511 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15512 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
15513 jz auditsys
15514 #endif
15515 SAVE_REST
15516 @@ -589,16 +884,20 @@ tracesys:
15517 FIXUP_TOP_OF_STACK %rdi
15518 movq %rsp,%rdi
15519 call syscall_trace_enter
15520 +
15521 + pax_erase_kstack
15522 +
15523 /*
15524 * Reload arg registers from stack in case ptrace changed them.
15525 * We don't reload %rax because syscall_trace_enter() returned
15526 * the value it wants us to use in the table lookup.
15527 */
15528 LOAD_ARGS ARGOFFSET, 1
15529 + pax_set_fptr_mask
15530 RESTORE_REST
15531 cmpq $__NR_syscall_max,%rax
15532 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
15533 - movq %r10,%rcx /* fixup for C */
15534 + movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
15535 call *sys_call_table(,%rax,8)
15536 movq %rax,RAX-ARGOFFSET(%rsp)
15537 /* Use IRET because user could have changed frame */
15538 @@ -619,6 +918,7 @@ GLOBAL(int_with_check)
15539 andl %edi,%edx
15540 jnz int_careful
15541 andl $~TS_COMPAT,TI_status(%rcx)
15542 + pax_erase_kstack
15543 jmp retint_swapgs
15544
15545 /* Either reschedule or signal or syscall exit tracking needed. */
15546 @@ -665,7 +965,7 @@ int_restore_rest:
15547 TRACE_IRQS_OFF
15548 jmp int_with_check
15549 CFI_ENDPROC
15550 -END(system_call)
15551 +ENDPROC(system_call)
15552
15553 /*
15554 * Certain special system calls that need to save a complete full stack frame.
15555 @@ -681,7 +981,7 @@ ENTRY(\label)
15556 call \func
15557 jmp ptregscall_common
15558 CFI_ENDPROC
15559 -END(\label)
15560 +ENDPROC(\label)
15561 .endm
15562
15563 PTREGSCALL stub_clone, sys_clone, %r8
15564 @@ -699,9 +999,10 @@ ENTRY(ptregscall_common)
15565 movq_cfi_restore R12+8, r12
15566 movq_cfi_restore RBP+8, rbp
15567 movq_cfi_restore RBX+8, rbx
15568 + pax_force_retaddr
15569 ret $REST_SKIP /* pop extended registers */
15570 CFI_ENDPROC
15571 -END(ptregscall_common)
15572 +ENDPROC(ptregscall_common)
15573
15574 ENTRY(stub_execve)
15575 CFI_STARTPROC
15576 @@ -716,7 +1017,7 @@ ENTRY(stub_execve)
15577 RESTORE_REST
15578 jmp int_ret_from_sys_call
15579 CFI_ENDPROC
15580 -END(stub_execve)
15581 +ENDPROC(stub_execve)
15582
15583 /*
15584 * sigreturn is special because it needs to restore all registers on return.
15585 @@ -734,7 +1035,7 @@ ENTRY(stub_rt_sigreturn)
15586 RESTORE_REST
15587 jmp int_ret_from_sys_call
15588 CFI_ENDPROC
15589 -END(stub_rt_sigreturn)
15590 +ENDPROC(stub_rt_sigreturn)
15591
15592 /*
15593 * Build the entry stubs and pointer table with some assembler magic.
15594 @@ -769,7 +1070,7 @@ vector=vector+1
15595 2: jmp common_interrupt
15596 .endr
15597 CFI_ENDPROC
15598 -END(irq_entries_start)
15599 +ENDPROC(irq_entries_start)
15600
15601 .previous
15602 END(interrupt)
15603 @@ -789,6 +1090,16 @@ END(interrupt)
15604 subq $ORIG_RAX-RBP, %rsp
15605 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
15606 SAVE_ARGS_IRQ
15607 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15608 + testb $3, CS(%rdi)
15609 + jnz 1f
15610 + pax_enter_kernel
15611 + jmp 2f
15612 +1: pax_enter_kernel_user
15613 +2:
15614 +#else
15615 + pax_enter_kernel
15616 +#endif
15617 call \func
15618 .endm
15619
15620 @@ -820,7 +1131,7 @@ ret_from_intr:
15621
15622 exit_intr:
15623 GET_THREAD_INFO(%rcx)
15624 - testl $3,CS-ARGOFFSET(%rsp)
15625 + testb $3,CS-ARGOFFSET(%rsp)
15626 je retint_kernel
15627
15628 /* Interrupt came from user space */
15629 @@ -842,12 +1153,15 @@ retint_swapgs: /* return to user-space */
15630 * The iretq could re-enable interrupts:
15631 */
15632 DISABLE_INTERRUPTS(CLBR_ANY)
15633 + pax_exit_kernel_user
15634 TRACE_IRQS_IRETQ
15635 SWAPGS
15636 jmp restore_args
15637
15638 retint_restore_args: /* return to kernel space */
15639 DISABLE_INTERRUPTS(CLBR_ANY)
15640 + pax_exit_kernel
15641 + pax_force_retaddr RIP-ARGOFFSET
15642 /*
15643 * The iretq could re-enable interrupts:
15644 */
15645 @@ -936,7 +1250,7 @@ ENTRY(retint_kernel)
15646 #endif
15647
15648 CFI_ENDPROC
15649 -END(common_interrupt)
15650 +ENDPROC(common_interrupt)
15651 /*
15652 * End of kprobes section
15653 */
15654 @@ -953,7 +1267,7 @@ ENTRY(\sym)
15655 interrupt \do_sym
15656 jmp ret_from_intr
15657 CFI_ENDPROC
15658 -END(\sym)
15659 +ENDPROC(\sym)
15660 .endm
15661
15662 #ifdef CONFIG_SMP
15663 @@ -1026,12 +1340,22 @@ ENTRY(\sym)
15664 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
15665 call error_entry
15666 DEFAULT_FRAME 0
15667 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15668 + testb $3, CS(%rsp)
15669 + jnz 1f
15670 + pax_enter_kernel
15671 + jmp 2f
15672 +1: pax_enter_kernel_user
15673 +2:
15674 +#else
15675 + pax_enter_kernel
15676 +#endif
15677 movq %rsp,%rdi /* pt_regs pointer */
15678 xorl %esi,%esi /* no error code */
15679 call \do_sym
15680 jmp error_exit /* %ebx: no swapgs flag */
15681 CFI_ENDPROC
15682 -END(\sym)
15683 +ENDPROC(\sym)
15684 .endm
15685
15686 .macro paranoidzeroentry sym do_sym
15687 @@ -1043,15 +1367,25 @@ ENTRY(\sym)
15688 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
15689 call save_paranoid
15690 TRACE_IRQS_OFF
15691 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15692 + testb $3, CS(%rsp)
15693 + jnz 1f
15694 + pax_enter_kernel
15695 + jmp 2f
15696 +1: pax_enter_kernel_user
15697 +2:
15698 +#else
15699 + pax_enter_kernel
15700 +#endif
15701 movq %rsp,%rdi /* pt_regs pointer */
15702 xorl %esi,%esi /* no error code */
15703 call \do_sym
15704 jmp paranoid_exit /* %ebx: no swapgs flag */
15705 CFI_ENDPROC
15706 -END(\sym)
15707 +ENDPROC(\sym)
15708 .endm
15709
15710 -#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
15711 +#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
15712 .macro paranoidzeroentry_ist sym do_sym ist
15713 ENTRY(\sym)
15714 INTR_FRAME
15715 @@ -1061,14 +1395,30 @@ ENTRY(\sym)
15716 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
15717 call save_paranoid
15718 TRACE_IRQS_OFF
15719 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15720 + testb $3, CS(%rsp)
15721 + jnz 1f
15722 + pax_enter_kernel
15723 + jmp 2f
15724 +1: pax_enter_kernel_user
15725 +2:
15726 +#else
15727 + pax_enter_kernel
15728 +#endif
15729 movq %rsp,%rdi /* pt_regs pointer */
15730 xorl %esi,%esi /* no error code */
15731 +#ifdef CONFIG_SMP
15732 + imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
15733 + lea init_tss(%r12), %r12
15734 +#else
15735 + lea init_tss(%rip), %r12
15736 +#endif
15737 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
15738 call \do_sym
15739 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
15740 jmp paranoid_exit /* %ebx: no swapgs flag */
15741 CFI_ENDPROC
15742 -END(\sym)
15743 +ENDPROC(\sym)
15744 .endm
15745
15746 .macro errorentry sym do_sym
15747 @@ -1079,13 +1429,23 @@ ENTRY(\sym)
15748 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
15749 call error_entry
15750 DEFAULT_FRAME 0
15751 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15752 + testb $3, CS(%rsp)
15753 + jnz 1f
15754 + pax_enter_kernel
15755 + jmp 2f
15756 +1: pax_enter_kernel_user
15757 +2:
15758 +#else
15759 + pax_enter_kernel
15760 +#endif
15761 movq %rsp,%rdi /* pt_regs pointer */
15762 movq ORIG_RAX(%rsp),%rsi /* get error code */
15763 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
15764 call \do_sym
15765 jmp error_exit /* %ebx: no swapgs flag */
15766 CFI_ENDPROC
15767 -END(\sym)
15768 +ENDPROC(\sym)
15769 .endm
15770
15771 /* error code is on the stack already */
15772 @@ -1098,13 +1458,23 @@ ENTRY(\sym)
15773 call save_paranoid
15774 DEFAULT_FRAME 0
15775 TRACE_IRQS_OFF
15776 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15777 + testb $3, CS(%rsp)
15778 + jnz 1f
15779 + pax_enter_kernel
15780 + jmp 2f
15781 +1: pax_enter_kernel_user
15782 +2:
15783 +#else
15784 + pax_enter_kernel
15785 +#endif
15786 movq %rsp,%rdi /* pt_regs pointer */
15787 movq ORIG_RAX(%rsp),%rsi /* get error code */
15788 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
15789 call \do_sym
15790 jmp paranoid_exit /* %ebx: no swapgs flag */
15791 CFI_ENDPROC
15792 -END(\sym)
15793 +ENDPROC(\sym)
15794 .endm
15795
15796 zeroentry divide_error do_divide_error
15797 @@ -1134,9 +1504,10 @@ gs_change:
15798 2: mfence /* workaround */
15799 SWAPGS
15800 popfq_cfi
15801 + pax_force_retaddr
15802 ret
15803 CFI_ENDPROC
15804 -END(native_load_gs_index)
15805 +ENDPROC(native_load_gs_index)
15806
15807 .section __ex_table,"a"
15808 .align 8
15809 @@ -1158,13 +1529,14 @@ ENTRY(kernel_thread_helper)
15810 * Here we are in the child and the registers are set as they were
15811 * at kernel_thread() invocation in the parent.
15812 */
15813 + pax_force_fptr %rsi
15814 call *%rsi
15815 # exit
15816 mov %eax, %edi
15817 call do_exit
15818 ud2 # padding for call trace
15819 CFI_ENDPROC
15820 -END(kernel_thread_helper)
15821 +ENDPROC(kernel_thread_helper)
15822
15823 /*
15824 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
15825 @@ -1191,11 +1563,11 @@ ENTRY(kernel_execve)
15826 RESTORE_REST
15827 testq %rax,%rax
15828 je int_ret_from_sys_call
15829 - RESTORE_ARGS
15830 UNFAKE_STACK_FRAME
15831 + pax_force_retaddr
15832 ret
15833 CFI_ENDPROC
15834 -END(kernel_execve)
15835 +ENDPROC(kernel_execve)
15836
15837 /* Call softirq on interrupt stack. Interrupts are off. */
15838 ENTRY(call_softirq)
15839 @@ -1213,9 +1585,10 @@ ENTRY(call_softirq)
15840 CFI_DEF_CFA_REGISTER rsp
15841 CFI_ADJUST_CFA_OFFSET -8
15842 decl PER_CPU_VAR(irq_count)
15843 + pax_force_retaddr
15844 ret
15845 CFI_ENDPROC
15846 -END(call_softirq)
15847 +ENDPROC(call_softirq)
15848
15849 #ifdef CONFIG_XEN
15850 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
15851 @@ -1253,7 +1626,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
15852 decl PER_CPU_VAR(irq_count)
15853 jmp error_exit
15854 CFI_ENDPROC
15855 -END(xen_do_hypervisor_callback)
15856 +ENDPROC(xen_do_hypervisor_callback)
15857
15858 /*
15859 * Hypervisor uses this for application faults while it executes.
15860 @@ -1312,7 +1685,7 @@ ENTRY(xen_failsafe_callback)
15861 SAVE_ALL
15862 jmp error_exit
15863 CFI_ENDPROC
15864 -END(xen_failsafe_callback)
15865 +ENDPROC(xen_failsafe_callback)
15866
15867 apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
15868 xen_hvm_callback_vector xen_evtchn_do_upcall
15869 @@ -1361,16 +1734,31 @@ ENTRY(paranoid_exit)
15870 TRACE_IRQS_OFF
15871 testl %ebx,%ebx /* swapgs needed? */
15872 jnz paranoid_restore
15873 - testl $3,CS(%rsp)
15874 + testb $3,CS(%rsp)
15875 jnz paranoid_userspace
15876 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15877 + pax_exit_kernel
15878 + TRACE_IRQS_IRETQ 0
15879 + SWAPGS_UNSAFE_STACK
15880 + RESTORE_ALL 8
15881 + pax_force_retaddr_bts
15882 + jmp irq_return
15883 +#endif
15884 paranoid_swapgs:
15885 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15886 + pax_exit_kernel_user
15887 +#else
15888 + pax_exit_kernel
15889 +#endif
15890 TRACE_IRQS_IRETQ 0
15891 SWAPGS_UNSAFE_STACK
15892 RESTORE_ALL 8
15893 jmp irq_return
15894 paranoid_restore:
15895 + pax_exit_kernel
15896 TRACE_IRQS_IRETQ 0
15897 RESTORE_ALL 8
15898 + pax_force_retaddr_bts
15899 jmp irq_return
15900 paranoid_userspace:
15901 GET_THREAD_INFO(%rcx)
15902 @@ -1399,7 +1787,7 @@ paranoid_schedule:
15903 TRACE_IRQS_OFF
15904 jmp paranoid_userspace
15905 CFI_ENDPROC
15906 -END(paranoid_exit)
15907 +ENDPROC(paranoid_exit)
15908
15909 /*
15910 * Exception entry point. This expects an error code/orig_rax on the stack.
15911 @@ -1426,12 +1814,13 @@ ENTRY(error_entry)
15912 movq_cfi r14, R14+8
15913 movq_cfi r15, R15+8
15914 xorl %ebx,%ebx
15915 - testl $3,CS+8(%rsp)
15916 + testb $3,CS+8(%rsp)
15917 je error_kernelspace
15918 error_swapgs:
15919 SWAPGS
15920 error_sti:
15921 TRACE_IRQS_OFF
15922 + pax_force_retaddr_bts
15923 ret
15924
15925 /*
15926 @@ -1458,7 +1847,7 @@ bstep_iret:
15927 movq %rcx,RIP+8(%rsp)
15928 jmp error_swapgs
15929 CFI_ENDPROC
15930 -END(error_entry)
15931 +ENDPROC(error_entry)
15932
15933
15934 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
15935 @@ -1478,7 +1867,7 @@ ENTRY(error_exit)
15936 jnz retint_careful
15937 jmp retint_swapgs
15938 CFI_ENDPROC
15939 -END(error_exit)
15940 +ENDPROC(error_exit)
15941
15942 /*
15943 * Test if a given stack is an NMI stack or not.
15944 @@ -1535,9 +1924,11 @@ ENTRY(nmi)
15945 * If %cs was not the kernel segment, then the NMI triggered in user
15946 * space, which means it is definitely not nested.
15947 */
15948 + cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
15949 + je 1f
15950 cmpl $__KERNEL_CS, 16(%rsp)
15951 jne first_nmi
15952 -
15953 +1:
15954 /*
15955 * Check the special variable on the stack to see if NMIs are
15956 * executing.
15957 @@ -1659,6 +2050,16 @@ restart_nmi:
15958 */
15959 call save_paranoid
15960 DEFAULT_FRAME 0
15961 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15962 + testb $3, CS(%rsp)
15963 + jnz 1f
15964 + pax_enter_kernel
15965 + jmp 2f
15966 +1: pax_enter_kernel_user
15967 +2:
15968 +#else
15969 + pax_enter_kernel
15970 +#endif
15971 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
15972 movq %rsp,%rdi
15973 movq $-1,%rsi
15974 @@ -1666,14 +2067,25 @@ restart_nmi:
15975 testl %ebx,%ebx /* swapgs needed? */
15976 jnz nmi_restore
15977 nmi_swapgs:
15978 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15979 + pax_exit_kernel_user
15980 +#else
15981 + pax_exit_kernel
15982 +#endif
15983 SWAPGS_UNSAFE_STACK
15984 + RESTORE_ALL 8
15985 + /* Clear the NMI executing stack variable */
15986 + movq $0, 10*8(%rsp)
15987 + jmp irq_return
15988 nmi_restore:
15989 + pax_exit_kernel
15990 RESTORE_ALL 8
15991 + pax_force_retaddr_bts
15992 /* Clear the NMI executing stack variable */
15993 movq $0, 10*8(%rsp)
15994 jmp irq_return
15995 CFI_ENDPROC
15996 -END(nmi)
15997 +ENDPROC(nmi)
15998
15999 /*
16000 * If an NMI hit an iret because of an exception or breakpoint,
16001 @@ -1700,7 +2112,7 @@ ENTRY(ignore_sysret)
16002 mov $-ENOSYS,%eax
16003 sysret
16004 CFI_ENDPROC
16005 -END(ignore_sysret)
16006 +ENDPROC(ignore_sysret)
16007
16008 /*
16009 * End of kprobes section
16010 diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
16011 index c9a281f..ce2f317 100644
16012 --- a/arch/x86/kernel/ftrace.c
16013 +++ b/arch/x86/kernel/ftrace.c
16014 @@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the IP to write to */
16015 static const void *mod_code_newcode; /* holds the text to write to the IP */
16016
16017 static unsigned nmi_wait_count;
16018 -static atomic_t nmi_update_count = ATOMIC_INIT(0);
16019 +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
16020
16021 int ftrace_arch_read_dyn_info(char *buf, int size)
16022 {
16023 @@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
16024
16025 r = snprintf(buf, size, "%u %u",
16026 nmi_wait_count,
16027 - atomic_read(&nmi_update_count));
16028 + atomic_read_unchecked(&nmi_update_count));
16029 return r;
16030 }
16031
16032 @@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
16033
16034 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
16035 smp_rmb();
16036 + pax_open_kernel();
16037 ftrace_mod_code();
16038 - atomic_inc(&nmi_update_count);
16039 + pax_close_kernel();
16040 + atomic_inc_unchecked(&nmi_update_count);
16041 }
16042 /* Must have previous changes seen before executions */
16043 smp_mb();
16044 @@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
16045 {
16046 unsigned char replaced[MCOUNT_INSN_SIZE];
16047
16048 + ip = ktla_ktva(ip);
16049 +
16050 /*
16051 * Note: Due to modules and __init, code can
16052 * disappear and change, we need to protect against faulting
16053 @@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
16054 unsigned char old[MCOUNT_INSN_SIZE], *new;
16055 int ret;
16056
16057 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
16058 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
16059 new = ftrace_call_replace(ip, (unsigned long)func);
16060 ret = ftrace_modify_code(ip, old, new);
16061
16062 @@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long ip,
16063 {
16064 unsigned char code[MCOUNT_INSN_SIZE];
16065
16066 + ip = ktla_ktva(ip);
16067 +
16068 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
16069 return -EFAULT;
16070
16071 diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
16072 index 51ff186..9e77418 100644
16073 --- a/arch/x86/kernel/head32.c
16074 +++ b/arch/x86/kernel/head32.c
16075 @@ -19,6 +19,7 @@
16076 #include <asm/io_apic.h>
16077 #include <asm/bios_ebda.h>
16078 #include <asm/tlbflush.h>
16079 +#include <asm/boot.h>
16080
16081 static void __init i386_default_early_setup(void)
16082 {
16083 @@ -31,8 +32,7 @@ static void __init i386_default_early_setup(void)
16084
16085 void __init i386_start_kernel(void)
16086 {
16087 - memblock_reserve(__pa_symbol(&_text),
16088 - __pa_symbol(&__bss_stop) - __pa_symbol(&_text));
16089 + memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop) - LOAD_PHYSICAL_ADDR);
16090
16091 #ifdef CONFIG_BLK_DEV_INITRD
16092 /* Reserve INITRD */
16093 diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
16094 index ce0be7c..c41476e 100644
16095 --- a/arch/x86/kernel/head_32.S
16096 +++ b/arch/x86/kernel/head_32.S
16097 @@ -25,6 +25,12 @@
16098 /* Physical address */
16099 #define pa(X) ((X) - __PAGE_OFFSET)
16100
16101 +#ifdef CONFIG_PAX_KERNEXEC
16102 +#define ta(X) (X)
16103 +#else
16104 +#define ta(X) ((X) - __PAGE_OFFSET)
16105 +#endif
16106 +
16107 /*
16108 * References to members of the new_cpu_data structure.
16109 */
16110 @@ -54,11 +60,7 @@
16111 * and small than max_low_pfn, otherwise will waste some page table entries
16112 */
16113
16114 -#if PTRS_PER_PMD > 1
16115 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
16116 -#else
16117 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
16118 -#endif
16119 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
16120
16121 /* Number of possible pages in the lowmem region */
16122 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
16123 @@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
16124 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16125
16126 /*
16127 + * Real beginning of normal "text" segment
16128 + */
16129 +ENTRY(stext)
16130 +ENTRY(_stext)
16131 +
16132 +/*
16133 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
16134 * %esi points to the real-mode code as a 32-bit pointer.
16135 * CS and DS must be 4 GB flat segments, but we don't depend on
16136 @@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16137 * can.
16138 */
16139 __HEAD
16140 +
16141 +#ifdef CONFIG_PAX_KERNEXEC
16142 + jmp startup_32
16143 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
16144 +.fill PAGE_SIZE-5,1,0xcc
16145 +#endif
16146 +
16147 ENTRY(startup_32)
16148 movl pa(stack_start),%ecx
16149
16150 @@ -105,6 +120,57 @@ ENTRY(startup_32)
16151 2:
16152 leal -__PAGE_OFFSET(%ecx),%esp
16153
16154 +#ifdef CONFIG_SMP
16155 + movl $pa(cpu_gdt_table),%edi
16156 + movl $__per_cpu_load,%eax
16157 + movw %ax,__KERNEL_PERCPU + 2(%edi)
16158 + rorl $16,%eax
16159 + movb %al,__KERNEL_PERCPU + 4(%edi)
16160 + movb %ah,__KERNEL_PERCPU + 7(%edi)
16161 + movl $__per_cpu_end - 1,%eax
16162 + subl $__per_cpu_start,%eax
16163 + movw %ax,__KERNEL_PERCPU + 0(%edi)
16164 +#endif
16165 +
16166 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16167 + movl $NR_CPUS,%ecx
16168 + movl $pa(cpu_gdt_table),%edi
16169 +1:
16170 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
16171 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
16172 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
16173 + addl $PAGE_SIZE_asm,%edi
16174 + loop 1b
16175 +#endif
16176 +
16177 +#ifdef CONFIG_PAX_KERNEXEC
16178 + movl $pa(boot_gdt),%edi
16179 + movl $__LOAD_PHYSICAL_ADDR,%eax
16180 + movw %ax,__BOOT_CS + 2(%edi)
16181 + rorl $16,%eax
16182 + movb %al,__BOOT_CS + 4(%edi)
16183 + movb %ah,__BOOT_CS + 7(%edi)
16184 + rorl $16,%eax
16185 +
16186 + ljmp $(__BOOT_CS),$1f
16187 +1:
16188 +
16189 + movl $NR_CPUS,%ecx
16190 + movl $pa(cpu_gdt_table),%edi
16191 + addl $__PAGE_OFFSET,%eax
16192 +1:
16193 + movw %ax,__KERNEL_CS + 2(%edi)
16194 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
16195 + rorl $16,%eax
16196 + movb %al,__KERNEL_CS + 4(%edi)
16197 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
16198 + movb %ah,__KERNEL_CS + 7(%edi)
16199 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
16200 + rorl $16,%eax
16201 + addl $PAGE_SIZE_asm,%edi
16202 + loop 1b
16203 +#endif
16204 +
16205 /*
16206 * Clear BSS first so that there are no surprises...
16207 */
16208 @@ -195,8 +261,11 @@ ENTRY(startup_32)
16209 movl %eax, pa(max_pfn_mapped)
16210
16211 /* Do early initialization of the fixmap area */
16212 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
16213 - movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
16214 +#ifdef CONFIG_COMPAT_VDSO
16215 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
16216 +#else
16217 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
16218 +#endif
16219 #else /* Not PAE */
16220
16221 page_pde_offset = (__PAGE_OFFSET >> 20);
16222 @@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
16223 movl %eax, pa(max_pfn_mapped)
16224
16225 /* Do early initialization of the fixmap area */
16226 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
16227 - movl %eax,pa(initial_page_table+0xffc)
16228 +#ifdef CONFIG_COMPAT_VDSO
16229 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
16230 +#else
16231 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
16232 +#endif
16233 #endif
16234
16235 #ifdef CONFIG_PARAVIRT
16236 @@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
16237 cmpl $num_subarch_entries, %eax
16238 jae bad_subarch
16239
16240 - movl pa(subarch_entries)(,%eax,4), %eax
16241 - subl $__PAGE_OFFSET, %eax
16242 - jmp *%eax
16243 + jmp *pa(subarch_entries)(,%eax,4)
16244
16245 bad_subarch:
16246 WEAK(lguest_entry)
16247 @@ -255,10 +325,10 @@ WEAK(xen_entry)
16248 __INITDATA
16249
16250 subarch_entries:
16251 - .long default_entry /* normal x86/PC */
16252 - .long lguest_entry /* lguest hypervisor */
16253 - .long xen_entry /* Xen hypervisor */
16254 - .long default_entry /* Moorestown MID */
16255 + .long ta(default_entry) /* normal x86/PC */
16256 + .long ta(lguest_entry) /* lguest hypervisor */
16257 + .long ta(xen_entry) /* Xen hypervisor */
16258 + .long ta(default_entry) /* Moorestown MID */
16259 num_subarch_entries = (. - subarch_entries) / 4
16260 .previous
16261 #else
16262 @@ -312,6 +382,7 @@ default_entry:
16263 orl %edx,%eax
16264 movl %eax,%cr4
16265
16266 +#ifdef CONFIG_X86_PAE
16267 testb $X86_CR4_PAE, %al # check if PAE is enabled
16268 jz 6f
16269
16270 @@ -340,6 +411,9 @@ default_entry:
16271 /* Make changes effective */
16272 wrmsr
16273
16274 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
16275 +#endif
16276 +
16277 6:
16278
16279 /*
16280 @@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
16281 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
16282 movl %eax,%ss # after changing gdt.
16283
16284 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
16285 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
16286 movl %eax,%ds
16287 movl %eax,%es
16288
16289 @@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
16290 */
16291 cmpb $0,ready
16292 jne 1f
16293 - movl $gdt_page,%eax
16294 + movl $cpu_gdt_table,%eax
16295 movl $stack_canary,%ecx
16296 +#ifdef CONFIG_SMP
16297 + addl $__per_cpu_load,%ecx
16298 +#endif
16299 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
16300 shrl $16, %ecx
16301 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
16302 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
16303 1:
16304 -#endif
16305 movl $(__KERNEL_STACK_CANARY),%eax
16306 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
16307 + movl $(__USER_DS),%eax
16308 +#else
16309 + xorl %eax,%eax
16310 +#endif
16311 movl %eax,%gs
16312
16313 xorl %eax,%eax # Clear LDT
16314 @@ -558,22 +639,22 @@ early_page_fault:
16315 jmp early_fault
16316
16317 early_fault:
16318 - cld
16319 #ifdef CONFIG_PRINTK
16320 + cmpl $1,%ss:early_recursion_flag
16321 + je hlt_loop
16322 + incl %ss:early_recursion_flag
16323 + cld
16324 pusha
16325 movl $(__KERNEL_DS),%eax
16326 movl %eax,%ds
16327 movl %eax,%es
16328 - cmpl $2,early_recursion_flag
16329 - je hlt_loop
16330 - incl early_recursion_flag
16331 movl %cr2,%eax
16332 pushl %eax
16333 pushl %edx /* trapno */
16334 pushl $fault_msg
16335 call printk
16336 +; call dump_stack
16337 #endif
16338 - call dump_stack
16339 hlt_loop:
16340 hlt
16341 jmp hlt_loop
16342 @@ -581,8 +662,11 @@ hlt_loop:
16343 /* This is the default interrupt "handler" :-) */
16344 ALIGN
16345 ignore_int:
16346 - cld
16347 #ifdef CONFIG_PRINTK
16348 + cmpl $2,%ss:early_recursion_flag
16349 + je hlt_loop
16350 + incl %ss:early_recursion_flag
16351 + cld
16352 pushl %eax
16353 pushl %ecx
16354 pushl %edx
16355 @@ -591,9 +675,6 @@ ignore_int:
16356 movl $(__KERNEL_DS),%eax
16357 movl %eax,%ds
16358 movl %eax,%es
16359 - cmpl $2,early_recursion_flag
16360 - je hlt_loop
16361 - incl early_recursion_flag
16362 pushl 16(%esp)
16363 pushl 24(%esp)
16364 pushl 32(%esp)
16365 @@ -622,29 +703,43 @@ ENTRY(initial_code)
16366 /*
16367 * BSS section
16368 */
16369 -__PAGE_ALIGNED_BSS
16370 - .align PAGE_SIZE
16371 #ifdef CONFIG_X86_PAE
16372 +.section .initial_pg_pmd,"a",@progbits
16373 initial_pg_pmd:
16374 .fill 1024*KPMDS,4,0
16375 #else
16376 +.section .initial_page_table,"a",@progbits
16377 ENTRY(initial_page_table)
16378 .fill 1024,4,0
16379 #endif
16380 +.section .initial_pg_fixmap,"a",@progbits
16381 initial_pg_fixmap:
16382 .fill 1024,4,0
16383 +.section .empty_zero_page,"a",@progbits
16384 ENTRY(empty_zero_page)
16385 .fill 4096,1,0
16386 +.section .swapper_pg_dir,"a",@progbits
16387 ENTRY(swapper_pg_dir)
16388 +#ifdef CONFIG_X86_PAE
16389 + .fill 4,8,0
16390 +#else
16391 .fill 1024,4,0
16392 +#endif
16393 +
16394 +/*
16395 + * The IDT has to be page-aligned to simplify the Pentium
16396 + * F0 0F bug workaround.. We have a special link segment
16397 + * for this.
16398 + */
16399 +.section .idt,"a",@progbits
16400 +ENTRY(idt_table)
16401 + .fill 256,8,0
16402
16403 /*
16404 * This starts the data section.
16405 */
16406 #ifdef CONFIG_X86_PAE
16407 -__PAGE_ALIGNED_DATA
16408 - /* Page-aligned for the benefit of paravirt? */
16409 - .align PAGE_SIZE
16410 +.section .initial_page_table,"a",@progbits
16411 ENTRY(initial_page_table)
16412 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
16413 # if KPMDS == 3
16414 @@ -663,18 +758,27 @@ ENTRY(initial_page_table)
16415 # error "Kernel PMDs should be 1, 2 or 3"
16416 # endif
16417 .align PAGE_SIZE /* needs to be page-sized too */
16418 +
16419 +#ifdef CONFIG_PAX_PER_CPU_PGD
16420 +ENTRY(cpu_pgd)
16421 + .rept NR_CPUS
16422 + .fill 4,8,0
16423 + .endr
16424 +#endif
16425 +
16426 #endif
16427
16428 .data
16429 .balign 4
16430 ENTRY(stack_start)
16431 - .long init_thread_union+THREAD_SIZE
16432 + .long init_thread_union+THREAD_SIZE-8
16433
16434 +ready: .byte 0
16435 +
16436 +.section .rodata,"a",@progbits
16437 early_recursion_flag:
16438 .long 0
16439
16440 -ready: .byte 0
16441 -
16442 int_msg:
16443 .asciz "Unknown interrupt or fault at: %p %p %p\n"
16444
16445 @@ -707,7 +811,7 @@ fault_msg:
16446 .word 0 # 32 bit align gdt_desc.address
16447 boot_gdt_descr:
16448 .word __BOOT_DS+7
16449 - .long boot_gdt - __PAGE_OFFSET
16450 + .long pa(boot_gdt)
16451
16452 .word 0 # 32-bit align idt_desc.address
16453 idt_descr:
16454 @@ -718,7 +822,7 @@ idt_descr:
16455 .word 0 # 32 bit align gdt_desc.address
16456 ENTRY(early_gdt_descr)
16457 .word GDT_ENTRIES*8-1
16458 - .long gdt_page /* Overwritten for secondary CPUs */
16459 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
16460
16461 /*
16462 * The boot_gdt must mirror the equivalent in setup.S and is
16463 @@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
16464 .align L1_CACHE_BYTES
16465 ENTRY(boot_gdt)
16466 .fill GDT_ENTRY_BOOT_CS,8,0
16467 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
16468 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
16469 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
16470 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
16471 +
16472 + .align PAGE_SIZE_asm
16473 +ENTRY(cpu_gdt_table)
16474 + .rept NR_CPUS
16475 + .quad 0x0000000000000000 /* NULL descriptor */
16476 + .quad 0x0000000000000000 /* 0x0b reserved */
16477 + .quad 0x0000000000000000 /* 0x13 reserved */
16478 + .quad 0x0000000000000000 /* 0x1b reserved */
16479 +
16480 +#ifdef CONFIG_PAX_KERNEXEC
16481 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
16482 +#else
16483 + .quad 0x0000000000000000 /* 0x20 unused */
16484 +#endif
16485 +
16486 + .quad 0x0000000000000000 /* 0x28 unused */
16487 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
16488 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
16489 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
16490 + .quad 0x0000000000000000 /* 0x4b reserved */
16491 + .quad 0x0000000000000000 /* 0x53 reserved */
16492 + .quad 0x0000000000000000 /* 0x5b reserved */
16493 +
16494 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
16495 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
16496 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
16497 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
16498 +
16499 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
16500 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
16501 +
16502 + /*
16503 + * Segments used for calling PnP BIOS have byte granularity.
16504 + * The code segments and data segments have fixed 64k limits,
16505 + * the transfer segment sizes are set at run time.
16506 + */
16507 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
16508 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
16509 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
16510 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
16511 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
16512 +
16513 + /*
16514 + * The APM segments have byte granularity and their bases
16515 + * are set at run time. All have 64k limits.
16516 + */
16517 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
16518 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
16519 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
16520 +
16521 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
16522 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
16523 + .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
16524 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
16525 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
16526 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
16527 +
16528 + /* Be sure this is zeroed to avoid false validations in Xen */
16529 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
16530 + .endr
16531 diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
16532 index 40f4eb3..6d24d9d 100644
16533 --- a/arch/x86/kernel/head_64.S
16534 +++ b/arch/x86/kernel/head_64.S
16535 @@ -19,6 +19,8 @@
16536 #include <asm/cache.h>
16537 #include <asm/processor-flags.h>
16538 #include <asm/percpu.h>
16539 +#include <asm/cpufeature.h>
16540 +#include <asm/alternative-asm.h>
16541
16542 #ifdef CONFIG_PARAVIRT
16543 #include <asm/asm-offsets.h>
16544 @@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
16545 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
16546 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
16547 L3_START_KERNEL = pud_index(__START_KERNEL_map)
16548 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
16549 +L3_VMALLOC_START = pud_index(VMALLOC_START)
16550 +L4_VMALLOC_END = pgd_index(VMALLOC_END)
16551 +L3_VMALLOC_END = pud_index(VMALLOC_END)
16552 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
16553 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
16554
16555 .text
16556 __HEAD
16557 @@ -85,35 +93,23 @@ startup_64:
16558 */
16559 addq %rbp, init_level4_pgt + 0(%rip)
16560 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
16561 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
16562 + addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
16563 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
16564 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
16565
16566 addq %rbp, level3_ident_pgt + 0(%rip)
16567 +#ifndef CONFIG_XEN
16568 + addq %rbp, level3_ident_pgt + 8(%rip)
16569 +#endif
16570
16571 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
16572 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
16573 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
16574 +
16575 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
16576 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
16577
16578 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
16579 -
16580 - /* Add an Identity mapping if I am above 1G */
16581 - leaq _text(%rip), %rdi
16582 - andq $PMD_PAGE_MASK, %rdi
16583 -
16584 - movq %rdi, %rax
16585 - shrq $PUD_SHIFT, %rax
16586 - andq $(PTRS_PER_PUD - 1), %rax
16587 - jz ident_complete
16588 -
16589 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
16590 - leaq level3_ident_pgt(%rip), %rbx
16591 - movq %rdx, 0(%rbx, %rax, 8)
16592 -
16593 - movq %rdi, %rax
16594 - shrq $PMD_SHIFT, %rax
16595 - andq $(PTRS_PER_PMD - 1), %rax
16596 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
16597 - leaq level2_spare_pgt(%rip), %rbx
16598 - movq %rdx, 0(%rbx, %rax, 8)
16599 -ident_complete:
16600 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
16601
16602 /*
16603 * Fixup the kernel text+data virtual addresses. Note that
16604 @@ -160,8 +156,8 @@ ENTRY(secondary_startup_64)
16605 * after the boot processor executes this code.
16606 */
16607
16608 - /* Enable PAE mode and PGE */
16609 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
16610 + /* Enable PAE mode and PSE/PGE */
16611 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
16612 movq %rax, %cr4
16613
16614 /* Setup early boot stage 4 level pagetables. */
16615 @@ -183,9 +179,17 @@ ENTRY(secondary_startup_64)
16616 movl $MSR_EFER, %ecx
16617 rdmsr
16618 btsl $_EFER_SCE, %eax /* Enable System Call */
16619 - btl $20,%edi /* No Execute supported? */
16620 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
16621 jnc 1f
16622 btsl $_EFER_NX, %eax
16623 + leaq init_level4_pgt(%rip), %rdi
16624 +#ifndef CONFIG_EFI
16625 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
16626 +#endif
16627 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
16628 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
16629 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
16630 + btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
16631 1: wrmsr /* Make changes effective */
16632
16633 /* Setup cr0 */
16634 @@ -247,6 +251,7 @@ ENTRY(secondary_startup_64)
16635 * jump. In addition we need to ensure %cs is set so we make this
16636 * a far return.
16637 */
16638 + pax_set_fptr_mask
16639 movq initial_code(%rip),%rax
16640 pushq $0 # fake return address to stop unwinder
16641 pushq $__KERNEL_CS # set correct cs
16642 @@ -269,7 +274,7 @@ ENTRY(secondary_startup_64)
16643 bad_address:
16644 jmp bad_address
16645
16646 - .section ".init.text","ax"
16647 + __INIT
16648 #ifdef CONFIG_EARLY_PRINTK
16649 .globl early_idt_handlers
16650 early_idt_handlers:
16651 @@ -314,18 +319,23 @@ ENTRY(early_idt_handler)
16652 #endif /* EARLY_PRINTK */
16653 1: hlt
16654 jmp 1b
16655 + .previous
16656
16657 #ifdef CONFIG_EARLY_PRINTK
16658 + __INITDATA
16659 early_recursion_flag:
16660 .long 0
16661 + .previous
16662
16663 + .section .rodata,"a",@progbits
16664 early_idt_msg:
16665 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
16666 early_idt_ripmsg:
16667 .asciz "RIP %s\n"
16668 + .previous
16669 #endif /* CONFIG_EARLY_PRINTK */
16670 - .previous
16671
16672 + .section .rodata,"a",@progbits
16673 #define NEXT_PAGE(name) \
16674 .balign PAGE_SIZE; \
16675 ENTRY(name)
16676 @@ -338,7 +348,6 @@ ENTRY(name)
16677 i = i + 1 ; \
16678 .endr
16679
16680 - .data
16681 /*
16682 * This default setting generates an ident mapping at address 0x100000
16683 * and a mapping for the kernel that precisely maps virtual address
16684 @@ -349,13 +358,41 @@ NEXT_PAGE(init_level4_pgt)
16685 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
16686 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
16687 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
16688 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
16689 + .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
16690 + .org init_level4_pgt + L4_VMALLOC_END*8, 0
16691 + .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
16692 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
16693 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
16694 .org init_level4_pgt + L4_START_KERNEL*8, 0
16695 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
16696 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
16697
16698 +#ifdef CONFIG_PAX_PER_CPU_PGD
16699 +NEXT_PAGE(cpu_pgd)
16700 + .rept NR_CPUS
16701 + .fill 512,8,0
16702 + .endr
16703 +#endif
16704 +
16705 NEXT_PAGE(level3_ident_pgt)
16706 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
16707 +#ifdef CONFIG_XEN
16708 .fill 511,8,0
16709 +#else
16710 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
16711 + .fill 510,8,0
16712 +#endif
16713 +
16714 +NEXT_PAGE(level3_vmalloc_start_pgt)
16715 + .fill 512,8,0
16716 +
16717 +NEXT_PAGE(level3_vmalloc_end_pgt)
16718 + .fill 512,8,0
16719 +
16720 +NEXT_PAGE(level3_vmemmap_pgt)
16721 + .fill L3_VMEMMAP_START,8,0
16722 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
16723
16724 NEXT_PAGE(level3_kernel_pgt)
16725 .fill L3_START_KERNEL,8,0
16726 @@ -363,20 +400,23 @@ NEXT_PAGE(level3_kernel_pgt)
16727 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
16728 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
16729
16730 +NEXT_PAGE(level2_vmemmap_pgt)
16731 + .fill 512,8,0
16732 +
16733 NEXT_PAGE(level2_fixmap_pgt)
16734 - .fill 506,8,0
16735 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
16736 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
16737 - .fill 5,8,0
16738 + .fill 507,8,0
16739 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
16740 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
16741 + .fill 4,8,0
16742
16743 -NEXT_PAGE(level1_fixmap_pgt)
16744 +NEXT_PAGE(level1_vsyscall_pgt)
16745 .fill 512,8,0
16746
16747 -NEXT_PAGE(level2_ident_pgt)
16748 - /* Since I easily can, map the first 1G.
16749 + /* Since I easily can, map the first 2G.
16750 * Don't set NX because code runs from these pages.
16751 */
16752 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
16753 +NEXT_PAGE(level2_ident_pgt)
16754 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
16755
16756 NEXT_PAGE(level2_kernel_pgt)
16757 /*
16758 @@ -389,37 +429,59 @@ NEXT_PAGE(level2_kernel_pgt)
16759 * If you want to increase this then increase MODULES_VADDR
16760 * too.)
16761 */
16762 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
16763 - KERNEL_IMAGE_SIZE/PMD_SIZE)
16764 -
16765 -NEXT_PAGE(level2_spare_pgt)
16766 - .fill 512, 8, 0
16767 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
16768
16769 #undef PMDS
16770 #undef NEXT_PAGE
16771
16772 - .data
16773 + .align PAGE_SIZE
16774 +ENTRY(cpu_gdt_table)
16775 + .rept NR_CPUS
16776 + .quad 0x0000000000000000 /* NULL descriptor */
16777 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
16778 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
16779 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
16780 + .quad 0x00cffb000000ffff /* __USER32_CS */
16781 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
16782 + .quad 0x00affb000000ffff /* __USER_CS */
16783 +
16784 +#ifdef CONFIG_PAX_KERNEXEC
16785 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
16786 +#else
16787 + .quad 0x0 /* unused */
16788 +#endif
16789 +
16790 + .quad 0,0 /* TSS */
16791 + .quad 0,0 /* LDT */
16792 + .quad 0,0,0 /* three TLS descriptors */
16793 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
16794 + /* asm/segment.h:GDT_ENTRIES must match this */
16795 +
16796 + /* zero the remaining page */
16797 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
16798 + .endr
16799 +
16800 .align 16
16801 .globl early_gdt_descr
16802 early_gdt_descr:
16803 .word GDT_ENTRIES*8-1
16804 early_gdt_descr_base:
16805 - .quad INIT_PER_CPU_VAR(gdt_page)
16806 + .quad cpu_gdt_table
16807
16808 ENTRY(phys_base)
16809 /* This must match the first entry in level2_kernel_pgt */
16810 .quad 0x0000000000000000
16811
16812 #include "../../x86/xen/xen-head.S"
16813 -
16814 - .section .bss, "aw", @nobits
16815 +
16816 + .section .rodata,"a",@progbits
16817 .align L1_CACHE_BYTES
16818 ENTRY(idt_table)
16819 - .skip IDT_ENTRIES * 16
16820 + .fill 512,8,0
16821
16822 .align L1_CACHE_BYTES
16823 ENTRY(nmi_idt_table)
16824 - .skip IDT_ENTRIES * 16
16825 + .fill 512,8,0
16826
16827 __PAGE_ALIGNED_BSS
16828 .align PAGE_SIZE
16829 diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
16830 index 9c3bd4a..e1d9b35 100644
16831 --- a/arch/x86/kernel/i386_ksyms_32.c
16832 +++ b/arch/x86/kernel/i386_ksyms_32.c
16833 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
16834 EXPORT_SYMBOL(cmpxchg8b_emu);
16835 #endif
16836
16837 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
16838 +
16839 /* Networking helper routines. */
16840 EXPORT_SYMBOL(csum_partial_copy_generic);
16841 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
16842 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
16843
16844 EXPORT_SYMBOL(__get_user_1);
16845 EXPORT_SYMBOL(__get_user_2);
16846 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
16847
16848 EXPORT_SYMBOL(csum_partial);
16849 EXPORT_SYMBOL(empty_zero_page);
16850 +
16851 +#ifdef CONFIG_PAX_KERNEXEC
16852 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
16853 +#endif
16854 diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
16855 index 6104852..6114160 100644
16856 --- a/arch/x86/kernel/i8259.c
16857 +++ b/arch/x86/kernel/i8259.c
16858 @@ -210,7 +210,7 @@ spurious_8259A_irq:
16859 "spurious 8259A interrupt: IRQ%d.\n", irq);
16860 spurious_irq_mask |= irqmask;
16861 }
16862 - atomic_inc(&irq_err_count);
16863 + atomic_inc_unchecked(&irq_err_count);
16864 /*
16865 * Theoretically we do not have to handle this IRQ,
16866 * but in Linux this does not cause problems and is
16867 diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
16868 index 43e9ccf..44ccf6f 100644
16869 --- a/arch/x86/kernel/init_task.c
16870 +++ b/arch/x86/kernel/init_task.c
16871 @@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
16872 * way process stacks are handled. This is done by having a special
16873 * "init_task" linker map entry..
16874 */
16875 -union thread_union init_thread_union __init_task_data =
16876 - { INIT_THREAD_INFO(init_task) };
16877 +union thread_union init_thread_union __init_task_data;
16878
16879 /*
16880 * Initial task structure.
16881 @@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
16882 * section. Since TSS's are completely CPU-local, we want them
16883 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
16884 */
16885 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
16886 -
16887 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
16888 +EXPORT_SYMBOL(init_tss);
16889 diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
16890 index 8c96897..be66bfa 100644
16891 --- a/arch/x86/kernel/ioport.c
16892 +++ b/arch/x86/kernel/ioport.c
16893 @@ -6,6 +6,7 @@
16894 #include <linux/sched.h>
16895 #include <linux/kernel.h>
16896 #include <linux/capability.h>
16897 +#include <linux/security.h>
16898 #include <linux/errno.h>
16899 #include <linux/types.h>
16900 #include <linux/ioport.h>
16901 @@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
16902
16903 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
16904 return -EINVAL;
16905 +#ifdef CONFIG_GRKERNSEC_IO
16906 + if (turn_on && grsec_disable_privio) {
16907 + gr_handle_ioperm();
16908 + return -EPERM;
16909 + }
16910 +#endif
16911 if (turn_on && !capable(CAP_SYS_RAWIO))
16912 return -EPERM;
16913
16914 @@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
16915 * because the ->io_bitmap_max value must match the bitmap
16916 * contents:
16917 */
16918 - tss = &per_cpu(init_tss, get_cpu());
16919 + tss = init_tss + get_cpu();
16920
16921 if (turn_on)
16922 bitmap_clear(t->io_bitmap_ptr, from, num);
16923 @@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct pt_regs *regs)
16924 return -EINVAL;
16925 /* Trying to gain more privileges? */
16926 if (level > old) {
16927 +#ifdef CONFIG_GRKERNSEC_IO
16928 + if (grsec_disable_privio) {
16929 + gr_handle_iopl();
16930 + return -EPERM;
16931 + }
16932 +#endif
16933 if (!capable(CAP_SYS_RAWIO))
16934 return -EPERM;
16935 }
16936 diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
16937 index 7943e0c..dd32c5c 100644
16938 --- a/arch/x86/kernel/irq.c
16939 +++ b/arch/x86/kernel/irq.c
16940 @@ -18,7 +18,7 @@
16941 #include <asm/mce.h>
16942 #include <asm/hw_irq.h>
16943
16944 -atomic_t irq_err_count;
16945 +atomic_unchecked_t irq_err_count;
16946
16947 /* Function pointer for generic interrupt vector handling */
16948 void (*x86_platform_ipi_callback)(void) = NULL;
16949 @@ -121,9 +121,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
16950 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
16951 seq_printf(p, " Machine check polls\n");
16952 #endif
16953 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
16954 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
16955 #if defined(CONFIG_X86_IO_APIC)
16956 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
16957 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
16958 #endif
16959 return 0;
16960 }
16961 @@ -164,10 +164,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
16962
16963 u64 arch_irq_stat(void)
16964 {
16965 - u64 sum = atomic_read(&irq_err_count);
16966 + u64 sum = atomic_read_unchecked(&irq_err_count);
16967
16968 #ifdef CONFIG_X86_IO_APIC
16969 - sum += atomic_read(&irq_mis_count);
16970 + sum += atomic_read_unchecked(&irq_mis_count);
16971 #endif
16972 return sum;
16973 }
16974 diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
16975 index 40fc861..9b8739b 100644
16976 --- a/arch/x86/kernel/irq_32.c
16977 +++ b/arch/x86/kernel/irq_32.c
16978 @@ -39,7 +39,7 @@ static int check_stack_overflow(void)
16979 __asm__ __volatile__("andl %%esp,%0" :
16980 "=r" (sp) : "0" (THREAD_SIZE - 1));
16981
16982 - return sp < (sizeof(struct thread_info) + STACK_WARN);
16983 + return sp < STACK_WARN;
16984 }
16985
16986 static void print_stack_overflow(void)
16987 @@ -59,8 +59,8 @@ static inline void print_stack_overflow(void) { }
16988 * per-CPU IRQ handling contexts (thread information and stack)
16989 */
16990 union irq_ctx {
16991 - struct thread_info tinfo;
16992 - u32 stack[THREAD_SIZE/sizeof(u32)];
16993 + unsigned long previous_esp;
16994 + u32 stack[THREAD_SIZE/sizeof(u32)];
16995 } __attribute__((aligned(THREAD_SIZE)));
16996
16997 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
16998 @@ -80,10 +80,9 @@ static void call_on_stack(void *func, void *stack)
16999 static inline int
17000 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17001 {
17002 - union irq_ctx *curctx, *irqctx;
17003 + union irq_ctx *irqctx;
17004 u32 *isp, arg1, arg2;
17005
17006 - curctx = (union irq_ctx *) current_thread_info();
17007 irqctx = __this_cpu_read(hardirq_ctx);
17008
17009 /*
17010 @@ -92,21 +91,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17011 * handler) we can't do that and just have to keep using the
17012 * current stack (which is the irq stack already after all)
17013 */
17014 - if (unlikely(curctx == irqctx))
17015 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
17016 return 0;
17017
17018 /* build the stack frame on the IRQ stack */
17019 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17020 - irqctx->tinfo.task = curctx->tinfo.task;
17021 - irqctx->tinfo.previous_esp = current_stack_pointer;
17022 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17023 + irqctx->previous_esp = current_stack_pointer;
17024
17025 - /*
17026 - * Copy the softirq bits in preempt_count so that the
17027 - * softirq checks work in the hardirq context.
17028 - */
17029 - irqctx->tinfo.preempt_count =
17030 - (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
17031 - (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
17032 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17033 + __set_fs(MAKE_MM_SEG(0));
17034 +#endif
17035
17036 if (unlikely(overflow))
17037 call_on_stack(print_stack_overflow, isp);
17038 @@ -118,6 +112,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17039 : "0" (irq), "1" (desc), "2" (isp),
17040 "D" (desc->handle_irq)
17041 : "memory", "cc", "ecx");
17042 +
17043 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17044 + __set_fs(current_thread_info()->addr_limit);
17045 +#endif
17046 +
17047 return 1;
17048 }
17049
17050 @@ -126,29 +125,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17051 */
17052 void __cpuinit irq_ctx_init(int cpu)
17053 {
17054 - union irq_ctx *irqctx;
17055 -
17056 if (per_cpu(hardirq_ctx, cpu))
17057 return;
17058
17059 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
17060 - THREAD_FLAGS,
17061 - THREAD_ORDER));
17062 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
17063 - irqctx->tinfo.cpu = cpu;
17064 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
17065 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17066 -
17067 - per_cpu(hardirq_ctx, cpu) = irqctx;
17068 -
17069 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
17070 - THREAD_FLAGS,
17071 - THREAD_ORDER));
17072 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
17073 - irqctx->tinfo.cpu = cpu;
17074 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17075 -
17076 - per_cpu(softirq_ctx, cpu) = irqctx;
17077 + per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
17078 + per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
17079
17080 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
17081 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
17082 @@ -157,7 +138,6 @@ void __cpuinit irq_ctx_init(int cpu)
17083 asmlinkage void do_softirq(void)
17084 {
17085 unsigned long flags;
17086 - struct thread_info *curctx;
17087 union irq_ctx *irqctx;
17088 u32 *isp;
17089
17090 @@ -167,15 +147,22 @@ asmlinkage void do_softirq(void)
17091 local_irq_save(flags);
17092
17093 if (local_softirq_pending()) {
17094 - curctx = current_thread_info();
17095 irqctx = __this_cpu_read(softirq_ctx);
17096 - irqctx->tinfo.task = curctx->task;
17097 - irqctx->tinfo.previous_esp = current_stack_pointer;
17098 + irqctx->previous_esp = current_stack_pointer;
17099
17100 /* build the stack frame on the softirq stack */
17101 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17102 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17103 +
17104 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17105 + __set_fs(MAKE_MM_SEG(0));
17106 +#endif
17107
17108 call_on_stack(__do_softirq, isp);
17109 +
17110 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17111 + __set_fs(current_thread_info()->addr_limit);
17112 +#endif
17113 +
17114 /*
17115 * Shouldn't happen, we returned above if in_interrupt():
17116 */
17117 diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
17118 index d04d3ec..ea4b374 100644
17119 --- a/arch/x86/kernel/irq_64.c
17120 +++ b/arch/x86/kernel/irq_64.c
17121 @@ -44,7 +44,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
17122 u64 estack_top, estack_bottom;
17123 u64 curbase = (u64)task_stack_page(current);
17124
17125 - if (user_mode_vm(regs))
17126 + if (user_mode(regs))
17127 return;
17128
17129 if (regs->sp >= curbase + sizeof(struct thread_info) +
17130 diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
17131 index faba577..93b9e71 100644
17132 --- a/arch/x86/kernel/kgdb.c
17133 +++ b/arch/x86/kernel/kgdb.c
17134 @@ -124,11 +124,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
17135 #ifdef CONFIG_X86_32
17136 switch (regno) {
17137 case GDB_SS:
17138 - if (!user_mode_vm(regs))
17139 + if (!user_mode(regs))
17140 *(unsigned long *)mem = __KERNEL_DS;
17141 break;
17142 case GDB_SP:
17143 - if (!user_mode_vm(regs))
17144 + if (!user_mode(regs))
17145 *(unsigned long *)mem = kernel_stack_pointer(regs);
17146 break;
17147 case GDB_GS:
17148 @@ -473,12 +473,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
17149 case 'k':
17150 /* clear the trace bit */
17151 linux_regs->flags &= ~X86_EFLAGS_TF;
17152 - atomic_set(&kgdb_cpu_doing_single_step, -1);
17153 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
17154
17155 /* set the trace bit if we're stepping */
17156 if (remcomInBuffer[0] == 's') {
17157 linux_regs->flags |= X86_EFLAGS_TF;
17158 - atomic_set(&kgdb_cpu_doing_single_step,
17159 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
17160 raw_smp_processor_id());
17161 }
17162
17163 @@ -543,7 +543,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
17164
17165 switch (cmd) {
17166 case DIE_DEBUG:
17167 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
17168 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
17169 if (user_mode(regs))
17170 return single_step_cont(regs, args);
17171 break;
17172 diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
17173 index 7da647d..56fe348 100644
17174 --- a/arch/x86/kernel/kprobes.c
17175 +++ b/arch/x86/kernel/kprobes.c
17176 @@ -118,8 +118,11 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
17177 } __attribute__((packed)) *insn;
17178
17179 insn = (struct __arch_relative_insn *)from;
17180 +
17181 + pax_open_kernel();
17182 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
17183 insn->op = op;
17184 + pax_close_kernel();
17185 }
17186
17187 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
17188 @@ -156,7 +159,7 @@ static int __kprobes can_boost(kprobe_opcode_t *opcodes)
17189 kprobe_opcode_t opcode;
17190 kprobe_opcode_t *orig_opcodes = opcodes;
17191
17192 - if (search_exception_tables((unsigned long)opcodes))
17193 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
17194 return 0; /* Page fault may occur on this address. */
17195
17196 retry:
17197 @@ -317,7 +320,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
17198 }
17199 }
17200 insn_get_length(&insn);
17201 + pax_open_kernel();
17202 memcpy(dest, insn.kaddr, insn.length);
17203 + pax_close_kernel();
17204
17205 #ifdef CONFIG_X86_64
17206 if (insn_rip_relative(&insn)) {
17207 @@ -341,7 +346,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
17208 (u8 *) dest;
17209 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
17210 disp = (u8 *) dest + insn_offset_displacement(&insn);
17211 + pax_open_kernel();
17212 *(s32 *) disp = (s32) newdisp;
17213 + pax_close_kernel();
17214 }
17215 #endif
17216 return insn.length;
17217 @@ -355,12 +362,12 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)
17218 */
17219 __copy_instruction(p->ainsn.insn, p->addr, 0);
17220
17221 - if (can_boost(p->addr))
17222 + if (can_boost(ktla_ktva(p->addr)))
17223 p->ainsn.boostable = 0;
17224 else
17225 p->ainsn.boostable = -1;
17226
17227 - p->opcode = *p->addr;
17228 + p->opcode = *(ktla_ktva(p->addr));
17229 }
17230
17231 int __kprobes arch_prepare_kprobe(struct kprobe *p)
17232 @@ -477,7 +484,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
17233 * nor set current_kprobe, because it doesn't use single
17234 * stepping.
17235 */
17236 - regs->ip = (unsigned long)p->ainsn.insn;
17237 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
17238 preempt_enable_no_resched();
17239 return;
17240 }
17241 @@ -496,7 +503,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
17242 if (p->opcode == BREAKPOINT_INSTRUCTION)
17243 regs->ip = (unsigned long)p->addr;
17244 else
17245 - regs->ip = (unsigned long)p->ainsn.insn;
17246 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
17247 }
17248
17249 /*
17250 @@ -575,7 +582,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
17251 setup_singlestep(p, regs, kcb, 0);
17252 return 1;
17253 }
17254 - } else if (*addr != BREAKPOINT_INSTRUCTION) {
17255 + } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
17256 /*
17257 * The breakpoint instruction was removed right
17258 * after we hit it. Another cpu has removed
17259 @@ -683,6 +690,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
17260 " movq %rax, 152(%rsp)\n"
17261 RESTORE_REGS_STRING
17262 " popfq\n"
17263 +#ifdef KERNEXEC_PLUGIN
17264 + " btsq $63,(%rsp)\n"
17265 +#endif
17266 #else
17267 " pushf\n"
17268 SAVE_REGS_STRING
17269 @@ -820,7 +830,7 @@ static void __kprobes resume_execution(struct kprobe *p,
17270 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
17271 {
17272 unsigned long *tos = stack_addr(regs);
17273 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
17274 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
17275 unsigned long orig_ip = (unsigned long)p->addr;
17276 kprobe_opcode_t *insn = p->ainsn.insn;
17277
17278 @@ -1002,7 +1012,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
17279 struct die_args *args = data;
17280 int ret = NOTIFY_DONE;
17281
17282 - if (args->regs && user_mode_vm(args->regs))
17283 + if (args->regs && user_mode(args->regs))
17284 return ret;
17285
17286 switch (val) {
17287 @@ -1384,7 +1394,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
17288 * Verify if the address gap is in 2GB range, because this uses
17289 * a relative jump.
17290 */
17291 - rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
17292 + rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
17293 if (abs(rel) > 0x7fffffff)
17294 return -ERANGE;
17295
17296 @@ -1405,11 +1415,11 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
17297 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
17298
17299 /* Set probe function call */
17300 - synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
17301 + synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
17302
17303 /* Set returning jmp instruction at the tail of out-of-line buffer */
17304 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
17305 - (u8 *)op->kp.addr + op->optinsn.size);
17306 + (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
17307
17308 flush_icache_range((unsigned long) buf,
17309 (unsigned long) buf + TMPL_END_IDX +
17310 @@ -1431,7 +1441,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
17311 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
17312
17313 /* Backup instructions which will be replaced by jump address */
17314 - memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
17315 + memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
17316 RELATIVE_ADDR_SIZE);
17317
17318 insn_buf[0] = RELATIVEJUMP_OPCODE;
17319 diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
17320 index ea69726..604d066 100644
17321 --- a/arch/x86/kernel/ldt.c
17322 +++ b/arch/x86/kernel/ldt.c
17323 @@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
17324 if (reload) {
17325 #ifdef CONFIG_SMP
17326 preempt_disable();
17327 - load_LDT(pc);
17328 + load_LDT_nolock(pc);
17329 if (!cpumask_equal(mm_cpumask(current->mm),
17330 cpumask_of(smp_processor_id())))
17331 smp_call_function(flush_ldt, current->mm, 1);
17332 preempt_enable();
17333 #else
17334 - load_LDT(pc);
17335 + load_LDT_nolock(pc);
17336 #endif
17337 }
17338 if (oldsize) {
17339 @@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
17340 return err;
17341
17342 for (i = 0; i < old->size; i++)
17343 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
17344 + write_ldt_entry(new->ldt, i, old->ldt + i);
17345 return 0;
17346 }
17347
17348 @@ -116,6 +116,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
17349 retval = copy_ldt(&mm->context, &old_mm->context);
17350 mutex_unlock(&old_mm->context.lock);
17351 }
17352 +
17353 + if (tsk == current) {
17354 + mm->context.vdso = 0;
17355 +
17356 +#ifdef CONFIG_X86_32
17357 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
17358 + mm->context.user_cs_base = 0UL;
17359 + mm->context.user_cs_limit = ~0UL;
17360 +
17361 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
17362 + cpus_clear(mm->context.cpu_user_cs_mask);
17363 +#endif
17364 +
17365 +#endif
17366 +#endif
17367 +
17368 + }
17369 +
17370 return retval;
17371 }
17372
17373 @@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
17374 }
17375 }
17376
17377 +#ifdef CONFIG_PAX_SEGMEXEC
17378 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
17379 + error = -EINVAL;
17380 + goto out_unlock;
17381 + }
17382 +#endif
17383 +
17384 fill_ldt(&ldt, &ldt_info);
17385 if (oldmode)
17386 ldt.avl = 0;
17387 diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
17388 index a3fa43b..8966f4c 100644
17389 --- a/arch/x86/kernel/machine_kexec_32.c
17390 +++ b/arch/x86/kernel/machine_kexec_32.c
17391 @@ -27,7 +27,7 @@
17392 #include <asm/cacheflush.h>
17393 #include <asm/debugreg.h>
17394
17395 -static void set_idt(void *newidt, __u16 limit)
17396 +static void set_idt(struct desc_struct *newidt, __u16 limit)
17397 {
17398 struct desc_ptr curidt;
17399
17400 @@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16 limit)
17401 }
17402
17403
17404 -static void set_gdt(void *newgdt, __u16 limit)
17405 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
17406 {
17407 struct desc_ptr curgdt;
17408
17409 @@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
17410 }
17411
17412 control_page = page_address(image->control_code_page);
17413 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
17414 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
17415
17416 relocate_kernel_ptr = control_page;
17417 page_list[PA_CONTROL_PAGE] = __pa(control_page);
17418 diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
17419 index 3ca42d0..7cff8cc 100644
17420 --- a/arch/x86/kernel/microcode_intel.c
17421 +++ b/arch/x86/kernel/microcode_intel.c
17422 @@ -436,13 +436,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
17423
17424 static int get_ucode_user(void *to, const void *from, size_t n)
17425 {
17426 - return copy_from_user(to, from, n);
17427 + return copy_from_user(to, (const void __force_user *)from, n);
17428 }
17429
17430 static enum ucode_state
17431 request_microcode_user(int cpu, const void __user *buf, size_t size)
17432 {
17433 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
17434 + return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
17435 }
17436
17437 static void microcode_fini_cpu(int cpu)
17438 diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
17439 index 925179f..267ac7a 100644
17440 --- a/arch/x86/kernel/module.c
17441 +++ b/arch/x86/kernel/module.c
17442 @@ -36,15 +36,60 @@
17443 #define DEBUGP(fmt...)
17444 #endif
17445
17446 -void *module_alloc(unsigned long size)
17447 +static inline void *__module_alloc(unsigned long size, pgprot_t prot)
17448 {
17449 - if (PAGE_ALIGN(size) > MODULES_LEN)
17450 + if (size == 0 || PAGE_ALIGN(size) > MODULES_LEN)
17451 return NULL;
17452 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
17453 - GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
17454 + GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
17455 -1, __builtin_return_address(0));
17456 }
17457
17458 +void *module_alloc(unsigned long size)
17459 +{
17460 +
17461 +#ifdef CONFIG_PAX_KERNEXEC
17462 + return __module_alloc(size, PAGE_KERNEL);
17463 +#else
17464 + return __module_alloc(size, PAGE_KERNEL_EXEC);
17465 +#endif
17466 +
17467 +}
17468 +
17469 +#ifdef CONFIG_PAX_KERNEXEC
17470 +#ifdef CONFIG_X86_32
17471 +void *module_alloc_exec(unsigned long size)
17472 +{
17473 + struct vm_struct *area;
17474 +
17475 + if (size == 0)
17476 + return NULL;
17477 +
17478 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
17479 + return area ? area->addr : NULL;
17480 +}
17481 +EXPORT_SYMBOL(module_alloc_exec);
17482 +
17483 +void module_free_exec(struct module *mod, void *module_region)
17484 +{
17485 + vunmap(module_region);
17486 +}
17487 +EXPORT_SYMBOL(module_free_exec);
17488 +#else
17489 +void module_free_exec(struct module *mod, void *module_region)
17490 +{
17491 + module_free(mod, module_region);
17492 +}
17493 +EXPORT_SYMBOL(module_free_exec);
17494 +
17495 +void *module_alloc_exec(unsigned long size)
17496 +{
17497 + return __module_alloc(size, PAGE_KERNEL_RX);
17498 +}
17499 +EXPORT_SYMBOL(module_alloc_exec);
17500 +#endif
17501 +#endif
17502 +
17503 #ifdef CONFIG_X86_32
17504 int apply_relocate(Elf32_Shdr *sechdrs,
17505 const char *strtab,
17506 @@ -55,14 +100,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
17507 unsigned int i;
17508 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
17509 Elf32_Sym *sym;
17510 - uint32_t *location;
17511 + uint32_t *plocation, location;
17512
17513 DEBUGP("Applying relocate section %u to %u\n", relsec,
17514 sechdrs[relsec].sh_info);
17515 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
17516 /* This is where to make the change */
17517 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
17518 - + rel[i].r_offset;
17519 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
17520 + location = (uint32_t)plocation;
17521 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
17522 + plocation = ktla_ktva((void *)plocation);
17523 /* This is the symbol it is referring to. Note that all
17524 undefined symbols have been resolved. */
17525 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
17526 @@ -71,11 +118,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
17527 switch (ELF32_R_TYPE(rel[i].r_info)) {
17528 case R_386_32:
17529 /* We add the value into the location given */
17530 - *location += sym->st_value;
17531 + pax_open_kernel();
17532 + *plocation += sym->st_value;
17533 + pax_close_kernel();
17534 break;
17535 case R_386_PC32:
17536 /* Add the value, subtract its postition */
17537 - *location += sym->st_value - (uint32_t)location;
17538 + pax_open_kernel();
17539 + *plocation += sym->st_value - location;
17540 + pax_close_kernel();
17541 break;
17542 default:
17543 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
17544 @@ -120,21 +171,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
17545 case R_X86_64_NONE:
17546 break;
17547 case R_X86_64_64:
17548 + pax_open_kernel();
17549 *(u64 *)loc = val;
17550 + pax_close_kernel();
17551 break;
17552 case R_X86_64_32:
17553 + pax_open_kernel();
17554 *(u32 *)loc = val;
17555 + pax_close_kernel();
17556 if (val != *(u32 *)loc)
17557 goto overflow;
17558 break;
17559 case R_X86_64_32S:
17560 + pax_open_kernel();
17561 *(s32 *)loc = val;
17562 + pax_close_kernel();
17563 if ((s64)val != *(s32 *)loc)
17564 goto overflow;
17565 break;
17566 case R_X86_64_PC32:
17567 val -= (u64)loc;
17568 + pax_open_kernel();
17569 *(u32 *)loc = val;
17570 + pax_close_kernel();
17571 +
17572 #if 0
17573 if ((s64)val != *(s32 *)loc)
17574 goto overflow;
17575 diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
17576 index 47acaf3..ec48ab6 100644
17577 --- a/arch/x86/kernel/nmi.c
17578 +++ b/arch/x86/kernel/nmi.c
17579 @@ -505,6 +505,17 @@ static inline void nmi_nesting_postprocess(void)
17580 dotraplinkage notrace __kprobes void
17581 do_nmi(struct pt_regs *regs, long error_code)
17582 {
17583 +
17584 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17585 + if (!user_mode(regs)) {
17586 + unsigned long cs = regs->cs & 0xFFFF;
17587 + unsigned long ip = ktva_ktla(regs->ip);
17588 +
17589 + if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
17590 + regs->ip = ip;
17591 + }
17592 +#endif
17593 +
17594 nmi_nesting_preprocess(regs);
17595
17596 nmi_enter();
17597 diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
17598 index 676b8c7..870ba04 100644
17599 --- a/arch/x86/kernel/paravirt-spinlocks.c
17600 +++ b/arch/x86/kernel/paravirt-spinlocks.c
17601 @@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
17602 arch_spin_lock(lock);
17603 }
17604
17605 -struct pv_lock_ops pv_lock_ops = {
17606 +struct pv_lock_ops pv_lock_ops __read_only = {
17607 #ifdef CONFIG_SMP
17608 .spin_is_locked = __ticket_spin_is_locked,
17609 .spin_is_contended = __ticket_spin_is_contended,
17610 diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
17611 index d90272e..6bb013b 100644
17612 --- a/arch/x86/kernel/paravirt.c
17613 +++ b/arch/x86/kernel/paravirt.c
17614 @@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
17615 {
17616 return x;
17617 }
17618 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
17619 +PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
17620 +#endif
17621
17622 void __init default_banner(void)
17623 {
17624 @@ -145,15 +148,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
17625 if (opfunc == NULL)
17626 /* If there's no function, patch it with a ud2a (BUG) */
17627 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
17628 - else if (opfunc == _paravirt_nop)
17629 + else if (opfunc == (void *)_paravirt_nop)
17630 /* If the operation is a nop, then nop the callsite */
17631 ret = paravirt_patch_nop();
17632
17633 /* identity functions just return their single argument */
17634 - else if (opfunc == _paravirt_ident_32)
17635 + else if (opfunc == (void *)_paravirt_ident_32)
17636 ret = paravirt_patch_ident_32(insnbuf, len);
17637 - else if (opfunc == _paravirt_ident_64)
17638 + else if (opfunc == (void *)_paravirt_ident_64)
17639 ret = paravirt_patch_ident_64(insnbuf, len);
17640 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
17641 + else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
17642 + ret = paravirt_patch_ident_64(insnbuf, len);
17643 +#endif
17644
17645 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
17646 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
17647 @@ -178,7 +185,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
17648 if (insn_len > len || start == NULL)
17649 insn_len = len;
17650 else
17651 - memcpy(insnbuf, start, insn_len);
17652 + memcpy(insnbuf, ktla_ktva(start), insn_len);
17653
17654 return insn_len;
17655 }
17656 @@ -302,7 +309,7 @@ void arch_flush_lazy_mmu_mode(void)
17657 preempt_enable();
17658 }
17659
17660 -struct pv_info pv_info = {
17661 +struct pv_info pv_info __read_only = {
17662 .name = "bare hardware",
17663 .paravirt_enabled = 0,
17664 .kernel_rpl = 0,
17665 @@ -313,16 +320,16 @@ struct pv_info pv_info = {
17666 #endif
17667 };
17668
17669 -struct pv_init_ops pv_init_ops = {
17670 +struct pv_init_ops pv_init_ops __read_only = {
17671 .patch = native_patch,
17672 };
17673
17674 -struct pv_time_ops pv_time_ops = {
17675 +struct pv_time_ops pv_time_ops __read_only = {
17676 .sched_clock = native_sched_clock,
17677 .steal_clock = native_steal_clock,
17678 };
17679
17680 -struct pv_irq_ops pv_irq_ops = {
17681 +struct pv_irq_ops pv_irq_ops __read_only = {
17682 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
17683 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
17684 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
17685 @@ -334,7 +341,7 @@ struct pv_irq_ops pv_irq_ops = {
17686 #endif
17687 };
17688
17689 -struct pv_cpu_ops pv_cpu_ops = {
17690 +struct pv_cpu_ops pv_cpu_ops __read_only = {
17691 .cpuid = native_cpuid,
17692 .get_debugreg = native_get_debugreg,
17693 .set_debugreg = native_set_debugreg,
17694 @@ -395,21 +402,26 @@ struct pv_cpu_ops pv_cpu_ops = {
17695 .end_context_switch = paravirt_nop,
17696 };
17697
17698 -struct pv_apic_ops pv_apic_ops = {
17699 +struct pv_apic_ops pv_apic_ops __read_only = {
17700 #ifdef CONFIG_X86_LOCAL_APIC
17701 .startup_ipi_hook = paravirt_nop,
17702 #endif
17703 };
17704
17705 -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
17706 +#ifdef CONFIG_X86_32
17707 +#ifdef CONFIG_X86_PAE
17708 +/* 64-bit pagetable entries */
17709 +#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
17710 +#else
17711 /* 32-bit pagetable entries */
17712 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
17713 +#endif
17714 #else
17715 /* 64-bit pagetable entries */
17716 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
17717 #endif
17718
17719 -struct pv_mmu_ops pv_mmu_ops = {
17720 +struct pv_mmu_ops pv_mmu_ops __read_only = {
17721
17722 .read_cr2 = native_read_cr2,
17723 .write_cr2 = native_write_cr2,
17724 @@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
17725 .make_pud = PTE_IDENT,
17726
17727 .set_pgd = native_set_pgd,
17728 + .set_pgd_batched = native_set_pgd_batched,
17729 #endif
17730 #endif /* PAGETABLE_LEVELS >= 3 */
17731
17732 @@ -478,6 +491,12 @@ struct pv_mmu_ops pv_mmu_ops = {
17733 },
17734
17735 .set_fixmap = native_set_fixmap,
17736 +
17737 +#ifdef CONFIG_PAX_KERNEXEC
17738 + .pax_open_kernel = native_pax_open_kernel,
17739 + .pax_close_kernel = native_pax_close_kernel,
17740 +#endif
17741 +
17742 };
17743
17744 EXPORT_SYMBOL_GPL(pv_time_ops);
17745 diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
17746 index 35ccf75..7a15747 100644
17747 --- a/arch/x86/kernel/pci-iommu_table.c
17748 +++ b/arch/x86/kernel/pci-iommu_table.c
17749 @@ -2,7 +2,7 @@
17750 #include <asm/iommu_table.h>
17751 #include <linux/string.h>
17752 #include <linux/kallsyms.h>
17753 -
17754 +#include <linux/sched.h>
17755
17756 #define DEBUG 1
17757
17758 diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
17759 index 15763af..da59ada 100644
17760 --- a/arch/x86/kernel/process.c
17761 +++ b/arch/x86/kernel/process.c
17762 @@ -48,16 +48,33 @@ void free_thread_xstate(struct task_struct *tsk)
17763
17764 void free_thread_info(struct thread_info *ti)
17765 {
17766 - free_thread_xstate(ti->task);
17767 free_pages((unsigned long)ti, THREAD_ORDER);
17768 }
17769
17770 +static struct kmem_cache *task_struct_cachep;
17771 +
17772 void arch_task_cache_init(void)
17773 {
17774 - task_xstate_cachep =
17775 - kmem_cache_create("task_xstate", xstate_size,
17776 + /* create a slab on which task_structs can be allocated */
17777 + task_struct_cachep =
17778 + kmem_cache_create("task_struct", sizeof(struct task_struct),
17779 + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
17780 +
17781 + task_xstate_cachep =
17782 + kmem_cache_create("task_xstate", xstate_size,
17783 __alignof__(union thread_xstate),
17784 - SLAB_PANIC | SLAB_NOTRACK, NULL);
17785 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
17786 +}
17787 +
17788 +struct task_struct *alloc_task_struct_node(int node)
17789 +{
17790 + return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
17791 +}
17792 +
17793 +void free_task_struct(struct task_struct *task)
17794 +{
17795 + free_thread_xstate(task);
17796 + kmem_cache_free(task_struct_cachep, task);
17797 }
17798
17799 /*
17800 @@ -70,7 +87,7 @@ void exit_thread(void)
17801 unsigned long *bp = t->io_bitmap_ptr;
17802
17803 if (bp) {
17804 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
17805 + struct tss_struct *tss = init_tss + get_cpu();
17806
17807 t->io_bitmap_ptr = NULL;
17808 clear_thread_flag(TIF_IO_BITMAP);
17809 @@ -106,7 +123,7 @@ void show_regs_common(void)
17810
17811 printk(KERN_CONT "\n");
17812 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
17813 - current->pid, current->comm, print_tainted(),
17814 + task_pid_nr(current), current->comm, print_tainted(),
17815 init_utsname()->release,
17816 (int)strcspn(init_utsname()->version, " "),
17817 init_utsname()->version);
17818 @@ -120,6 +137,9 @@ void flush_thread(void)
17819 {
17820 struct task_struct *tsk = current;
17821
17822 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
17823 + loadsegment(gs, 0);
17824 +#endif
17825 flush_ptrace_hw_breakpoint(tsk);
17826 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
17827 /*
17828 @@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
17829 regs.di = (unsigned long) arg;
17830
17831 #ifdef CONFIG_X86_32
17832 - regs.ds = __USER_DS;
17833 - regs.es = __USER_DS;
17834 + regs.ds = __KERNEL_DS;
17835 + regs.es = __KERNEL_DS;
17836 regs.fs = __KERNEL_PERCPU;
17837 - regs.gs = __KERNEL_STACK_CANARY;
17838 + savesegment(gs, regs.gs);
17839 #else
17840 regs.ss = __KERNEL_DS;
17841 #endif
17842 @@ -411,7 +431,7 @@ bool set_pm_idle_to_default(void)
17843
17844 return ret;
17845 }
17846 -void stop_this_cpu(void *dummy)
17847 +__noreturn void stop_this_cpu(void *dummy)
17848 {
17849 local_irq_disable();
17850 /*
17851 @@ -653,16 +673,37 @@ static int __init idle_setup(char *str)
17852 }
17853 early_param("idle", idle_setup);
17854
17855 -unsigned long arch_align_stack(unsigned long sp)
17856 +#ifdef CONFIG_PAX_RANDKSTACK
17857 +void pax_randomize_kstack(struct pt_regs *regs)
17858 {
17859 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
17860 - sp -= get_random_int() % 8192;
17861 - return sp & ~0xf;
17862 -}
17863 + struct thread_struct *thread = &current->thread;
17864 + unsigned long time;
17865
17866 -unsigned long arch_randomize_brk(struct mm_struct *mm)
17867 -{
17868 - unsigned long range_end = mm->brk + 0x02000000;
17869 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
17870 -}
17871 + if (!randomize_va_space)
17872 + return;
17873 +
17874 + if (v8086_mode(regs))
17875 + return;
17876
17877 + rdtscl(time);
17878 +
17879 + /* P4 seems to return a 0 LSB, ignore it */
17880 +#ifdef CONFIG_MPENTIUM4
17881 + time &= 0x3EUL;
17882 + time <<= 2;
17883 +#elif defined(CONFIG_X86_64)
17884 + time &= 0xFUL;
17885 + time <<= 4;
17886 +#else
17887 + time &= 0x1FUL;
17888 + time <<= 3;
17889 +#endif
17890 +
17891 + thread->sp0 ^= time;
17892 + load_sp0(init_tss + smp_processor_id(), thread);
17893 +
17894 +#ifdef CONFIG_X86_64
17895 + percpu_write(kernel_stack, thread->sp0);
17896 +#endif
17897 +}
17898 +#endif
17899 diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
17900 index c08d1ff..6ae1c81 100644
17901 --- a/arch/x86/kernel/process_32.c
17902 +++ b/arch/x86/kernel/process_32.c
17903 @@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
17904 unsigned long thread_saved_pc(struct task_struct *tsk)
17905 {
17906 return ((unsigned long *)tsk->thread.sp)[3];
17907 +//XXX return tsk->thread.eip;
17908 }
17909
17910 #ifndef CONFIG_SMP
17911 @@ -132,15 +133,14 @@ void __show_regs(struct pt_regs *regs, int all)
17912 unsigned long sp;
17913 unsigned short ss, gs;
17914
17915 - if (user_mode_vm(regs)) {
17916 + if (user_mode(regs)) {
17917 sp = regs->sp;
17918 ss = regs->ss & 0xffff;
17919 - gs = get_user_gs(regs);
17920 } else {
17921 sp = kernel_stack_pointer(regs);
17922 savesegment(ss, ss);
17923 - savesegment(gs, gs);
17924 }
17925 + gs = get_user_gs(regs);
17926
17927 show_regs_common();
17928
17929 @@ -202,13 +202,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
17930 struct task_struct *tsk;
17931 int err;
17932
17933 - childregs = task_pt_regs(p);
17934 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
17935 *childregs = *regs;
17936 childregs->ax = 0;
17937 childregs->sp = sp;
17938
17939 p->thread.sp = (unsigned long) childregs;
17940 p->thread.sp0 = (unsigned long) (childregs+1);
17941 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
17942
17943 p->thread.ip = (unsigned long) ret_from_fork;
17944
17945 @@ -299,7 +300,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
17946 struct thread_struct *prev = &prev_p->thread,
17947 *next = &next_p->thread;
17948 int cpu = smp_processor_id();
17949 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
17950 + struct tss_struct *tss = init_tss + cpu;
17951 fpu_switch_t fpu;
17952
17953 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
17954 @@ -323,6 +324,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
17955 */
17956 lazy_save_gs(prev->gs);
17957
17958 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17959 + __set_fs(task_thread_info(next_p)->addr_limit);
17960 +#endif
17961 +
17962 /*
17963 * Load the per-thread Thread-Local Storage descriptor.
17964 */
17965 @@ -353,6 +358,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
17966 */
17967 arch_end_context_switch(next_p);
17968
17969 + percpu_write(current_task, next_p);
17970 + percpu_write(current_tinfo, &next_p->tinfo);
17971 +
17972 /*
17973 * Restore %gs if needed (which is common)
17974 */
17975 @@ -361,8 +369,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
17976
17977 switch_fpu_finish(next_p, fpu);
17978
17979 - percpu_write(current_task, next_p);
17980 -
17981 return prev_p;
17982 }
17983
17984 @@ -392,4 +398,3 @@ unsigned long get_wchan(struct task_struct *p)
17985 } while (count++ < 16);
17986 return 0;
17987 }
17988 -
17989 diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
17990 index cfa5c90..4facd28 100644
17991 --- a/arch/x86/kernel/process_64.c
17992 +++ b/arch/x86/kernel/process_64.c
17993 @@ -89,7 +89,7 @@ static void __exit_idle(void)
17994 void exit_idle(void)
17995 {
17996 /* idle loop has pid 0 */
17997 - if (current->pid)
17998 + if (task_pid_nr(current))
17999 return;
18000 __exit_idle();
18001 }
18002 @@ -270,8 +270,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18003 struct pt_regs *childregs;
18004 struct task_struct *me = current;
18005
18006 - childregs = ((struct pt_regs *)
18007 - (THREAD_SIZE + task_stack_page(p))) - 1;
18008 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
18009 *childregs = *regs;
18010
18011 childregs->ax = 0;
18012 @@ -283,6 +282,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18013 p->thread.sp = (unsigned long) childregs;
18014 p->thread.sp0 = (unsigned long) (childregs+1);
18015 p->thread.usersp = me->thread.usersp;
18016 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18017
18018 set_tsk_thread_flag(p, TIF_FORK);
18019
18020 @@ -385,7 +385,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18021 struct thread_struct *prev = &prev_p->thread;
18022 struct thread_struct *next = &next_p->thread;
18023 int cpu = smp_processor_id();
18024 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
18025 + struct tss_struct *tss = init_tss + cpu;
18026 unsigned fsindex, gsindex;
18027 fpu_switch_t fpu;
18028
18029 @@ -467,10 +467,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18030 prev->usersp = percpu_read(old_rsp);
18031 percpu_write(old_rsp, next->usersp);
18032 percpu_write(current_task, next_p);
18033 + percpu_write(current_tinfo, &next_p->tinfo);
18034
18035 - percpu_write(kernel_stack,
18036 - (unsigned long)task_stack_page(next_p) +
18037 - THREAD_SIZE - KERNEL_STACK_OFFSET);
18038 + percpu_write(kernel_stack, next->sp0);
18039
18040 /*
18041 * Now maybe reload the debug registers and handle I/O bitmaps
18042 @@ -525,12 +524,11 @@ unsigned long get_wchan(struct task_struct *p)
18043 if (!p || p == current || p->state == TASK_RUNNING)
18044 return 0;
18045 stack = (unsigned long)task_stack_page(p);
18046 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
18047 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
18048 return 0;
18049 fp = *(u64 *)(p->thread.sp);
18050 do {
18051 - if (fp < (unsigned long)stack ||
18052 - fp >= (unsigned long)stack+THREAD_SIZE)
18053 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
18054 return 0;
18055 ip = *(u64 *)(fp+8);
18056 if (!in_sched_functions(ip))
18057 diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
18058 index 5026738..9e6d6dc 100644
18059 --- a/arch/x86/kernel/ptrace.c
18060 +++ b/arch/x86/kernel/ptrace.c
18061 @@ -823,7 +823,7 @@ long arch_ptrace(struct task_struct *child, long request,
18062 unsigned long addr, unsigned long data)
18063 {
18064 int ret;
18065 - unsigned long __user *datap = (unsigned long __user *)data;
18066 + unsigned long __user *datap = (__force unsigned long __user *)data;
18067
18068 switch (request) {
18069 /* read the word at location addr in the USER area. */
18070 @@ -908,14 +908,14 @@ long arch_ptrace(struct task_struct *child, long request,
18071 if ((int) addr < 0)
18072 return -EIO;
18073 ret = do_get_thread_area(child, addr,
18074 - (struct user_desc __user *)data);
18075 + (__force struct user_desc __user *) data);
18076 break;
18077
18078 case PTRACE_SET_THREAD_AREA:
18079 if ((int) addr < 0)
18080 return -EIO;
18081 ret = do_set_thread_area(child, addr,
18082 - (struct user_desc __user *)data, 0);
18083 + (__force struct user_desc __user *) data, 0);
18084 break;
18085 #endif
18086
18087 @@ -1332,7 +1332,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
18088 memset(info, 0, sizeof(*info));
18089 info->si_signo = SIGTRAP;
18090 info->si_code = si_code;
18091 - info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
18092 + info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
18093 }
18094
18095 void user_single_step_siginfo(struct task_struct *tsk,
18096 diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
18097 index 42eb330..139955c 100644
18098 --- a/arch/x86/kernel/pvclock.c
18099 +++ b/arch/x86/kernel/pvclock.c
18100 @@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
18101 return pv_tsc_khz;
18102 }
18103
18104 -static atomic64_t last_value = ATOMIC64_INIT(0);
18105 +static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
18106
18107 void pvclock_resume(void)
18108 {
18109 - atomic64_set(&last_value, 0);
18110 + atomic64_set_unchecked(&last_value, 0);
18111 }
18112
18113 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
18114 @@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
18115 * updating at the same time, and one of them could be slightly behind,
18116 * making the assumption that last_value always go forward fail to hold.
18117 */
18118 - last = atomic64_read(&last_value);
18119 + last = atomic64_read_unchecked(&last_value);
18120 do {
18121 if (ret < last)
18122 return last;
18123 - last = atomic64_cmpxchg(&last_value, last, ret);
18124 + last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
18125 } while (unlikely(last != ret));
18126
18127 return ret;
18128 diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
18129 index d840e69..98e9581 100644
18130 --- a/arch/x86/kernel/reboot.c
18131 +++ b/arch/x86/kernel/reboot.c
18132 @@ -35,7 +35,7 @@ void (*pm_power_off)(void);
18133 EXPORT_SYMBOL(pm_power_off);
18134
18135 static const struct desc_ptr no_idt = {};
18136 -static int reboot_mode;
18137 +static unsigned short reboot_mode;
18138 enum reboot_type reboot_type = BOOT_ACPI;
18139 int reboot_force;
18140
18141 @@ -335,13 +335,17 @@ core_initcall(reboot_init);
18142 extern const unsigned char machine_real_restart_asm[];
18143 extern const u64 machine_real_restart_gdt[3];
18144
18145 -void machine_real_restart(unsigned int type)
18146 +__noreturn void machine_real_restart(unsigned int type)
18147 {
18148 void *restart_va;
18149 unsigned long restart_pa;
18150 - void (*restart_lowmem)(unsigned int);
18151 + void (* __noreturn restart_lowmem)(unsigned int);
18152 u64 *lowmem_gdt;
18153
18154 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
18155 + struct desc_struct *gdt;
18156 +#endif
18157 +
18158 local_irq_disable();
18159
18160 /* Write zero to CMOS register number 0x0f, which the BIOS POST
18161 @@ -367,14 +371,14 @@ void machine_real_restart(unsigned int type)
18162 boot)". This seems like a fairly standard thing that gets set by
18163 REBOOT.COM programs, and the previous reset routine did this
18164 too. */
18165 - *((unsigned short *)0x472) = reboot_mode;
18166 + *(unsigned short *)(__va(0x472)) = reboot_mode;
18167
18168 /* Patch the GDT in the low memory trampoline */
18169 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
18170
18171 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
18172 restart_pa = virt_to_phys(restart_va);
18173 - restart_lowmem = (void (*)(unsigned int))restart_pa;
18174 + restart_lowmem = (void *)restart_pa;
18175
18176 /* GDT[0]: GDT self-pointer */
18177 lowmem_gdt[0] =
18178 @@ -385,7 +389,33 @@ void machine_real_restart(unsigned int type)
18179 GDT_ENTRY(0x009b, restart_pa, 0xffff);
18180
18181 /* Jump to the identity-mapped low memory code */
18182 +
18183 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
18184 + gdt = get_cpu_gdt_table(smp_processor_id());
18185 + pax_open_kernel();
18186 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18187 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
18188 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
18189 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
18190 +#endif
18191 +#ifdef CONFIG_PAX_KERNEXEC
18192 + gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
18193 + gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
18194 + gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
18195 + gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
18196 + gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
18197 + gdt[GDT_ENTRY_KERNEL_CS].g = 1;
18198 +#endif
18199 + pax_close_kernel();
18200 +#endif
18201 +
18202 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18203 + asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
18204 + unreachable();
18205 +#else
18206 restart_lowmem(type);
18207 +#endif
18208 +
18209 }
18210 #ifdef CONFIG_APM_MODULE
18211 EXPORT_SYMBOL(machine_real_restart);
18212 @@ -556,7 +586,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
18213 * try to force a triple fault and then cycle between hitting the keyboard
18214 * controller and doing that
18215 */
18216 -static void native_machine_emergency_restart(void)
18217 +__noreturn static void native_machine_emergency_restart(void)
18218 {
18219 int i;
18220 int attempt = 0;
18221 @@ -680,13 +710,13 @@ void native_machine_shutdown(void)
18222 #endif
18223 }
18224
18225 -static void __machine_emergency_restart(int emergency)
18226 +static __noreturn void __machine_emergency_restart(int emergency)
18227 {
18228 reboot_emergency = emergency;
18229 machine_ops.emergency_restart();
18230 }
18231
18232 -static void native_machine_restart(char *__unused)
18233 +static __noreturn void native_machine_restart(char *__unused)
18234 {
18235 printk("machine restart\n");
18236
18237 @@ -695,7 +725,7 @@ static void native_machine_restart(char *__unused)
18238 __machine_emergency_restart(0);
18239 }
18240
18241 -static void native_machine_halt(void)
18242 +static __noreturn void native_machine_halt(void)
18243 {
18244 /* stop other cpus and apics */
18245 machine_shutdown();
18246 @@ -706,7 +736,7 @@ static void native_machine_halt(void)
18247 stop_this_cpu(NULL);
18248 }
18249
18250 -static void native_machine_power_off(void)
18251 +__noreturn static void native_machine_power_off(void)
18252 {
18253 if (pm_power_off) {
18254 if (!reboot_force)
18255 @@ -715,6 +745,7 @@ static void native_machine_power_off(void)
18256 }
18257 /* a fallback in case there is no PM info available */
18258 tboot_shutdown(TB_SHUTDOWN_HALT);
18259 + unreachable();
18260 }
18261
18262 struct machine_ops machine_ops = {
18263 diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
18264 index 7a6f3b3..bed145d7 100644
18265 --- a/arch/x86/kernel/relocate_kernel_64.S
18266 +++ b/arch/x86/kernel/relocate_kernel_64.S
18267 @@ -11,6 +11,7 @@
18268 #include <asm/kexec.h>
18269 #include <asm/processor-flags.h>
18270 #include <asm/pgtable_types.h>
18271 +#include <asm/alternative-asm.h>
18272
18273 /*
18274 * Must be relocatable PIC code callable as a C function
18275 @@ -160,13 +161,14 @@ identity_mapped:
18276 xorq %rbp, %rbp
18277 xorq %r8, %r8
18278 xorq %r9, %r9
18279 - xorq %r10, %r9
18280 + xorq %r10, %r10
18281 xorq %r11, %r11
18282 xorq %r12, %r12
18283 xorq %r13, %r13
18284 xorq %r14, %r14
18285 xorq %r15, %r15
18286
18287 + pax_force_retaddr 0, 1
18288 ret
18289
18290 1:
18291 diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
18292 index d7d5099..28555d0 100644
18293 --- a/arch/x86/kernel/setup.c
18294 +++ b/arch/x86/kernel/setup.c
18295 @@ -448,7 +448,7 @@ static void __init parse_setup_data(void)
18296
18297 switch (data->type) {
18298 case SETUP_E820_EXT:
18299 - parse_e820_ext(data);
18300 + parse_e820_ext((struct setup_data __force_kernel *)data);
18301 break;
18302 case SETUP_DTB:
18303 add_dtb(pa_data);
18304 @@ -649,7 +649,7 @@ static void __init trim_bios_range(void)
18305 * area (640->1Mb) as ram even though it is not.
18306 * take them out.
18307 */
18308 - e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
18309 + e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
18310 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
18311 }
18312
18313 @@ -767,14 +767,14 @@ void __init setup_arch(char **cmdline_p)
18314
18315 if (!boot_params.hdr.root_flags)
18316 root_mountflags &= ~MS_RDONLY;
18317 - init_mm.start_code = (unsigned long) _text;
18318 - init_mm.end_code = (unsigned long) _etext;
18319 + init_mm.start_code = ktla_ktva((unsigned long) _text);
18320 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
18321 init_mm.end_data = (unsigned long) _edata;
18322 init_mm.brk = _brk_end;
18323
18324 - code_resource.start = virt_to_phys(_text);
18325 - code_resource.end = virt_to_phys(_etext)-1;
18326 - data_resource.start = virt_to_phys(_etext);
18327 + code_resource.start = virt_to_phys(ktla_ktva(_text));
18328 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
18329 + data_resource.start = virt_to_phys(_sdata);
18330 data_resource.end = virt_to_phys(_edata)-1;
18331 bss_resource.start = virt_to_phys(&__bss_start);
18332 bss_resource.end = virt_to_phys(&__bss_stop)-1;
18333 diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
18334 index 71f4727..16dc9f7 100644
18335 --- a/arch/x86/kernel/setup_percpu.c
18336 +++ b/arch/x86/kernel/setup_percpu.c
18337 @@ -21,19 +21,17 @@
18338 #include <asm/cpu.h>
18339 #include <asm/stackprotector.h>
18340
18341 -DEFINE_PER_CPU(int, cpu_number);
18342 +#ifdef CONFIG_SMP
18343 +DEFINE_PER_CPU(unsigned int, cpu_number);
18344 EXPORT_PER_CPU_SYMBOL(cpu_number);
18345 +#endif
18346
18347 -#ifdef CONFIG_X86_64
18348 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
18349 -#else
18350 -#define BOOT_PERCPU_OFFSET 0
18351 -#endif
18352
18353 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
18354 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
18355
18356 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
18357 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
18358 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
18359 };
18360 EXPORT_SYMBOL(__per_cpu_offset);
18361 @@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
18362 {
18363 #ifdef CONFIG_X86_32
18364 struct desc_struct gdt;
18365 + unsigned long base = per_cpu_offset(cpu);
18366
18367 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
18368 - 0x2 | DESCTYPE_S, 0x8);
18369 - gdt.s = 1;
18370 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
18371 + 0x83 | DESCTYPE_S, 0xC);
18372 write_gdt_entry(get_cpu_gdt_table(cpu),
18373 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
18374 #endif
18375 @@ -207,6 +205,11 @@ void __init setup_per_cpu_areas(void)
18376 /* alrighty, percpu areas up and running */
18377 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
18378 for_each_possible_cpu(cpu) {
18379 +#ifdef CONFIG_CC_STACKPROTECTOR
18380 +#ifdef CONFIG_X86_32
18381 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
18382 +#endif
18383 +#endif
18384 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
18385 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
18386 per_cpu(cpu_number, cpu) = cpu;
18387 @@ -247,6 +250,12 @@ void __init setup_per_cpu_areas(void)
18388 */
18389 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
18390 #endif
18391 +#ifdef CONFIG_CC_STACKPROTECTOR
18392 +#ifdef CONFIG_X86_32
18393 + if (!cpu)
18394 + per_cpu(stack_canary.canary, cpu) = canary;
18395 +#endif
18396 +#endif
18397 /*
18398 * Up to this point, the boot CPU has been using .init.data
18399 * area. Reload any changed state for the boot CPU.
18400 diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
18401 index 46a01bd..2e88e6d 100644
18402 --- a/arch/x86/kernel/signal.c
18403 +++ b/arch/x86/kernel/signal.c
18404 @@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsigned long sp)
18405 * Align the stack pointer according to the i386 ABI,
18406 * i.e. so that on function entry ((sp + 4) & 15) == 0.
18407 */
18408 - sp = ((sp + 4) & -16ul) - 4;
18409 + sp = ((sp - 12) & -16ul) - 4;
18410 #else /* !CONFIG_X86_32 */
18411 sp = round_down(sp, 16) - 8;
18412 #endif
18413 @@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
18414 * Return an always-bogus address instead so we will die with SIGSEGV.
18415 */
18416 if (onsigstack && !likely(on_sig_stack(sp)))
18417 - return (void __user *)-1L;
18418 + return (__force void __user *)-1L;
18419
18420 /* save i387 state */
18421 if (used_math() && save_i387_xstate(*fpstate) < 0)
18422 - return (void __user *)-1L;
18423 + return (__force void __user *)-1L;
18424
18425 return (void __user *)sp;
18426 }
18427 @@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
18428 }
18429
18430 if (current->mm->context.vdso)
18431 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
18432 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
18433 else
18434 - restorer = &frame->retcode;
18435 + restorer = (void __user *)&frame->retcode;
18436 if (ka->sa.sa_flags & SA_RESTORER)
18437 restorer = ka->sa.sa_restorer;
18438
18439 @@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
18440 * reasons and because gdb uses it as a signature to notice
18441 * signal handler stack frames.
18442 */
18443 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
18444 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
18445
18446 if (err)
18447 return -EFAULT;
18448 @@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
18449 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
18450
18451 /* Set up to return from userspace. */
18452 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
18453 + if (current->mm->context.vdso)
18454 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
18455 + else
18456 + restorer = (void __user *)&frame->retcode;
18457 if (ka->sa.sa_flags & SA_RESTORER)
18458 restorer = ka->sa.sa_restorer;
18459 put_user_ex(restorer, &frame->pretcode);
18460 @@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
18461 * reasons and because gdb uses it as a signature to notice
18462 * signal handler stack frames.
18463 */
18464 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
18465 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
18466 } put_user_catch(err);
18467
18468 if (err)
18469 @@ -765,7 +768,7 @@ static void do_signal(struct pt_regs *regs)
18470 * X86_32: vm86 regs switched out by assembly code before reaching
18471 * here, so testing against kernel CS suffices.
18472 */
18473 - if (!user_mode(regs))
18474 + if (!user_mode_novm(regs))
18475 return;
18476
18477 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
18478 diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
18479 index 66d250c..f1b10bd 100644
18480 --- a/arch/x86/kernel/smpboot.c
18481 +++ b/arch/x86/kernel/smpboot.c
18482 @@ -715,17 +715,20 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
18483 set_idle_for_cpu(cpu, c_idle.idle);
18484 do_rest:
18485 per_cpu(current_task, cpu) = c_idle.idle;
18486 + per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
18487 #ifdef CONFIG_X86_32
18488 /* Stack for startup_32 can be just as for start_secondary onwards */
18489 irq_ctx_init(cpu);
18490 #else
18491 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
18492 initial_gs = per_cpu_offset(cpu);
18493 - per_cpu(kernel_stack, cpu) =
18494 - (unsigned long)task_stack_page(c_idle.idle) -
18495 - KERNEL_STACK_OFFSET + THREAD_SIZE;
18496 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
18497 #endif
18498 +
18499 + pax_open_kernel();
18500 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
18501 + pax_close_kernel();
18502 +
18503 initial_code = (unsigned long)start_secondary;
18504 stack_start = c_idle.idle->thread.sp;
18505
18506 @@ -868,6 +871,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
18507
18508 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
18509
18510 +#ifdef CONFIG_PAX_PER_CPU_PGD
18511 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
18512 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
18513 + KERNEL_PGD_PTRS);
18514 +#endif
18515 +
18516 err = do_boot_cpu(apicid, cpu);
18517 if (err) {
18518 pr_debug("do_boot_cpu failed %d\n", err);
18519 diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
18520 index c346d11..d43b163 100644
18521 --- a/arch/x86/kernel/step.c
18522 +++ b/arch/x86/kernel/step.c
18523 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
18524 struct desc_struct *desc;
18525 unsigned long base;
18526
18527 - seg &= ~7UL;
18528 + seg >>= 3;
18529
18530 mutex_lock(&child->mm->context.lock);
18531 - if (unlikely((seg >> 3) >= child->mm->context.size))
18532 + if (unlikely(seg >= child->mm->context.size))
18533 addr = -1L; /* bogus selector, access would fault */
18534 else {
18535 desc = child->mm->context.ldt + seg;
18536 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
18537 addr += base;
18538 }
18539 mutex_unlock(&child->mm->context.lock);
18540 - }
18541 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
18542 + addr = ktla_ktva(addr);
18543
18544 return addr;
18545 }
18546 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
18547 unsigned char opcode[15];
18548 unsigned long addr = convert_ip_to_linear(child, regs);
18549
18550 + if (addr == -EINVAL)
18551 + return 0;
18552 +
18553 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
18554 for (i = 0; i < copied; i++) {
18555 switch (opcode[i]) {
18556 diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
18557 index 0b0cb5f..db6b9ed 100644
18558 --- a/arch/x86/kernel/sys_i386_32.c
18559 +++ b/arch/x86/kernel/sys_i386_32.c
18560 @@ -24,17 +24,224 @@
18561
18562 #include <asm/syscalls.h>
18563
18564 -/*
18565 - * Do a system call from kernel instead of calling sys_execve so we
18566 - * end up with proper pt_regs.
18567 - */
18568 -int kernel_execve(const char *filename,
18569 - const char *const argv[],
18570 - const char *const envp[])
18571 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
18572 {
18573 - long __res;
18574 - asm volatile ("int $0x80"
18575 - : "=a" (__res)
18576 - : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
18577 - return __res;
18578 + unsigned long pax_task_size = TASK_SIZE;
18579 +
18580 +#ifdef CONFIG_PAX_SEGMEXEC
18581 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
18582 + pax_task_size = SEGMEXEC_TASK_SIZE;
18583 +#endif
18584 +
18585 + if (len > pax_task_size || addr > pax_task_size - len)
18586 + return -EINVAL;
18587 +
18588 + return 0;
18589 +}
18590 +
18591 +unsigned long
18592 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
18593 + unsigned long len, unsigned long pgoff, unsigned long flags)
18594 +{
18595 + struct mm_struct *mm = current->mm;
18596 + struct vm_area_struct *vma;
18597 + unsigned long start_addr, pax_task_size = TASK_SIZE;
18598 +
18599 +#ifdef CONFIG_PAX_SEGMEXEC
18600 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
18601 + pax_task_size = SEGMEXEC_TASK_SIZE;
18602 +#endif
18603 +
18604 + pax_task_size -= PAGE_SIZE;
18605 +
18606 + if (len > pax_task_size)
18607 + return -ENOMEM;
18608 +
18609 + if (flags & MAP_FIXED)
18610 + return addr;
18611 +
18612 +#ifdef CONFIG_PAX_RANDMMAP
18613 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
18614 +#endif
18615 +
18616 + if (addr) {
18617 + addr = PAGE_ALIGN(addr);
18618 + if (pax_task_size - len >= addr) {
18619 + vma = find_vma(mm, addr);
18620 + if (check_heap_stack_gap(vma, addr, len))
18621 + return addr;
18622 + }
18623 + }
18624 + if (len > mm->cached_hole_size) {
18625 + start_addr = addr = mm->free_area_cache;
18626 + } else {
18627 + start_addr = addr = mm->mmap_base;
18628 + mm->cached_hole_size = 0;
18629 + }
18630 +
18631 +#ifdef CONFIG_PAX_PAGEEXEC
18632 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
18633 + start_addr = 0x00110000UL;
18634 +
18635 +#ifdef CONFIG_PAX_RANDMMAP
18636 + if (mm->pax_flags & MF_PAX_RANDMMAP)
18637 + start_addr += mm->delta_mmap & 0x03FFF000UL;
18638 +#endif
18639 +
18640 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
18641 + start_addr = addr = mm->mmap_base;
18642 + else
18643 + addr = start_addr;
18644 + }
18645 +#endif
18646 +
18647 +full_search:
18648 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
18649 + /* At this point: (!vma || addr < vma->vm_end). */
18650 + if (pax_task_size - len < addr) {
18651 + /*
18652 + * Start a new search - just in case we missed
18653 + * some holes.
18654 + */
18655 + if (start_addr != mm->mmap_base) {
18656 + start_addr = addr = mm->mmap_base;
18657 + mm->cached_hole_size = 0;
18658 + goto full_search;
18659 + }
18660 + return -ENOMEM;
18661 + }
18662 + if (check_heap_stack_gap(vma, addr, len))
18663 + break;
18664 + if (addr + mm->cached_hole_size < vma->vm_start)
18665 + mm->cached_hole_size = vma->vm_start - addr;
18666 + addr = vma->vm_end;
18667 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
18668 + start_addr = addr = mm->mmap_base;
18669 + mm->cached_hole_size = 0;
18670 + goto full_search;
18671 + }
18672 + }
18673 +
18674 + /*
18675 + * Remember the place where we stopped the search:
18676 + */
18677 + mm->free_area_cache = addr + len;
18678 + return addr;
18679 +}
18680 +
18681 +unsigned long
18682 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
18683 + const unsigned long len, const unsigned long pgoff,
18684 + const unsigned long flags)
18685 +{
18686 + struct vm_area_struct *vma;
18687 + struct mm_struct *mm = current->mm;
18688 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
18689 +
18690 +#ifdef CONFIG_PAX_SEGMEXEC
18691 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
18692 + pax_task_size = SEGMEXEC_TASK_SIZE;
18693 +#endif
18694 +
18695 + pax_task_size -= PAGE_SIZE;
18696 +
18697 + /* requested length too big for entire address space */
18698 + if (len > pax_task_size)
18699 + return -ENOMEM;
18700 +
18701 + if (flags & MAP_FIXED)
18702 + return addr;
18703 +
18704 +#ifdef CONFIG_PAX_PAGEEXEC
18705 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
18706 + goto bottomup;
18707 +#endif
18708 +
18709 +#ifdef CONFIG_PAX_RANDMMAP
18710 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
18711 +#endif
18712 +
18713 + /* requesting a specific address */
18714 + if (addr) {
18715 + addr = PAGE_ALIGN(addr);
18716 + if (pax_task_size - len >= addr) {
18717 + vma = find_vma(mm, addr);
18718 + if (check_heap_stack_gap(vma, addr, len))
18719 + return addr;
18720 + }
18721 + }
18722 +
18723 + /* check if free_area_cache is useful for us */
18724 + if (len <= mm->cached_hole_size) {
18725 + mm->cached_hole_size = 0;
18726 + mm->free_area_cache = mm->mmap_base;
18727 + }
18728 +
18729 + /* either no address requested or can't fit in requested address hole */
18730 + addr = mm->free_area_cache;
18731 +
18732 + /* make sure it can fit in the remaining address space */
18733 + if (addr > len) {
18734 + vma = find_vma(mm, addr-len);
18735 + if (check_heap_stack_gap(vma, addr - len, len))
18736 + /* remember the address as a hint for next time */
18737 + return (mm->free_area_cache = addr-len);
18738 + }
18739 +
18740 + if (mm->mmap_base < len)
18741 + goto bottomup;
18742 +
18743 + addr = mm->mmap_base-len;
18744 +
18745 + do {
18746 + /*
18747 + * Lookup failure means no vma is above this address,
18748 + * else if new region fits below vma->vm_start,
18749 + * return with success:
18750 + */
18751 + vma = find_vma(mm, addr);
18752 + if (check_heap_stack_gap(vma, addr, len))
18753 + /* remember the address as a hint for next time */
18754 + return (mm->free_area_cache = addr);
18755 +
18756 + /* remember the largest hole we saw so far */
18757 + if (addr + mm->cached_hole_size < vma->vm_start)
18758 + mm->cached_hole_size = vma->vm_start - addr;
18759 +
18760 + /* try just below the current vma->vm_start */
18761 + addr = skip_heap_stack_gap(vma, len);
18762 + } while (!IS_ERR_VALUE(addr));
18763 +
18764 +bottomup:
18765 + /*
18766 + * A failed mmap() very likely causes application failure,
18767 + * so fall back to the bottom-up function here. This scenario
18768 + * can happen with large stack limits and large mmap()
18769 + * allocations.
18770 + */
18771 +
18772 +#ifdef CONFIG_PAX_SEGMEXEC
18773 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
18774 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
18775 + else
18776 +#endif
18777 +
18778 + mm->mmap_base = TASK_UNMAPPED_BASE;
18779 +
18780 +#ifdef CONFIG_PAX_RANDMMAP
18781 + if (mm->pax_flags & MF_PAX_RANDMMAP)
18782 + mm->mmap_base += mm->delta_mmap;
18783 +#endif
18784 +
18785 + mm->free_area_cache = mm->mmap_base;
18786 + mm->cached_hole_size = ~0UL;
18787 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
18788 + /*
18789 + * Restore the topdown base:
18790 + */
18791 + mm->mmap_base = base;
18792 + mm->free_area_cache = base;
18793 + mm->cached_hole_size = ~0UL;
18794 +
18795 + return addr;
18796 }
18797 diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
18798 index 0514890..3dbebce 100644
18799 --- a/arch/x86/kernel/sys_x86_64.c
18800 +++ b/arch/x86/kernel/sys_x86_64.c
18801 @@ -95,8 +95,8 @@ out:
18802 return error;
18803 }
18804
18805 -static void find_start_end(unsigned long flags, unsigned long *begin,
18806 - unsigned long *end)
18807 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
18808 + unsigned long *begin, unsigned long *end)
18809 {
18810 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
18811 unsigned long new_begin;
18812 @@ -115,7 +115,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
18813 *begin = new_begin;
18814 }
18815 } else {
18816 - *begin = TASK_UNMAPPED_BASE;
18817 + *begin = mm->mmap_base;
18818 *end = TASK_SIZE;
18819 }
18820 }
18821 @@ -132,16 +132,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
18822 if (flags & MAP_FIXED)
18823 return addr;
18824
18825 - find_start_end(flags, &begin, &end);
18826 + find_start_end(mm, flags, &begin, &end);
18827
18828 if (len > end)
18829 return -ENOMEM;
18830
18831 +#ifdef CONFIG_PAX_RANDMMAP
18832 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
18833 +#endif
18834 +
18835 if (addr) {
18836 addr = PAGE_ALIGN(addr);
18837 vma = find_vma(mm, addr);
18838 - if (end - len >= addr &&
18839 - (!vma || addr + len <= vma->vm_start))
18840 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
18841 return addr;
18842 }
18843 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
18844 @@ -172,7 +175,7 @@ full_search:
18845 }
18846 return -ENOMEM;
18847 }
18848 - if (!vma || addr + len <= vma->vm_start) {
18849 + if (check_heap_stack_gap(vma, addr, len)) {
18850 /*
18851 * Remember the place where we stopped the search:
18852 */
18853 @@ -195,7 +198,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
18854 {
18855 struct vm_area_struct *vma;
18856 struct mm_struct *mm = current->mm;
18857 - unsigned long addr = addr0;
18858 + unsigned long base = mm->mmap_base, addr = addr0;
18859
18860 /* requested length too big for entire address space */
18861 if (len > TASK_SIZE)
18862 @@ -208,13 +211,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
18863 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
18864 goto bottomup;
18865
18866 +#ifdef CONFIG_PAX_RANDMMAP
18867 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
18868 +#endif
18869 +
18870 /* requesting a specific address */
18871 if (addr) {
18872 addr = PAGE_ALIGN(addr);
18873 - vma = find_vma(mm, addr);
18874 - if (TASK_SIZE - len >= addr &&
18875 - (!vma || addr + len <= vma->vm_start))
18876 - return addr;
18877 + if (TASK_SIZE - len >= addr) {
18878 + vma = find_vma(mm, addr);
18879 + if (check_heap_stack_gap(vma, addr, len))
18880 + return addr;
18881 + }
18882 }
18883
18884 /* check if free_area_cache is useful for us */
18885 @@ -232,7 +240,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
18886 ALIGN_TOPDOWN);
18887
18888 vma = find_vma(mm, tmp_addr);
18889 - if (!vma || tmp_addr + len <= vma->vm_start)
18890 + if (check_heap_stack_gap(vma, tmp_addr, len))
18891 /* remember the address as a hint for next time */
18892 return mm->free_area_cache = tmp_addr;
18893 }
18894 @@ -251,7 +259,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
18895 * return with success:
18896 */
18897 vma = find_vma(mm, addr);
18898 - if (!vma || addr+len <= vma->vm_start)
18899 + if (check_heap_stack_gap(vma, addr, len))
18900 /* remember the address as a hint for next time */
18901 return mm->free_area_cache = addr;
18902
18903 @@ -260,8 +268,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
18904 mm->cached_hole_size = vma->vm_start - addr;
18905
18906 /* try just below the current vma->vm_start */
18907 - addr = vma->vm_start-len;
18908 - } while (len < vma->vm_start);
18909 + addr = skip_heap_stack_gap(vma, len);
18910 + } while (!IS_ERR_VALUE(addr));
18911
18912 bottomup:
18913 /*
18914 @@ -270,13 +278,21 @@ bottomup:
18915 * can happen with large stack limits and large mmap()
18916 * allocations.
18917 */
18918 + mm->mmap_base = TASK_UNMAPPED_BASE;
18919 +
18920 +#ifdef CONFIG_PAX_RANDMMAP
18921 + if (mm->pax_flags & MF_PAX_RANDMMAP)
18922 + mm->mmap_base += mm->delta_mmap;
18923 +#endif
18924 +
18925 + mm->free_area_cache = mm->mmap_base;
18926 mm->cached_hole_size = ~0UL;
18927 - mm->free_area_cache = TASK_UNMAPPED_BASE;
18928 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
18929 /*
18930 * Restore the topdown base:
18931 */
18932 - mm->free_area_cache = mm->mmap_base;
18933 + mm->mmap_base = base;
18934 + mm->free_area_cache = base;
18935 mm->cached_hole_size = ~0UL;
18936
18937 return addr;
18938 diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
18939 index e2410e2..4fe3fbc 100644
18940 --- a/arch/x86/kernel/tboot.c
18941 +++ b/arch/x86/kernel/tboot.c
18942 @@ -219,7 +219,7 @@ static int tboot_setup_sleep(void)
18943
18944 void tboot_shutdown(u32 shutdown_type)
18945 {
18946 - void (*shutdown)(void);
18947 + void (* __noreturn shutdown)(void);
18948
18949 if (!tboot_enabled())
18950 return;
18951 @@ -241,7 +241,7 @@ void tboot_shutdown(u32 shutdown_type)
18952
18953 switch_to_tboot_pt();
18954
18955 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
18956 + shutdown = (void *)tboot->shutdown_entry;
18957 shutdown();
18958
18959 /* should not reach here */
18960 @@ -298,7 +298,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
18961 tboot_shutdown(acpi_shutdown_map[sleep_state]);
18962 }
18963
18964 -static atomic_t ap_wfs_count;
18965 +static atomic_unchecked_t ap_wfs_count;
18966
18967 static int tboot_wait_for_aps(int num_aps)
18968 {
18969 @@ -322,9 +322,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
18970 {
18971 switch (action) {
18972 case CPU_DYING:
18973 - atomic_inc(&ap_wfs_count);
18974 + atomic_inc_unchecked(&ap_wfs_count);
18975 if (num_online_cpus() == 1)
18976 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
18977 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
18978 return NOTIFY_BAD;
18979 break;
18980 }
18981 @@ -343,7 +343,7 @@ static __init int tboot_late_init(void)
18982
18983 tboot_create_trampoline();
18984
18985 - atomic_set(&ap_wfs_count, 0);
18986 + atomic_set_unchecked(&ap_wfs_count, 0);
18987 register_hotcpu_notifier(&tboot_cpu_notifier);
18988 return 0;
18989 }
18990 diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
18991 index dd5fbf4..b7f2232 100644
18992 --- a/arch/x86/kernel/time.c
18993 +++ b/arch/x86/kernel/time.c
18994 @@ -31,9 +31,9 @@ unsigned long profile_pc(struct pt_regs *regs)
18995 {
18996 unsigned long pc = instruction_pointer(regs);
18997
18998 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
18999 + if (!user_mode(regs) && in_lock_functions(pc)) {
19000 #ifdef CONFIG_FRAME_POINTER
19001 - return *(unsigned long *)(regs->bp + sizeof(long));
19002 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
19003 #else
19004 unsigned long *sp =
19005 (unsigned long *)kernel_stack_pointer(regs);
19006 @@ -42,11 +42,17 @@ unsigned long profile_pc(struct pt_regs *regs)
19007 * or above a saved flags. Eflags has bits 22-31 zero,
19008 * kernel addresses don't.
19009 */
19010 +
19011 +#ifdef CONFIG_PAX_KERNEXEC
19012 + return ktla_ktva(sp[0]);
19013 +#else
19014 if (sp[0] >> 22)
19015 return sp[0];
19016 if (sp[1] >> 22)
19017 return sp[1];
19018 #endif
19019 +
19020 +#endif
19021 }
19022 return pc;
19023 }
19024 diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
19025 index 6bb7b85..dd853e1 100644
19026 --- a/arch/x86/kernel/tls.c
19027 +++ b/arch/x86/kernel/tls.c
19028 @@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
19029 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
19030 return -EINVAL;
19031
19032 +#ifdef CONFIG_PAX_SEGMEXEC
19033 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
19034 + return -EINVAL;
19035 +#endif
19036 +
19037 set_tls_desc(p, idx, &info, 1);
19038
19039 return 0;
19040 diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
19041 index 451c0a7..e57f551 100644
19042 --- a/arch/x86/kernel/trampoline_32.S
19043 +++ b/arch/x86/kernel/trampoline_32.S
19044 @@ -32,6 +32,12 @@
19045 #include <asm/segment.h>
19046 #include <asm/page_types.h>
19047
19048 +#ifdef CONFIG_PAX_KERNEXEC
19049 +#define ta(X) (X)
19050 +#else
19051 +#define ta(X) ((X) - __PAGE_OFFSET)
19052 +#endif
19053 +
19054 #ifdef CONFIG_SMP
19055
19056 .section ".x86_trampoline","a"
19057 @@ -62,7 +68,7 @@ r_base = .
19058 inc %ax # protected mode (PE) bit
19059 lmsw %ax # into protected mode
19060 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
19061 - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
19062 + ljmpl $__BOOT_CS, $ta(startup_32_smp)
19063
19064 # These need to be in the same 64K segment as the above;
19065 # hence we don't use the boot_gdt_descr defined in head.S
19066 diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
19067 index 09ff517..df19fbff 100644
19068 --- a/arch/x86/kernel/trampoline_64.S
19069 +++ b/arch/x86/kernel/trampoline_64.S
19070 @@ -90,7 +90,7 @@ startup_32:
19071 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
19072 movl %eax, %ds
19073
19074 - movl $X86_CR4_PAE, %eax
19075 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
19076 movl %eax, %cr4 # Enable PAE mode
19077
19078 # Setup trampoline 4 level pagetables
19079 @@ -138,7 +138,7 @@ tidt:
19080 # so the kernel can live anywhere
19081 .balign 4
19082 tgdt:
19083 - .short tgdt_end - tgdt # gdt limit
19084 + .short tgdt_end - tgdt - 1 # gdt limit
19085 .long tgdt - r_base
19086 .short 0
19087 .quad 0x00cf9b000000ffff # __KERNEL32_CS
19088 diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
19089 index 4bbe04d..41d0943 100644
19090 --- a/arch/x86/kernel/traps.c
19091 +++ b/arch/x86/kernel/traps.c
19092 @@ -70,12 +70,6 @@ asmlinkage int system_call(void);
19093
19094 /* Do we ignore FPU interrupts ? */
19095 char ignore_fpu_irq;
19096 -
19097 -/*
19098 - * The IDT has to be page-aligned to simplify the Pentium
19099 - * F0 0F bug workaround.
19100 - */
19101 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
19102 #endif
19103
19104 DECLARE_BITMAP(used_vectors, NR_VECTORS);
19105 @@ -108,13 +102,13 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
19106 }
19107
19108 static void __kprobes
19109 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
19110 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
19111 long error_code, siginfo_t *info)
19112 {
19113 struct task_struct *tsk = current;
19114
19115 #ifdef CONFIG_X86_32
19116 - if (regs->flags & X86_VM_MASK) {
19117 + if (v8086_mode(regs)) {
19118 /*
19119 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
19120 * On nmi (interrupt 2), do_trap should not be called.
19121 @@ -125,7 +119,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
19122 }
19123 #endif
19124
19125 - if (!user_mode(regs))
19126 + if (!user_mode_novm(regs))
19127 goto kernel_trap;
19128
19129 #ifdef CONFIG_X86_32
19130 @@ -148,7 +142,7 @@ trap_signal:
19131 printk_ratelimit()) {
19132 printk(KERN_INFO
19133 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
19134 - tsk->comm, tsk->pid, str,
19135 + tsk->comm, task_pid_nr(tsk), str,
19136 regs->ip, regs->sp, error_code);
19137 print_vma_addr(" in ", regs->ip);
19138 printk("\n");
19139 @@ -165,8 +159,20 @@ kernel_trap:
19140 if (!fixup_exception(regs)) {
19141 tsk->thread.error_code = error_code;
19142 tsk->thread.trap_no = trapnr;
19143 +
19144 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19145 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
19146 + str = "PAX: suspicious stack segment fault";
19147 +#endif
19148 +
19149 die(str, regs, error_code);
19150 }
19151 +
19152 +#ifdef CONFIG_PAX_REFCOUNT
19153 + if (trapnr == 4)
19154 + pax_report_refcount_overflow(regs);
19155 +#endif
19156 +
19157 return;
19158
19159 #ifdef CONFIG_X86_32
19160 @@ -255,14 +261,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
19161 conditional_sti(regs);
19162
19163 #ifdef CONFIG_X86_32
19164 - if (regs->flags & X86_VM_MASK)
19165 + if (v8086_mode(regs))
19166 goto gp_in_vm86;
19167 #endif
19168
19169 tsk = current;
19170 - if (!user_mode(regs))
19171 + if (!user_mode_novm(regs))
19172 goto gp_in_kernel;
19173
19174 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
19175 + if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
19176 + struct mm_struct *mm = tsk->mm;
19177 + unsigned long limit;
19178 +
19179 + down_write(&mm->mmap_sem);
19180 + limit = mm->context.user_cs_limit;
19181 + if (limit < TASK_SIZE) {
19182 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
19183 + up_write(&mm->mmap_sem);
19184 + return;
19185 + }
19186 + up_write(&mm->mmap_sem);
19187 + }
19188 +#endif
19189 +
19190 tsk->thread.error_code = error_code;
19191 tsk->thread.trap_no = 13;
19192
19193 @@ -295,6 +317,13 @@ gp_in_kernel:
19194 if (notify_die(DIE_GPF, "general protection fault", regs,
19195 error_code, 13, SIGSEGV) == NOTIFY_STOP)
19196 return;
19197 +
19198 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19199 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
19200 + die("PAX: suspicious general protection fault", regs, error_code);
19201 + else
19202 +#endif
19203 +
19204 die("general protection fault", regs, error_code);
19205 }
19206
19207 @@ -421,7 +450,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
19208 /* It's safe to allow irq's after DR6 has been saved */
19209 preempt_conditional_sti(regs);
19210
19211 - if (regs->flags & X86_VM_MASK) {
19212 + if (v8086_mode(regs)) {
19213 handle_vm86_trap((struct kernel_vm86_regs *) regs,
19214 error_code, 1);
19215 preempt_conditional_cli(regs);
19216 @@ -436,7 +465,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
19217 * We already checked v86 mode above, so we can check for kernel mode
19218 * by just checking the CPL of CS.
19219 */
19220 - if ((dr6 & DR_STEP) && !user_mode(regs)) {
19221 + if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
19222 tsk->thread.debugreg6 &= ~DR_STEP;
19223 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
19224 regs->flags &= ~X86_EFLAGS_TF;
19225 @@ -466,7 +495,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
19226 return;
19227 conditional_sti(regs);
19228
19229 - if (!user_mode_vm(regs))
19230 + if (!user_mode(regs))
19231 {
19232 if (!fixup_exception(regs)) {
19233 task->thread.error_code = error_code;
19234 diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
19235 index b9242ba..50c5edd 100644
19236 --- a/arch/x86/kernel/verify_cpu.S
19237 +++ b/arch/x86/kernel/verify_cpu.S
19238 @@ -20,6 +20,7 @@
19239 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
19240 * arch/x86/kernel/trampoline_64.S: secondary processor verification
19241 * arch/x86/kernel/head_32.S: processor startup
19242 + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
19243 *
19244 * verify_cpu, returns the status of longmode and SSE in register %eax.
19245 * 0: Success 1: Failure
19246 diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
19247 index b466cab..a0df083 100644
19248 --- a/arch/x86/kernel/vm86_32.c
19249 +++ b/arch/x86/kernel/vm86_32.c
19250 @@ -41,6 +41,7 @@
19251 #include <linux/ptrace.h>
19252 #include <linux/audit.h>
19253 #include <linux/stddef.h>
19254 +#include <linux/grsecurity.h>
19255
19256 #include <asm/uaccess.h>
19257 #include <asm/io.h>
19258 @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
19259 do_exit(SIGSEGV);
19260 }
19261
19262 - tss = &per_cpu(init_tss, get_cpu());
19263 + tss = init_tss + get_cpu();
19264 current->thread.sp0 = current->thread.saved_sp0;
19265 current->thread.sysenter_cs = __KERNEL_CS;
19266 load_sp0(tss, &current->thread);
19267 @@ -208,6 +209,13 @@ int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
19268 struct task_struct *tsk;
19269 int tmp, ret = -EPERM;
19270
19271 +#ifdef CONFIG_GRKERNSEC_VM86
19272 + if (!capable(CAP_SYS_RAWIO)) {
19273 + gr_handle_vm86();
19274 + goto out;
19275 + }
19276 +#endif
19277 +
19278 tsk = current;
19279 if (tsk->thread.saved_sp0)
19280 goto out;
19281 @@ -238,6 +246,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
19282 int tmp, ret;
19283 struct vm86plus_struct __user *v86;
19284
19285 +#ifdef CONFIG_GRKERNSEC_VM86
19286 + if (!capable(CAP_SYS_RAWIO)) {
19287 + gr_handle_vm86();
19288 + ret = -EPERM;
19289 + goto out;
19290 + }
19291 +#endif
19292 +
19293 tsk = current;
19294 switch (cmd) {
19295 case VM86_REQUEST_IRQ:
19296 @@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
19297 tsk->thread.saved_fs = info->regs32->fs;
19298 tsk->thread.saved_gs = get_user_gs(info->regs32);
19299
19300 - tss = &per_cpu(init_tss, get_cpu());
19301 + tss = init_tss + get_cpu();
19302 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
19303 if (cpu_has_sep)
19304 tsk->thread.sysenter_cs = 0;
19305 @@ -531,7 +547,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
19306 goto cannot_handle;
19307 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
19308 goto cannot_handle;
19309 - intr_ptr = (unsigned long __user *) (i << 2);
19310 + intr_ptr = (__force unsigned long __user *) (i << 2);
19311 if (get_user(segoffs, intr_ptr))
19312 goto cannot_handle;
19313 if ((segoffs >> 16) == BIOSSEG)
19314 diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
19315 index 0f703f1..9e15f64 100644
19316 --- a/arch/x86/kernel/vmlinux.lds.S
19317 +++ b/arch/x86/kernel/vmlinux.lds.S
19318 @@ -26,6 +26,13 @@
19319 #include <asm/page_types.h>
19320 #include <asm/cache.h>
19321 #include <asm/boot.h>
19322 +#include <asm/segment.h>
19323 +
19324 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19325 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
19326 +#else
19327 +#define __KERNEL_TEXT_OFFSET 0
19328 +#endif
19329
19330 #undef i386 /* in case the preprocessor is a 32bit one */
19331
19332 @@ -69,30 +76,43 @@ jiffies_64 = jiffies;
19333
19334 PHDRS {
19335 text PT_LOAD FLAGS(5); /* R_E */
19336 +#ifdef CONFIG_X86_32
19337 + module PT_LOAD FLAGS(5); /* R_E */
19338 +#endif
19339 +#ifdef CONFIG_XEN
19340 + rodata PT_LOAD FLAGS(5); /* R_E */
19341 +#else
19342 + rodata PT_LOAD FLAGS(4); /* R__ */
19343 +#endif
19344 data PT_LOAD FLAGS(6); /* RW_ */
19345 -#ifdef CONFIG_X86_64
19346 + init.begin PT_LOAD FLAGS(6); /* RW_ */
19347 #ifdef CONFIG_SMP
19348 percpu PT_LOAD FLAGS(6); /* RW_ */
19349 #endif
19350 + text.init PT_LOAD FLAGS(5); /* R_E */
19351 + text.exit PT_LOAD FLAGS(5); /* R_E */
19352 init PT_LOAD FLAGS(7); /* RWE */
19353 -#endif
19354 note PT_NOTE FLAGS(0); /* ___ */
19355 }
19356
19357 SECTIONS
19358 {
19359 #ifdef CONFIG_X86_32
19360 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
19361 - phys_startup_32 = startup_32 - LOAD_OFFSET;
19362 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
19363 #else
19364 - . = __START_KERNEL;
19365 - phys_startup_64 = startup_64 - LOAD_OFFSET;
19366 + . = __START_KERNEL;
19367 #endif
19368
19369 /* Text and read-only data */
19370 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
19371 - _text = .;
19372 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
19373 /* bootstrapping code */
19374 +#ifdef CONFIG_X86_32
19375 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
19376 +#else
19377 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
19378 +#endif
19379 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
19380 + _text = .;
19381 HEAD_TEXT
19382 #ifdef CONFIG_X86_32
19383 . = ALIGN(PAGE_SIZE);
19384 @@ -108,13 +128,47 @@ SECTIONS
19385 IRQENTRY_TEXT
19386 *(.fixup)
19387 *(.gnu.warning)
19388 - /* End of text section */
19389 - _etext = .;
19390 } :text = 0x9090
19391
19392 - NOTES :text :note
19393 + . += __KERNEL_TEXT_OFFSET;
19394
19395 - EXCEPTION_TABLE(16) :text = 0x9090
19396 +#ifdef CONFIG_X86_32
19397 + . = ALIGN(PAGE_SIZE);
19398 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
19399 +
19400 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
19401 + MODULES_EXEC_VADDR = .;
19402 + BYTE(0)
19403 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
19404 + . = ALIGN(HPAGE_SIZE);
19405 + MODULES_EXEC_END = . - 1;
19406 +#endif
19407 +
19408 + } :module
19409 +#endif
19410 +
19411 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
19412 + /* End of text section */
19413 + _etext = . - __KERNEL_TEXT_OFFSET;
19414 + }
19415 +
19416 +#ifdef CONFIG_X86_32
19417 + . = ALIGN(PAGE_SIZE);
19418 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
19419 + *(.idt)
19420 + . = ALIGN(PAGE_SIZE);
19421 + *(.empty_zero_page)
19422 + *(.initial_pg_fixmap)
19423 + *(.initial_pg_pmd)
19424 + *(.initial_page_table)
19425 + *(.swapper_pg_dir)
19426 + } :rodata
19427 +#endif
19428 +
19429 + . = ALIGN(PAGE_SIZE);
19430 + NOTES :rodata :note
19431 +
19432 + EXCEPTION_TABLE(16) :rodata
19433
19434 #if defined(CONFIG_DEBUG_RODATA)
19435 /* .text should occupy whole number of pages */
19436 @@ -126,16 +180,20 @@ SECTIONS
19437
19438 /* Data */
19439 .data : AT(ADDR(.data) - LOAD_OFFSET) {
19440 +
19441 +#ifdef CONFIG_PAX_KERNEXEC
19442 + . = ALIGN(HPAGE_SIZE);
19443 +#else
19444 + . = ALIGN(PAGE_SIZE);
19445 +#endif
19446 +
19447 /* Start of data section */
19448 _sdata = .;
19449
19450 /* init_task */
19451 INIT_TASK_DATA(THREAD_SIZE)
19452
19453 -#ifdef CONFIG_X86_32
19454 - /* 32 bit has nosave before _edata */
19455 NOSAVE_DATA
19456 -#endif
19457
19458 PAGE_ALIGNED_DATA(PAGE_SIZE)
19459
19460 @@ -176,12 +234,19 @@ SECTIONS
19461 #endif /* CONFIG_X86_64 */
19462
19463 /* Init code and data - will be freed after init */
19464 - . = ALIGN(PAGE_SIZE);
19465 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
19466 + BYTE(0)
19467 +
19468 +#ifdef CONFIG_PAX_KERNEXEC
19469 + . = ALIGN(HPAGE_SIZE);
19470 +#else
19471 + . = ALIGN(PAGE_SIZE);
19472 +#endif
19473 +
19474 __init_begin = .; /* paired with __init_end */
19475 - }
19476 + } :init.begin
19477
19478 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
19479 +#ifdef CONFIG_SMP
19480 /*
19481 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
19482 * output PHDR, so the next output section - .init.text - should
19483 @@ -190,12 +255,27 @@ SECTIONS
19484 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
19485 #endif
19486
19487 - INIT_TEXT_SECTION(PAGE_SIZE)
19488 -#ifdef CONFIG_X86_64
19489 - :init
19490 -#endif
19491 + . = ALIGN(PAGE_SIZE);
19492 + init_begin = .;
19493 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
19494 + VMLINUX_SYMBOL(_sinittext) = .;
19495 + INIT_TEXT
19496 + VMLINUX_SYMBOL(_einittext) = .;
19497 + . = ALIGN(PAGE_SIZE);
19498 + } :text.init
19499
19500 - INIT_DATA_SECTION(16)
19501 + /*
19502 + * .exit.text is discard at runtime, not link time, to deal with
19503 + * references from .altinstructions and .eh_frame
19504 + */
19505 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
19506 + EXIT_TEXT
19507 + . = ALIGN(16);
19508 + } :text.exit
19509 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
19510 +
19511 + . = ALIGN(PAGE_SIZE);
19512 + INIT_DATA_SECTION(16) :init
19513
19514 /*
19515 * Code and data for a variety of lowlevel trampolines, to be
19516 @@ -269,19 +349,12 @@ SECTIONS
19517 }
19518
19519 . = ALIGN(8);
19520 - /*
19521 - * .exit.text is discard at runtime, not link time, to deal with
19522 - * references from .altinstructions and .eh_frame
19523 - */
19524 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
19525 - EXIT_TEXT
19526 - }
19527
19528 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
19529 EXIT_DATA
19530 }
19531
19532 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
19533 +#ifndef CONFIG_SMP
19534 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
19535 #endif
19536
19537 @@ -300,16 +373,10 @@ SECTIONS
19538 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
19539 __smp_locks = .;
19540 *(.smp_locks)
19541 - . = ALIGN(PAGE_SIZE);
19542 __smp_locks_end = .;
19543 + . = ALIGN(PAGE_SIZE);
19544 }
19545
19546 -#ifdef CONFIG_X86_64
19547 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
19548 - NOSAVE_DATA
19549 - }
19550 -#endif
19551 -
19552 /* BSS */
19553 . = ALIGN(PAGE_SIZE);
19554 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
19555 @@ -325,6 +392,7 @@ SECTIONS
19556 __brk_base = .;
19557 . += 64 * 1024; /* 64k alignment slop space */
19558 *(.brk_reservation) /* areas brk users have reserved */
19559 + . = ALIGN(HPAGE_SIZE);
19560 __brk_limit = .;
19561 }
19562
19563 @@ -351,13 +419,12 @@ SECTIONS
19564 * for the boot processor.
19565 */
19566 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
19567 -INIT_PER_CPU(gdt_page);
19568 INIT_PER_CPU(irq_stack_union);
19569
19570 /*
19571 * Build-time check on the image size:
19572 */
19573 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
19574 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
19575 "kernel image bigger than KERNEL_IMAGE_SIZE");
19576
19577 #ifdef CONFIG_SMP
19578 diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
19579 index b07ba93..a212969 100644
19580 --- a/arch/x86/kernel/vsyscall_64.c
19581 +++ b/arch/x86/kernel/vsyscall_64.c
19582 @@ -57,15 +57,13 @@ DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
19583 .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
19584 };
19585
19586 -static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
19587 +static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
19588
19589 static int __init vsyscall_setup(char *str)
19590 {
19591 if (str) {
19592 if (!strcmp("emulate", str))
19593 vsyscall_mode = EMULATE;
19594 - else if (!strcmp("native", str))
19595 - vsyscall_mode = NATIVE;
19596 else if (!strcmp("none", str))
19597 vsyscall_mode = NONE;
19598 else
19599 @@ -207,7 +205,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
19600
19601 tsk = current;
19602 if (seccomp_mode(&tsk->seccomp))
19603 - do_exit(SIGKILL);
19604 + do_group_exit(SIGKILL);
19605
19606 /*
19607 * With a real vsyscall, page faults cause SIGSEGV. We want to
19608 @@ -279,8 +277,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
19609 return true;
19610
19611 sigsegv:
19612 - force_sig(SIGSEGV, current);
19613 - return true;
19614 + do_group_exit(SIGKILL);
19615 }
19616
19617 /*
19618 @@ -333,10 +330,7 @@ void __init map_vsyscall(void)
19619 extern char __vvar_page;
19620 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
19621
19622 - __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
19623 - vsyscall_mode == NATIVE
19624 - ? PAGE_KERNEL_VSYSCALL
19625 - : PAGE_KERNEL_VVAR);
19626 + __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
19627 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
19628 (unsigned long)VSYSCALL_START);
19629
19630 diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
19631 index 9796c2f..f686fbf 100644
19632 --- a/arch/x86/kernel/x8664_ksyms_64.c
19633 +++ b/arch/x86/kernel/x8664_ksyms_64.c
19634 @@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
19635 EXPORT_SYMBOL(copy_user_generic_string);
19636 EXPORT_SYMBOL(copy_user_generic_unrolled);
19637 EXPORT_SYMBOL(__copy_user_nocache);
19638 -EXPORT_SYMBOL(_copy_from_user);
19639 -EXPORT_SYMBOL(_copy_to_user);
19640
19641 EXPORT_SYMBOL(copy_page);
19642 EXPORT_SYMBOL(clear_page);
19643 diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
19644 index 7110911..e8cdee5 100644
19645 --- a/arch/x86/kernel/xsave.c
19646 +++ b/arch/x86/kernel/xsave.c
19647 @@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
19648 fx_sw_user->xstate_size > fx_sw_user->extended_size)
19649 return -EINVAL;
19650
19651 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
19652 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
19653 fx_sw_user->extended_size -
19654 FP_XSTATE_MAGIC2_SIZE));
19655 if (err)
19656 @@ -266,7 +266,7 @@ fx_only:
19657 * the other extended state.
19658 */
19659 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
19660 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
19661 + return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
19662 }
19663
19664 /*
19665 @@ -295,7 +295,7 @@ int restore_i387_xstate(void __user *buf)
19666 if (use_xsave())
19667 err = restore_user_xstate(buf);
19668 else
19669 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
19670 + err = fxrstor_checking((struct i387_fxsave_struct __force_kernel *)
19671 buf);
19672 if (unlikely(err)) {
19673 /*
19674 diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
19675 index 89b02bf..0f6511d 100644
19676 --- a/arch/x86/kvm/cpuid.c
19677 +++ b/arch/x86/kvm/cpuid.c
19678 @@ -124,15 +124,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
19679 struct kvm_cpuid2 *cpuid,
19680 struct kvm_cpuid_entry2 __user *entries)
19681 {
19682 - int r;
19683 + int r, i;
19684
19685 r = -E2BIG;
19686 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
19687 goto out;
19688 r = -EFAULT;
19689 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
19690 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
19691 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
19692 goto out;
19693 + for (i = 0; i < cpuid->nent; ++i) {
19694 + struct kvm_cpuid_entry2 cpuid_entry;
19695 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
19696 + goto out;
19697 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
19698 + }
19699 vcpu->arch.cpuid_nent = cpuid->nent;
19700 kvm_apic_set_version(vcpu);
19701 kvm_x86_ops->cpuid_update(vcpu);
19702 @@ -147,15 +152,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
19703 struct kvm_cpuid2 *cpuid,
19704 struct kvm_cpuid_entry2 __user *entries)
19705 {
19706 - int r;
19707 + int r, i;
19708
19709 r = -E2BIG;
19710 if (cpuid->nent < vcpu->arch.cpuid_nent)
19711 goto out;
19712 r = -EFAULT;
19713 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
19714 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
19715 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
19716 goto out;
19717 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
19718 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
19719 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
19720 + goto out;
19721 + }
19722 return 0;
19723
19724 out:
19725 diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
19726 index 0982507..7f6d72f 100644
19727 --- a/arch/x86/kvm/emulate.c
19728 +++ b/arch/x86/kvm/emulate.c
19729 @@ -250,6 +250,7 @@ struct gprefix {
19730
19731 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
19732 do { \
19733 + unsigned long _tmp; \
19734 __asm__ __volatile__ ( \
19735 _PRE_EFLAGS("0", "4", "2") \
19736 _op _suffix " %"_x"3,%1; " \
19737 @@ -264,8 +265,6 @@ struct gprefix {
19738 /* Raw emulation: instruction has two explicit operands. */
19739 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
19740 do { \
19741 - unsigned long _tmp; \
19742 - \
19743 switch ((ctxt)->dst.bytes) { \
19744 case 2: \
19745 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
19746 @@ -281,7 +280,6 @@ struct gprefix {
19747
19748 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
19749 do { \
19750 - unsigned long _tmp; \
19751 switch ((ctxt)->dst.bytes) { \
19752 case 1: \
19753 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
19754 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
19755 index cfdc6e0..ab92e84 100644
19756 --- a/arch/x86/kvm/lapic.c
19757 +++ b/arch/x86/kvm/lapic.c
19758 @@ -54,7 +54,7 @@
19759 #define APIC_BUS_CYCLE_NS 1
19760
19761 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
19762 -#define apic_debug(fmt, arg...)
19763 +#define apic_debug(fmt, arg...) do {} while (0)
19764
19765 #define APIC_LVT_NUM 6
19766 /* 14 is the version for Xeon and Pentium 8.4.8*/
19767 diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
19768 index 1561028..0ed7f14 100644
19769 --- a/arch/x86/kvm/paging_tmpl.h
19770 +++ b/arch/x86/kvm/paging_tmpl.h
19771 @@ -197,7 +197,7 @@ retry_walk:
19772 if (unlikely(kvm_is_error_hva(host_addr)))
19773 goto error;
19774
19775 - ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
19776 + ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
19777 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
19778 goto error;
19779
19780 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
19781 index e385214..f8df033 100644
19782 --- a/arch/x86/kvm/svm.c
19783 +++ b/arch/x86/kvm/svm.c
19784 @@ -3420,7 +3420,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
19785 int cpu = raw_smp_processor_id();
19786
19787 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
19788 +
19789 + pax_open_kernel();
19790 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
19791 + pax_close_kernel();
19792 +
19793 load_TR_desc();
19794 }
19795
19796 @@ -3798,6 +3802,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
19797 #endif
19798 #endif
19799
19800 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19801 + __set_fs(current_thread_info()->addr_limit);
19802 +#endif
19803 +
19804 reload_tss(vcpu);
19805
19806 local_irq_disable();
19807 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
19808 index 3b4c8d8..f457b63 100644
19809 --- a/arch/x86/kvm/vmx.c
19810 +++ b/arch/x86/kvm/vmx.c
19811 @@ -1306,7 +1306,11 @@ static void reload_tss(void)
19812 struct desc_struct *descs;
19813
19814 descs = (void *)gdt->address;
19815 +
19816 + pax_open_kernel();
19817 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
19818 + pax_close_kernel();
19819 +
19820 load_TR_desc();
19821 }
19822
19823 @@ -2631,8 +2635,11 @@ static __init int hardware_setup(void)
19824 if (!cpu_has_vmx_flexpriority())
19825 flexpriority_enabled = 0;
19826
19827 - if (!cpu_has_vmx_tpr_shadow())
19828 - kvm_x86_ops->update_cr8_intercept = NULL;
19829 + if (!cpu_has_vmx_tpr_shadow()) {
19830 + pax_open_kernel();
19831 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
19832 + pax_close_kernel();
19833 + }
19834
19835 if (enable_ept && !cpu_has_vmx_ept_2m_page())
19836 kvm_disable_largepages();
19837 @@ -3648,7 +3655,7 @@ static void vmx_set_constant_host_state(void)
19838 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
19839
19840 asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
19841 - vmcs_writel(HOST_RIP, tmpl); /* 22.2.5 */
19842 + vmcs_writel(HOST_RIP, ktla_ktva(tmpl)); /* 22.2.5 */
19843
19844 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
19845 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
19846 @@ -6184,6 +6191,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
19847 "jmp .Lkvm_vmx_return \n\t"
19848 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
19849 ".Lkvm_vmx_return: "
19850 +
19851 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19852 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
19853 + ".Lkvm_vmx_return2: "
19854 +#endif
19855 +
19856 /* Save guest registers, load host registers, keep flags */
19857 "mov %0, %c[wordsize](%%"R"sp) \n\t"
19858 "pop %0 \n\t"
19859 @@ -6232,6 +6245,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
19860 #endif
19861 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
19862 [wordsize]"i"(sizeof(ulong))
19863 +
19864 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19865 + ,[cs]"i"(__KERNEL_CS)
19866 +#endif
19867 +
19868 : "cc", "memory"
19869 , R"ax", R"bx", R"di", R"si"
19870 #ifdef CONFIG_X86_64
19871 @@ -6260,7 +6278,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
19872 }
19873 }
19874
19875 - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
19876 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
19877 +
19878 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19879 + loadsegment(fs, __KERNEL_PERCPU);
19880 +#endif
19881 +
19882 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19883 + __set_fs(current_thread_info()->addr_limit);
19884 +#endif
19885 +
19886 vmx->loaded_vmcs->launched = 1;
19887
19888 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
19889 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
19890 index 9cbfc06..7ddc9fa 100644
19891 --- a/arch/x86/kvm/x86.c
19892 +++ b/arch/x86/kvm/x86.c
19893 @@ -1311,8 +1311,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
19894 {
19895 struct kvm *kvm = vcpu->kvm;
19896 int lm = is_long_mode(vcpu);
19897 - u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
19898 - : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
19899 + u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
19900 + : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
19901 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
19902 : kvm->arch.xen_hvm_config.blob_size_32;
19903 u32 page_num = data & ~PAGE_MASK;
19904 @@ -2145,6 +2145,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
19905 if (n < msr_list.nmsrs)
19906 goto out;
19907 r = -EFAULT;
19908 + if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
19909 + goto out;
19910 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
19911 num_msrs_to_save * sizeof(u32)))
19912 goto out;
19913 @@ -2266,7 +2268,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
19914 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
19915 struct kvm_interrupt *irq)
19916 {
19917 - if (irq->irq < 0 || irq->irq >= 256)
19918 + if (irq->irq >= 256)
19919 return -EINVAL;
19920 if (irqchip_in_kernel(vcpu->kvm))
19921 return -ENXIO;
19922 @@ -4780,7 +4782,7 @@ static void kvm_set_mmio_spte_mask(void)
19923 kvm_mmu_set_mmio_spte_mask(mask);
19924 }
19925
19926 -int kvm_arch_init(void *opaque)
19927 +int kvm_arch_init(const void *opaque)
19928 {
19929 int r;
19930 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
19931 diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
19932 index 642d880..44e0f3f 100644
19933 --- a/arch/x86/lguest/boot.c
19934 +++ b/arch/x86/lguest/boot.c
19935 @@ -1200,9 +1200,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
19936 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
19937 * Launcher to reboot us.
19938 */
19939 -static void lguest_restart(char *reason)
19940 +static __noreturn void lguest_restart(char *reason)
19941 {
19942 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
19943 + BUG();
19944 }
19945
19946 /*G:050
19947 diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c
19948 index 042f682..c92afb6 100644
19949 --- a/arch/x86/lib/atomic64_32.c
19950 +++ b/arch/x86/lib/atomic64_32.c
19951 @@ -8,18 +8,30 @@
19952
19953 long long atomic64_read_cx8(long long, const atomic64_t *v);
19954 EXPORT_SYMBOL(atomic64_read_cx8);
19955 +long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
19956 +EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
19957 long long atomic64_set_cx8(long long, const atomic64_t *v);
19958 EXPORT_SYMBOL(atomic64_set_cx8);
19959 +long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
19960 +EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
19961 long long atomic64_xchg_cx8(long long, unsigned high);
19962 EXPORT_SYMBOL(atomic64_xchg_cx8);
19963 long long atomic64_add_return_cx8(long long a, atomic64_t *v);
19964 EXPORT_SYMBOL(atomic64_add_return_cx8);
19965 +long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
19966 +EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
19967 long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
19968 EXPORT_SYMBOL(atomic64_sub_return_cx8);
19969 +long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
19970 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
19971 long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
19972 EXPORT_SYMBOL(atomic64_inc_return_cx8);
19973 +long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
19974 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
19975 long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
19976 EXPORT_SYMBOL(atomic64_dec_return_cx8);
19977 +long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
19978 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
19979 long long atomic64_dec_if_positive_cx8(atomic64_t *v);
19980 EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
19981 int atomic64_inc_not_zero_cx8(atomic64_t *v);
19982 @@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
19983 #ifndef CONFIG_X86_CMPXCHG64
19984 long long atomic64_read_386(long long, const atomic64_t *v);
19985 EXPORT_SYMBOL(atomic64_read_386);
19986 +long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
19987 +EXPORT_SYMBOL(atomic64_read_unchecked_386);
19988 long long atomic64_set_386(long long, const atomic64_t *v);
19989 EXPORT_SYMBOL(atomic64_set_386);
19990 +long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
19991 +EXPORT_SYMBOL(atomic64_set_unchecked_386);
19992 long long atomic64_xchg_386(long long, unsigned high);
19993 EXPORT_SYMBOL(atomic64_xchg_386);
19994 long long atomic64_add_return_386(long long a, atomic64_t *v);
19995 EXPORT_SYMBOL(atomic64_add_return_386);
19996 +long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
19997 +EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
19998 long long atomic64_sub_return_386(long long a, atomic64_t *v);
19999 EXPORT_SYMBOL(atomic64_sub_return_386);
20000 +long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
20001 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
20002 long long atomic64_inc_return_386(long long a, atomic64_t *v);
20003 EXPORT_SYMBOL(atomic64_inc_return_386);
20004 +long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
20005 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
20006 long long atomic64_dec_return_386(long long a, atomic64_t *v);
20007 EXPORT_SYMBOL(atomic64_dec_return_386);
20008 +long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
20009 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
20010 long long atomic64_add_386(long long a, atomic64_t *v);
20011 EXPORT_SYMBOL(atomic64_add_386);
20012 +long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
20013 +EXPORT_SYMBOL(atomic64_add_unchecked_386);
20014 long long atomic64_sub_386(long long a, atomic64_t *v);
20015 EXPORT_SYMBOL(atomic64_sub_386);
20016 +long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
20017 +EXPORT_SYMBOL(atomic64_sub_unchecked_386);
20018 long long atomic64_inc_386(long long a, atomic64_t *v);
20019 EXPORT_SYMBOL(atomic64_inc_386);
20020 +long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
20021 +EXPORT_SYMBOL(atomic64_inc_unchecked_386);
20022 long long atomic64_dec_386(long long a, atomic64_t *v);
20023 EXPORT_SYMBOL(atomic64_dec_386);
20024 +long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
20025 +EXPORT_SYMBOL(atomic64_dec_unchecked_386);
20026 long long atomic64_dec_if_positive_386(atomic64_t *v);
20027 EXPORT_SYMBOL(atomic64_dec_if_positive_386);
20028 int atomic64_inc_not_zero_386(atomic64_t *v);
20029 diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
20030 index e8e7e0d..56fd1b0 100644
20031 --- a/arch/x86/lib/atomic64_386_32.S
20032 +++ b/arch/x86/lib/atomic64_386_32.S
20033 @@ -48,6 +48,10 @@ BEGIN(read)
20034 movl (v), %eax
20035 movl 4(v), %edx
20036 RET_ENDP
20037 +BEGIN(read_unchecked)
20038 + movl (v), %eax
20039 + movl 4(v), %edx
20040 +RET_ENDP
20041 #undef v
20042
20043 #define v %esi
20044 @@ -55,6 +59,10 @@ BEGIN(set)
20045 movl %ebx, (v)
20046 movl %ecx, 4(v)
20047 RET_ENDP
20048 +BEGIN(set_unchecked)
20049 + movl %ebx, (v)
20050 + movl %ecx, 4(v)
20051 +RET_ENDP
20052 #undef v
20053
20054 #define v %esi
20055 @@ -70,6 +78,20 @@ RET_ENDP
20056 BEGIN(add)
20057 addl %eax, (v)
20058 adcl %edx, 4(v)
20059 +
20060 +#ifdef CONFIG_PAX_REFCOUNT
20061 + jno 0f
20062 + subl %eax, (v)
20063 + sbbl %edx, 4(v)
20064 + int $4
20065 +0:
20066 + _ASM_EXTABLE(0b, 0b)
20067 +#endif
20068 +
20069 +RET_ENDP
20070 +BEGIN(add_unchecked)
20071 + addl %eax, (v)
20072 + adcl %edx, 4(v)
20073 RET_ENDP
20074 #undef v
20075
20076 @@ -77,6 +99,24 @@ RET_ENDP
20077 BEGIN(add_return)
20078 addl (v), %eax
20079 adcl 4(v), %edx
20080 +
20081 +#ifdef CONFIG_PAX_REFCOUNT
20082 + into
20083 +1234:
20084 + _ASM_EXTABLE(1234b, 2f)
20085 +#endif
20086 +
20087 + movl %eax, (v)
20088 + movl %edx, 4(v)
20089 +
20090 +#ifdef CONFIG_PAX_REFCOUNT
20091 +2:
20092 +#endif
20093 +
20094 +RET_ENDP
20095 +BEGIN(add_return_unchecked)
20096 + addl (v), %eax
20097 + adcl 4(v), %edx
20098 movl %eax, (v)
20099 movl %edx, 4(v)
20100 RET_ENDP
20101 @@ -86,6 +126,20 @@ RET_ENDP
20102 BEGIN(sub)
20103 subl %eax, (v)
20104 sbbl %edx, 4(v)
20105 +
20106 +#ifdef CONFIG_PAX_REFCOUNT
20107 + jno 0f
20108 + addl %eax, (v)
20109 + adcl %edx, 4(v)
20110 + int $4
20111 +0:
20112 + _ASM_EXTABLE(0b, 0b)
20113 +#endif
20114 +
20115 +RET_ENDP
20116 +BEGIN(sub_unchecked)
20117 + subl %eax, (v)
20118 + sbbl %edx, 4(v)
20119 RET_ENDP
20120 #undef v
20121
20122 @@ -96,6 +150,27 @@ BEGIN(sub_return)
20123 sbbl $0, %edx
20124 addl (v), %eax
20125 adcl 4(v), %edx
20126 +
20127 +#ifdef CONFIG_PAX_REFCOUNT
20128 + into
20129 +1234:
20130 + _ASM_EXTABLE(1234b, 2f)
20131 +#endif
20132 +
20133 + movl %eax, (v)
20134 + movl %edx, 4(v)
20135 +
20136 +#ifdef CONFIG_PAX_REFCOUNT
20137 +2:
20138 +#endif
20139 +
20140 +RET_ENDP
20141 +BEGIN(sub_return_unchecked)
20142 + negl %edx
20143 + negl %eax
20144 + sbbl $0, %edx
20145 + addl (v), %eax
20146 + adcl 4(v), %edx
20147 movl %eax, (v)
20148 movl %edx, 4(v)
20149 RET_ENDP
20150 @@ -105,6 +180,20 @@ RET_ENDP
20151 BEGIN(inc)
20152 addl $1, (v)
20153 adcl $0, 4(v)
20154 +
20155 +#ifdef CONFIG_PAX_REFCOUNT
20156 + jno 0f
20157 + subl $1, (v)
20158 + sbbl $0, 4(v)
20159 + int $4
20160 +0:
20161 + _ASM_EXTABLE(0b, 0b)
20162 +#endif
20163 +
20164 +RET_ENDP
20165 +BEGIN(inc_unchecked)
20166 + addl $1, (v)
20167 + adcl $0, 4(v)
20168 RET_ENDP
20169 #undef v
20170
20171 @@ -114,6 +203,26 @@ BEGIN(inc_return)
20172 movl 4(v), %edx
20173 addl $1, %eax
20174 adcl $0, %edx
20175 +
20176 +#ifdef CONFIG_PAX_REFCOUNT
20177 + into
20178 +1234:
20179 + _ASM_EXTABLE(1234b, 2f)
20180 +#endif
20181 +
20182 + movl %eax, (v)
20183 + movl %edx, 4(v)
20184 +
20185 +#ifdef CONFIG_PAX_REFCOUNT
20186 +2:
20187 +#endif
20188 +
20189 +RET_ENDP
20190 +BEGIN(inc_return_unchecked)
20191 + movl (v), %eax
20192 + movl 4(v), %edx
20193 + addl $1, %eax
20194 + adcl $0, %edx
20195 movl %eax, (v)
20196 movl %edx, 4(v)
20197 RET_ENDP
20198 @@ -123,6 +232,20 @@ RET_ENDP
20199 BEGIN(dec)
20200 subl $1, (v)
20201 sbbl $0, 4(v)
20202 +
20203 +#ifdef CONFIG_PAX_REFCOUNT
20204 + jno 0f
20205 + addl $1, (v)
20206 + adcl $0, 4(v)
20207 + int $4
20208 +0:
20209 + _ASM_EXTABLE(0b, 0b)
20210 +#endif
20211 +
20212 +RET_ENDP
20213 +BEGIN(dec_unchecked)
20214 + subl $1, (v)
20215 + sbbl $0, 4(v)
20216 RET_ENDP
20217 #undef v
20218
20219 @@ -132,6 +255,26 @@ BEGIN(dec_return)
20220 movl 4(v), %edx
20221 subl $1, %eax
20222 sbbl $0, %edx
20223 +
20224 +#ifdef CONFIG_PAX_REFCOUNT
20225 + into
20226 +1234:
20227 + _ASM_EXTABLE(1234b, 2f)
20228 +#endif
20229 +
20230 + movl %eax, (v)
20231 + movl %edx, 4(v)
20232 +
20233 +#ifdef CONFIG_PAX_REFCOUNT
20234 +2:
20235 +#endif
20236 +
20237 +RET_ENDP
20238 +BEGIN(dec_return_unchecked)
20239 + movl (v), %eax
20240 + movl 4(v), %edx
20241 + subl $1, %eax
20242 + sbbl $0, %edx
20243 movl %eax, (v)
20244 movl %edx, 4(v)
20245 RET_ENDP
20246 @@ -143,6 +286,13 @@ BEGIN(add_unless)
20247 adcl %edx, %edi
20248 addl (v), %eax
20249 adcl 4(v), %edx
20250 +
20251 +#ifdef CONFIG_PAX_REFCOUNT
20252 + into
20253 +1234:
20254 + _ASM_EXTABLE(1234b, 2f)
20255 +#endif
20256 +
20257 cmpl %eax, %esi
20258 je 3f
20259 1:
20260 @@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
20261 1:
20262 addl $1, %eax
20263 adcl $0, %edx
20264 +
20265 +#ifdef CONFIG_PAX_REFCOUNT
20266 + into
20267 +1234:
20268 + _ASM_EXTABLE(1234b, 2f)
20269 +#endif
20270 +
20271 movl %eax, (v)
20272 movl %edx, 4(v)
20273 movl $1, %eax
20274 @@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
20275 movl 4(v), %edx
20276 subl $1, %eax
20277 sbbl $0, %edx
20278 +
20279 +#ifdef CONFIG_PAX_REFCOUNT
20280 + into
20281 +1234:
20282 + _ASM_EXTABLE(1234b, 1f)
20283 +#endif
20284 +
20285 js 1f
20286 movl %eax, (v)
20287 movl %edx, 4(v)
20288 diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
20289 index 391a083..d658e9f 100644
20290 --- a/arch/x86/lib/atomic64_cx8_32.S
20291 +++ b/arch/x86/lib/atomic64_cx8_32.S
20292 @@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
20293 CFI_STARTPROC
20294
20295 read64 %ecx
20296 + pax_force_retaddr
20297 ret
20298 CFI_ENDPROC
20299 ENDPROC(atomic64_read_cx8)
20300
20301 +ENTRY(atomic64_read_unchecked_cx8)
20302 + CFI_STARTPROC
20303 +
20304 + read64 %ecx
20305 + pax_force_retaddr
20306 + ret
20307 + CFI_ENDPROC
20308 +ENDPROC(atomic64_read_unchecked_cx8)
20309 +
20310 ENTRY(atomic64_set_cx8)
20311 CFI_STARTPROC
20312
20313 @@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
20314 cmpxchg8b (%esi)
20315 jne 1b
20316
20317 + pax_force_retaddr
20318 ret
20319 CFI_ENDPROC
20320 ENDPROC(atomic64_set_cx8)
20321
20322 +ENTRY(atomic64_set_unchecked_cx8)
20323 + CFI_STARTPROC
20324 +
20325 +1:
20326 +/* we don't need LOCK_PREFIX since aligned 64-bit writes
20327 + * are atomic on 586 and newer */
20328 + cmpxchg8b (%esi)
20329 + jne 1b
20330 +
20331 + pax_force_retaddr
20332 + ret
20333 + CFI_ENDPROC
20334 +ENDPROC(atomic64_set_unchecked_cx8)
20335 +
20336 ENTRY(atomic64_xchg_cx8)
20337 CFI_STARTPROC
20338
20339 @@ -62,12 +87,13 @@ ENTRY(atomic64_xchg_cx8)
20340 cmpxchg8b (%esi)
20341 jne 1b
20342
20343 + pax_force_retaddr
20344 ret
20345 CFI_ENDPROC
20346 ENDPROC(atomic64_xchg_cx8)
20347
20348 -.macro addsub_return func ins insc
20349 -ENTRY(atomic64_\func\()_return_cx8)
20350 +.macro addsub_return func ins insc unchecked=""
20351 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
20352 CFI_STARTPROC
20353 SAVE ebp
20354 SAVE ebx
20355 @@ -84,27 +110,44 @@ ENTRY(atomic64_\func\()_return_cx8)
20356 movl %edx, %ecx
20357 \ins\()l %esi, %ebx
20358 \insc\()l %edi, %ecx
20359 +
20360 +.ifb \unchecked
20361 +#ifdef CONFIG_PAX_REFCOUNT
20362 + into
20363 +2:
20364 + _ASM_EXTABLE(2b, 3f)
20365 +#endif
20366 +.endif
20367 +
20368 LOCK_PREFIX
20369 cmpxchg8b (%ebp)
20370 jne 1b
20371 -
20372 -10:
20373 movl %ebx, %eax
20374 movl %ecx, %edx
20375 +
20376 +.ifb \unchecked
20377 +#ifdef CONFIG_PAX_REFCOUNT
20378 +3:
20379 +#endif
20380 +.endif
20381 +
20382 RESTORE edi
20383 RESTORE esi
20384 RESTORE ebx
20385 RESTORE ebp
20386 + pax_force_retaddr
20387 ret
20388 CFI_ENDPROC
20389 -ENDPROC(atomic64_\func\()_return_cx8)
20390 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
20391 .endm
20392
20393 addsub_return add add adc
20394 addsub_return sub sub sbb
20395 +addsub_return add add adc _unchecked
20396 +addsub_return sub sub sbb _unchecked
20397
20398 -.macro incdec_return func ins insc
20399 -ENTRY(atomic64_\func\()_return_cx8)
20400 +.macro incdec_return func ins insc unchecked
20401 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
20402 CFI_STARTPROC
20403 SAVE ebx
20404
20405 @@ -114,21 +157,39 @@ ENTRY(atomic64_\func\()_return_cx8)
20406 movl %edx, %ecx
20407 \ins\()l $1, %ebx
20408 \insc\()l $0, %ecx
20409 +
20410 +.ifb \unchecked
20411 +#ifdef CONFIG_PAX_REFCOUNT
20412 + into
20413 +2:
20414 + _ASM_EXTABLE(2b, 3f)
20415 +#endif
20416 +.endif
20417 +
20418 LOCK_PREFIX
20419 cmpxchg8b (%esi)
20420 jne 1b
20421
20422 -10:
20423 movl %ebx, %eax
20424 movl %ecx, %edx
20425 +
20426 +.ifb \unchecked
20427 +#ifdef CONFIG_PAX_REFCOUNT
20428 +3:
20429 +#endif
20430 +.endif
20431 +
20432 RESTORE ebx
20433 + pax_force_retaddr
20434 ret
20435 CFI_ENDPROC
20436 -ENDPROC(atomic64_\func\()_return_cx8)
20437 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
20438 .endm
20439
20440 incdec_return inc add adc
20441 incdec_return dec sub sbb
20442 +incdec_return inc add adc _unchecked
20443 +incdec_return dec sub sbb _unchecked
20444
20445 ENTRY(atomic64_dec_if_positive_cx8)
20446 CFI_STARTPROC
20447 @@ -140,6 +201,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
20448 movl %edx, %ecx
20449 subl $1, %ebx
20450 sbb $0, %ecx
20451 +
20452 +#ifdef CONFIG_PAX_REFCOUNT
20453 + into
20454 +1234:
20455 + _ASM_EXTABLE(1234b, 2f)
20456 +#endif
20457 +
20458 js 2f
20459 LOCK_PREFIX
20460 cmpxchg8b (%esi)
20461 @@ -149,6 +217,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
20462 movl %ebx, %eax
20463 movl %ecx, %edx
20464 RESTORE ebx
20465 + pax_force_retaddr
20466 ret
20467 CFI_ENDPROC
20468 ENDPROC(atomic64_dec_if_positive_cx8)
20469 @@ -174,6 +243,13 @@ ENTRY(atomic64_add_unless_cx8)
20470 movl %edx, %ecx
20471 addl %esi, %ebx
20472 adcl %edi, %ecx
20473 +
20474 +#ifdef CONFIG_PAX_REFCOUNT
20475 + into
20476 +1234:
20477 + _ASM_EXTABLE(1234b, 3f)
20478 +#endif
20479 +
20480 LOCK_PREFIX
20481 cmpxchg8b (%ebp)
20482 jne 1b
20483 @@ -184,6 +260,7 @@ ENTRY(atomic64_add_unless_cx8)
20484 CFI_ADJUST_CFA_OFFSET -8
20485 RESTORE ebx
20486 RESTORE ebp
20487 + pax_force_retaddr
20488 ret
20489 4:
20490 cmpl %edx, 4(%esp)
20491 @@ -206,6 +283,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
20492 movl %edx, %ecx
20493 addl $1, %ebx
20494 adcl $0, %ecx
20495 +
20496 +#ifdef CONFIG_PAX_REFCOUNT
20497 + into
20498 +1234:
20499 + _ASM_EXTABLE(1234b, 3f)
20500 +#endif
20501 +
20502 LOCK_PREFIX
20503 cmpxchg8b (%esi)
20504 jne 1b
20505 @@ -213,6 +297,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
20506 movl $1, %eax
20507 3:
20508 RESTORE ebx
20509 + pax_force_retaddr
20510 ret
20511 4:
20512 testl %edx, %edx
20513 diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
20514 index 78d16a5..fbcf666 100644
20515 --- a/arch/x86/lib/checksum_32.S
20516 +++ b/arch/x86/lib/checksum_32.S
20517 @@ -28,7 +28,8 @@
20518 #include <linux/linkage.h>
20519 #include <asm/dwarf2.h>
20520 #include <asm/errno.h>
20521 -
20522 +#include <asm/segment.h>
20523 +
20524 /*
20525 * computes a partial checksum, e.g. for TCP/UDP fragments
20526 */
20527 @@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
20528
20529 #define ARGBASE 16
20530 #define FP 12
20531 -
20532 -ENTRY(csum_partial_copy_generic)
20533 +
20534 +ENTRY(csum_partial_copy_generic_to_user)
20535 CFI_STARTPROC
20536 +
20537 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20538 + pushl_cfi %gs
20539 + popl_cfi %es
20540 + jmp csum_partial_copy_generic
20541 +#endif
20542 +
20543 +ENTRY(csum_partial_copy_generic_from_user)
20544 +
20545 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20546 + pushl_cfi %gs
20547 + popl_cfi %ds
20548 +#endif
20549 +
20550 +ENTRY(csum_partial_copy_generic)
20551 subl $4,%esp
20552 CFI_ADJUST_CFA_OFFSET 4
20553 pushl_cfi %edi
20554 @@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
20555 jmp 4f
20556 SRC(1: movw (%esi), %bx )
20557 addl $2, %esi
20558 -DST( movw %bx, (%edi) )
20559 +DST( movw %bx, %es:(%edi) )
20560 addl $2, %edi
20561 addw %bx, %ax
20562 adcl $0, %eax
20563 @@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
20564 SRC(1: movl (%esi), %ebx )
20565 SRC( movl 4(%esi), %edx )
20566 adcl %ebx, %eax
20567 -DST( movl %ebx, (%edi) )
20568 +DST( movl %ebx, %es:(%edi) )
20569 adcl %edx, %eax
20570 -DST( movl %edx, 4(%edi) )
20571 +DST( movl %edx, %es:4(%edi) )
20572
20573 SRC( movl 8(%esi), %ebx )
20574 SRC( movl 12(%esi), %edx )
20575 adcl %ebx, %eax
20576 -DST( movl %ebx, 8(%edi) )
20577 +DST( movl %ebx, %es:8(%edi) )
20578 adcl %edx, %eax
20579 -DST( movl %edx, 12(%edi) )
20580 +DST( movl %edx, %es:12(%edi) )
20581
20582 SRC( movl 16(%esi), %ebx )
20583 SRC( movl 20(%esi), %edx )
20584 adcl %ebx, %eax
20585 -DST( movl %ebx, 16(%edi) )
20586 +DST( movl %ebx, %es:16(%edi) )
20587 adcl %edx, %eax
20588 -DST( movl %edx, 20(%edi) )
20589 +DST( movl %edx, %es:20(%edi) )
20590
20591 SRC( movl 24(%esi), %ebx )
20592 SRC( movl 28(%esi), %edx )
20593 adcl %ebx, %eax
20594 -DST( movl %ebx, 24(%edi) )
20595 +DST( movl %ebx, %es:24(%edi) )
20596 adcl %edx, %eax
20597 -DST( movl %edx, 28(%edi) )
20598 +DST( movl %edx, %es:28(%edi) )
20599
20600 lea 32(%esi), %esi
20601 lea 32(%edi), %edi
20602 @@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
20603 shrl $2, %edx # This clears CF
20604 SRC(3: movl (%esi), %ebx )
20605 adcl %ebx, %eax
20606 -DST( movl %ebx, (%edi) )
20607 +DST( movl %ebx, %es:(%edi) )
20608 lea 4(%esi), %esi
20609 lea 4(%edi), %edi
20610 dec %edx
20611 @@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
20612 jb 5f
20613 SRC( movw (%esi), %cx )
20614 leal 2(%esi), %esi
20615 -DST( movw %cx, (%edi) )
20616 +DST( movw %cx, %es:(%edi) )
20617 leal 2(%edi), %edi
20618 je 6f
20619 shll $16,%ecx
20620 SRC(5: movb (%esi), %cl )
20621 -DST( movb %cl, (%edi) )
20622 +DST( movb %cl, %es:(%edi) )
20623 6: addl %ecx, %eax
20624 adcl $0, %eax
20625 7:
20626 @@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
20627
20628 6001:
20629 movl ARGBASE+20(%esp), %ebx # src_err_ptr
20630 - movl $-EFAULT, (%ebx)
20631 + movl $-EFAULT, %ss:(%ebx)
20632
20633 # zero the complete destination - computing the rest
20634 # is too much work
20635 @@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
20636
20637 6002:
20638 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
20639 - movl $-EFAULT,(%ebx)
20640 + movl $-EFAULT,%ss:(%ebx)
20641 jmp 5000b
20642
20643 .previous
20644
20645 + pushl_cfi %ss
20646 + popl_cfi %ds
20647 + pushl_cfi %ss
20648 + popl_cfi %es
20649 popl_cfi %ebx
20650 CFI_RESTORE ebx
20651 popl_cfi %esi
20652 @@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
20653 popl_cfi %ecx # equivalent to addl $4,%esp
20654 ret
20655 CFI_ENDPROC
20656 -ENDPROC(csum_partial_copy_generic)
20657 +ENDPROC(csum_partial_copy_generic_to_user)
20658
20659 #else
20660
20661 /* Version for PentiumII/PPro */
20662
20663 #define ROUND1(x) \
20664 + nop; nop; nop; \
20665 SRC(movl x(%esi), %ebx ) ; \
20666 addl %ebx, %eax ; \
20667 - DST(movl %ebx, x(%edi) ) ;
20668 + DST(movl %ebx, %es:x(%edi)) ;
20669
20670 #define ROUND(x) \
20671 + nop; nop; nop; \
20672 SRC(movl x(%esi), %ebx ) ; \
20673 adcl %ebx, %eax ; \
20674 - DST(movl %ebx, x(%edi) ) ;
20675 + DST(movl %ebx, %es:x(%edi)) ;
20676
20677 #define ARGBASE 12
20678 -
20679 -ENTRY(csum_partial_copy_generic)
20680 +
20681 +ENTRY(csum_partial_copy_generic_to_user)
20682 CFI_STARTPROC
20683 +
20684 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20685 + pushl_cfi %gs
20686 + popl_cfi %es
20687 + jmp csum_partial_copy_generic
20688 +#endif
20689 +
20690 +ENTRY(csum_partial_copy_generic_from_user)
20691 +
20692 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20693 + pushl_cfi %gs
20694 + popl_cfi %ds
20695 +#endif
20696 +
20697 +ENTRY(csum_partial_copy_generic)
20698 pushl_cfi %ebx
20699 CFI_REL_OFFSET ebx, 0
20700 pushl_cfi %edi
20701 @@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
20702 subl %ebx, %edi
20703 lea -1(%esi),%edx
20704 andl $-32,%edx
20705 - lea 3f(%ebx,%ebx), %ebx
20706 + lea 3f(%ebx,%ebx,2), %ebx
20707 testl %esi, %esi
20708 jmp *%ebx
20709 1: addl $64,%esi
20710 @@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
20711 jb 5f
20712 SRC( movw (%esi), %dx )
20713 leal 2(%esi), %esi
20714 -DST( movw %dx, (%edi) )
20715 +DST( movw %dx, %es:(%edi) )
20716 leal 2(%edi), %edi
20717 je 6f
20718 shll $16,%edx
20719 5:
20720 SRC( movb (%esi), %dl )
20721 -DST( movb %dl, (%edi) )
20722 +DST( movb %dl, %es:(%edi) )
20723 6: addl %edx, %eax
20724 adcl $0, %eax
20725 7:
20726 .section .fixup, "ax"
20727 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
20728 - movl $-EFAULT, (%ebx)
20729 + movl $-EFAULT, %ss:(%ebx)
20730 # zero the complete destination (computing the rest is too much work)
20731 movl ARGBASE+8(%esp),%edi # dst
20732 movl ARGBASE+12(%esp),%ecx # len
20733 @@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
20734 rep; stosb
20735 jmp 7b
20736 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
20737 - movl $-EFAULT, (%ebx)
20738 + movl $-EFAULT, %ss:(%ebx)
20739 jmp 7b
20740 .previous
20741
20742 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20743 + pushl_cfi %ss
20744 + popl_cfi %ds
20745 + pushl_cfi %ss
20746 + popl_cfi %es
20747 +#endif
20748 +
20749 popl_cfi %esi
20750 CFI_RESTORE esi
20751 popl_cfi %edi
20752 @@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
20753 CFI_RESTORE ebx
20754 ret
20755 CFI_ENDPROC
20756 -ENDPROC(csum_partial_copy_generic)
20757 +ENDPROC(csum_partial_copy_generic_to_user)
20758
20759 #undef ROUND
20760 #undef ROUND1
20761 diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
20762 index f2145cf..cea889d 100644
20763 --- a/arch/x86/lib/clear_page_64.S
20764 +++ b/arch/x86/lib/clear_page_64.S
20765 @@ -11,6 +11,7 @@ ENTRY(clear_page_c)
20766 movl $4096/8,%ecx
20767 xorl %eax,%eax
20768 rep stosq
20769 + pax_force_retaddr
20770 ret
20771 CFI_ENDPROC
20772 ENDPROC(clear_page_c)
20773 @@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
20774 movl $4096,%ecx
20775 xorl %eax,%eax
20776 rep stosb
20777 + pax_force_retaddr
20778 ret
20779 CFI_ENDPROC
20780 ENDPROC(clear_page_c_e)
20781 @@ -43,6 +45,7 @@ ENTRY(clear_page)
20782 leaq 64(%rdi),%rdi
20783 jnz .Lloop
20784 nop
20785 + pax_force_retaddr
20786 ret
20787 CFI_ENDPROC
20788 .Lclear_page_end:
20789 @@ -58,7 +61,7 @@ ENDPROC(clear_page)
20790
20791 #include <asm/cpufeature.h>
20792
20793 - .section .altinstr_replacement,"ax"
20794 + .section .altinstr_replacement,"a"
20795 1: .byte 0xeb /* jmp <disp8> */
20796 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
20797 2: .byte 0xeb /* jmp <disp8> */
20798 diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
20799 index 1e572c5..2a162cd 100644
20800 --- a/arch/x86/lib/cmpxchg16b_emu.S
20801 +++ b/arch/x86/lib/cmpxchg16b_emu.S
20802 @@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
20803
20804 popf
20805 mov $1, %al
20806 + pax_force_retaddr
20807 ret
20808
20809 not_same:
20810 popf
20811 xor %al,%al
20812 + pax_force_retaddr
20813 ret
20814
20815 CFI_ENDPROC
20816 diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
20817 index 01c805b..dccb07f 100644
20818 --- a/arch/x86/lib/copy_page_64.S
20819 +++ b/arch/x86/lib/copy_page_64.S
20820 @@ -9,6 +9,7 @@ copy_page_c:
20821 CFI_STARTPROC
20822 movl $4096/8,%ecx
20823 rep movsq
20824 + pax_force_retaddr
20825 ret
20826 CFI_ENDPROC
20827 ENDPROC(copy_page_c)
20828 @@ -39,7 +40,7 @@ ENTRY(copy_page)
20829 movq 16 (%rsi), %rdx
20830 movq 24 (%rsi), %r8
20831 movq 32 (%rsi), %r9
20832 - movq 40 (%rsi), %r10
20833 + movq 40 (%rsi), %r13
20834 movq 48 (%rsi), %r11
20835 movq 56 (%rsi), %r12
20836
20837 @@ -50,7 +51,7 @@ ENTRY(copy_page)
20838 movq %rdx, 16 (%rdi)
20839 movq %r8, 24 (%rdi)
20840 movq %r9, 32 (%rdi)
20841 - movq %r10, 40 (%rdi)
20842 + movq %r13, 40 (%rdi)
20843 movq %r11, 48 (%rdi)
20844 movq %r12, 56 (%rdi)
20845
20846 @@ -69,7 +70,7 @@ ENTRY(copy_page)
20847 movq 16 (%rsi), %rdx
20848 movq 24 (%rsi), %r8
20849 movq 32 (%rsi), %r9
20850 - movq 40 (%rsi), %r10
20851 + movq 40 (%rsi), %r13
20852 movq 48 (%rsi), %r11
20853 movq 56 (%rsi), %r12
20854
20855 @@ -78,7 +79,7 @@ ENTRY(copy_page)
20856 movq %rdx, 16 (%rdi)
20857 movq %r8, 24 (%rdi)
20858 movq %r9, 32 (%rdi)
20859 - movq %r10, 40 (%rdi)
20860 + movq %r13, 40 (%rdi)
20861 movq %r11, 48 (%rdi)
20862 movq %r12, 56 (%rdi)
20863
20864 @@ -95,6 +96,7 @@ ENTRY(copy_page)
20865 CFI_RESTORE r13
20866 addq $3*8,%rsp
20867 CFI_ADJUST_CFA_OFFSET -3*8
20868 + pax_force_retaddr
20869 ret
20870 .Lcopy_page_end:
20871 CFI_ENDPROC
20872 @@ -105,7 +107,7 @@ ENDPROC(copy_page)
20873
20874 #include <asm/cpufeature.h>
20875
20876 - .section .altinstr_replacement,"ax"
20877 + .section .altinstr_replacement,"a"
20878 1: .byte 0xeb /* jmp <disp8> */
20879 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
20880 2:
20881 diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
20882 index 0248402..821c786 100644
20883 --- a/arch/x86/lib/copy_user_64.S
20884 +++ b/arch/x86/lib/copy_user_64.S
20885 @@ -16,6 +16,7 @@
20886 #include <asm/thread_info.h>
20887 #include <asm/cpufeature.h>
20888 #include <asm/alternative-asm.h>
20889 +#include <asm/pgtable.h>
20890
20891 /*
20892 * By placing feature2 after feature1 in altinstructions section, we logically
20893 @@ -29,7 +30,7 @@
20894 .byte 0xe9 /* 32bit jump */
20895 .long \orig-1f /* by default jump to orig */
20896 1:
20897 - .section .altinstr_replacement,"ax"
20898 + .section .altinstr_replacement,"a"
20899 2: .byte 0xe9 /* near jump with 32bit immediate */
20900 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
20901 3: .byte 0xe9 /* near jump with 32bit immediate */
20902 @@ -71,47 +72,20 @@
20903 #endif
20904 .endm
20905
20906 -/* Standard copy_to_user with segment limit checking */
20907 -ENTRY(_copy_to_user)
20908 - CFI_STARTPROC
20909 - GET_THREAD_INFO(%rax)
20910 - movq %rdi,%rcx
20911 - addq %rdx,%rcx
20912 - jc bad_to_user
20913 - cmpq TI_addr_limit(%rax),%rcx
20914 - ja bad_to_user
20915 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
20916 - copy_user_generic_unrolled,copy_user_generic_string, \
20917 - copy_user_enhanced_fast_string
20918 - CFI_ENDPROC
20919 -ENDPROC(_copy_to_user)
20920 -
20921 -/* Standard copy_from_user with segment limit checking */
20922 -ENTRY(_copy_from_user)
20923 - CFI_STARTPROC
20924 - GET_THREAD_INFO(%rax)
20925 - movq %rsi,%rcx
20926 - addq %rdx,%rcx
20927 - jc bad_from_user
20928 - cmpq TI_addr_limit(%rax),%rcx
20929 - ja bad_from_user
20930 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
20931 - copy_user_generic_unrolled,copy_user_generic_string, \
20932 - copy_user_enhanced_fast_string
20933 - CFI_ENDPROC
20934 -ENDPROC(_copy_from_user)
20935 -
20936 .section .fixup,"ax"
20937 /* must zero dest */
20938 ENTRY(bad_from_user)
20939 bad_from_user:
20940 CFI_STARTPROC
20941 + testl %edx,%edx
20942 + js bad_to_user
20943 movl %edx,%ecx
20944 xorl %eax,%eax
20945 rep
20946 stosb
20947 bad_to_user:
20948 movl %edx,%eax
20949 + pax_force_retaddr
20950 ret
20951 CFI_ENDPROC
20952 ENDPROC(bad_from_user)
20953 @@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
20954 jz 17f
20955 1: movq (%rsi),%r8
20956 2: movq 1*8(%rsi),%r9
20957 -3: movq 2*8(%rsi),%r10
20958 +3: movq 2*8(%rsi),%rax
20959 4: movq 3*8(%rsi),%r11
20960 5: movq %r8,(%rdi)
20961 6: movq %r9,1*8(%rdi)
20962 -7: movq %r10,2*8(%rdi)
20963 +7: movq %rax,2*8(%rdi)
20964 8: movq %r11,3*8(%rdi)
20965 9: movq 4*8(%rsi),%r8
20966 10: movq 5*8(%rsi),%r9
20967 -11: movq 6*8(%rsi),%r10
20968 +11: movq 6*8(%rsi),%rax
20969 12: movq 7*8(%rsi),%r11
20970 13: movq %r8,4*8(%rdi)
20971 14: movq %r9,5*8(%rdi)
20972 -15: movq %r10,6*8(%rdi)
20973 +15: movq %rax,6*8(%rdi)
20974 16: movq %r11,7*8(%rdi)
20975 leaq 64(%rsi),%rsi
20976 leaq 64(%rdi),%rdi
20977 @@ -179,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
20978 decl %ecx
20979 jnz 21b
20980 23: xor %eax,%eax
20981 + pax_force_retaddr
20982 ret
20983
20984 .section .fixup,"ax"
20985 @@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
20986 3: rep
20987 movsb
20988 4: xorl %eax,%eax
20989 + pax_force_retaddr
20990 ret
20991
20992 .section .fixup,"ax"
20993 @@ -287,6 +263,7 @@ ENTRY(copy_user_enhanced_fast_string)
20994 1: rep
20995 movsb
20996 2: xorl %eax,%eax
20997 + pax_force_retaddr
20998 ret
20999
21000 .section .fixup,"ax"
21001 diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
21002 index cb0c112..e3a6895 100644
21003 --- a/arch/x86/lib/copy_user_nocache_64.S
21004 +++ b/arch/x86/lib/copy_user_nocache_64.S
21005 @@ -8,12 +8,14 @@
21006
21007 #include <linux/linkage.h>
21008 #include <asm/dwarf2.h>
21009 +#include <asm/alternative-asm.h>
21010
21011 #define FIX_ALIGNMENT 1
21012
21013 #include <asm/current.h>
21014 #include <asm/asm-offsets.h>
21015 #include <asm/thread_info.h>
21016 +#include <asm/pgtable.h>
21017
21018 .macro ALIGN_DESTINATION
21019 #ifdef FIX_ALIGNMENT
21020 @@ -50,6 +52,15 @@
21021 */
21022 ENTRY(__copy_user_nocache)
21023 CFI_STARTPROC
21024 +
21025 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21026 + mov $PAX_USER_SHADOW_BASE,%rcx
21027 + cmp %rcx,%rsi
21028 + jae 1f
21029 + add %rcx,%rsi
21030 +1:
21031 +#endif
21032 +
21033 cmpl $8,%edx
21034 jb 20f /* less then 8 bytes, go to byte copy loop */
21035 ALIGN_DESTINATION
21036 @@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
21037 jz 17f
21038 1: movq (%rsi),%r8
21039 2: movq 1*8(%rsi),%r9
21040 -3: movq 2*8(%rsi),%r10
21041 +3: movq 2*8(%rsi),%rax
21042 4: movq 3*8(%rsi),%r11
21043 5: movnti %r8,(%rdi)
21044 6: movnti %r9,1*8(%rdi)
21045 -7: movnti %r10,2*8(%rdi)
21046 +7: movnti %rax,2*8(%rdi)
21047 8: movnti %r11,3*8(%rdi)
21048 9: movq 4*8(%rsi),%r8
21049 10: movq 5*8(%rsi),%r9
21050 -11: movq 6*8(%rsi),%r10
21051 +11: movq 6*8(%rsi),%rax
21052 12: movq 7*8(%rsi),%r11
21053 13: movnti %r8,4*8(%rdi)
21054 14: movnti %r9,5*8(%rdi)
21055 -15: movnti %r10,6*8(%rdi)
21056 +15: movnti %rax,6*8(%rdi)
21057 16: movnti %r11,7*8(%rdi)
21058 leaq 64(%rsi),%rsi
21059 leaq 64(%rdi),%rdi
21060 @@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
21061 jnz 21b
21062 23: xorl %eax,%eax
21063 sfence
21064 + pax_force_retaddr
21065 ret
21066
21067 .section .fixup,"ax"
21068 diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
21069 index fb903b7..c92b7f7 100644
21070 --- a/arch/x86/lib/csum-copy_64.S
21071 +++ b/arch/x86/lib/csum-copy_64.S
21072 @@ -8,6 +8,7 @@
21073 #include <linux/linkage.h>
21074 #include <asm/dwarf2.h>
21075 #include <asm/errno.h>
21076 +#include <asm/alternative-asm.h>
21077
21078 /*
21079 * Checksum copy with exception handling.
21080 @@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
21081 CFI_RESTORE rbp
21082 addq $7*8, %rsp
21083 CFI_ADJUST_CFA_OFFSET -7*8
21084 + pax_force_retaddr 0, 1
21085 ret
21086 CFI_RESTORE_STATE
21087
21088 diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
21089 index 459b58a..9570bc7 100644
21090 --- a/arch/x86/lib/csum-wrappers_64.c
21091 +++ b/arch/x86/lib/csum-wrappers_64.c
21092 @@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
21093 len -= 2;
21094 }
21095 }
21096 - isum = csum_partial_copy_generic((__force const void *)src,
21097 +
21098 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21099 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
21100 + src += PAX_USER_SHADOW_BASE;
21101 +#endif
21102 +
21103 + isum = csum_partial_copy_generic((const void __force_kernel *)src,
21104 dst, len, isum, errp, NULL);
21105 if (unlikely(*errp))
21106 goto out_err;
21107 @@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
21108 }
21109
21110 *errp = 0;
21111 - return csum_partial_copy_generic(src, (void __force *)dst,
21112 +
21113 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21114 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
21115 + dst += PAX_USER_SHADOW_BASE;
21116 +#endif
21117 +
21118 + return csum_partial_copy_generic(src, (void __force_kernel *)dst,
21119 len, isum, NULL, errp);
21120 }
21121 EXPORT_SYMBOL(csum_partial_copy_to_user);
21122 diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
21123 index 51f1504..ddac4c1 100644
21124 --- a/arch/x86/lib/getuser.S
21125 +++ b/arch/x86/lib/getuser.S
21126 @@ -33,15 +33,38 @@
21127 #include <asm/asm-offsets.h>
21128 #include <asm/thread_info.h>
21129 #include <asm/asm.h>
21130 +#include <asm/segment.h>
21131 +#include <asm/pgtable.h>
21132 +#include <asm/alternative-asm.h>
21133 +
21134 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
21135 +#define __copyuser_seg gs;
21136 +#else
21137 +#define __copyuser_seg
21138 +#endif
21139
21140 .text
21141 ENTRY(__get_user_1)
21142 CFI_STARTPROC
21143 +
21144 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21145 GET_THREAD_INFO(%_ASM_DX)
21146 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21147 jae bad_get_user
21148 -1: movzb (%_ASM_AX),%edx
21149 +
21150 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21151 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21152 + cmp %_ASM_DX,%_ASM_AX
21153 + jae 1234f
21154 + add %_ASM_DX,%_ASM_AX
21155 +1234:
21156 +#endif
21157 +
21158 +#endif
21159 +
21160 +1: __copyuser_seg movzb (%_ASM_AX),%edx
21161 xor %eax,%eax
21162 + pax_force_retaddr
21163 ret
21164 CFI_ENDPROC
21165 ENDPROC(__get_user_1)
21166 @@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
21167 ENTRY(__get_user_2)
21168 CFI_STARTPROC
21169 add $1,%_ASM_AX
21170 +
21171 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21172 jc bad_get_user
21173 GET_THREAD_INFO(%_ASM_DX)
21174 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21175 jae bad_get_user
21176 -2: movzwl -1(%_ASM_AX),%edx
21177 +
21178 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21179 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21180 + cmp %_ASM_DX,%_ASM_AX
21181 + jae 1234f
21182 + add %_ASM_DX,%_ASM_AX
21183 +1234:
21184 +#endif
21185 +
21186 +#endif
21187 +
21188 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
21189 xor %eax,%eax
21190 + pax_force_retaddr
21191 ret
21192 CFI_ENDPROC
21193 ENDPROC(__get_user_2)
21194 @@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
21195 ENTRY(__get_user_4)
21196 CFI_STARTPROC
21197 add $3,%_ASM_AX
21198 +
21199 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21200 jc bad_get_user
21201 GET_THREAD_INFO(%_ASM_DX)
21202 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21203 jae bad_get_user
21204 -3: mov -3(%_ASM_AX),%edx
21205 +
21206 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21207 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21208 + cmp %_ASM_DX,%_ASM_AX
21209 + jae 1234f
21210 + add %_ASM_DX,%_ASM_AX
21211 +1234:
21212 +#endif
21213 +
21214 +#endif
21215 +
21216 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
21217 xor %eax,%eax
21218 + pax_force_retaddr
21219 ret
21220 CFI_ENDPROC
21221 ENDPROC(__get_user_4)
21222 @@ -80,8 +131,18 @@ ENTRY(__get_user_8)
21223 GET_THREAD_INFO(%_ASM_DX)
21224 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21225 jae bad_get_user
21226 +
21227 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21228 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21229 + cmp %_ASM_DX,%_ASM_AX
21230 + jae 1234f
21231 + add %_ASM_DX,%_ASM_AX
21232 +1234:
21233 +#endif
21234 +
21235 4: movq -7(%_ASM_AX),%_ASM_DX
21236 xor %eax,%eax
21237 + pax_force_retaddr
21238 ret
21239 CFI_ENDPROC
21240 ENDPROC(__get_user_8)
21241 @@ -91,6 +152,7 @@ bad_get_user:
21242 CFI_STARTPROC
21243 xor %edx,%edx
21244 mov $(-EFAULT),%_ASM_AX
21245 + pax_force_retaddr
21246 ret
21247 CFI_ENDPROC
21248 END(bad_get_user)
21249 diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
21250 index 5a1f9f3..ba9f577 100644
21251 --- a/arch/x86/lib/insn.c
21252 +++ b/arch/x86/lib/insn.c
21253 @@ -21,6 +21,11 @@
21254 #include <linux/string.h>
21255 #include <asm/inat.h>
21256 #include <asm/insn.h>
21257 +#ifdef __KERNEL__
21258 +#include <asm/pgtable_types.h>
21259 +#else
21260 +#define ktla_ktva(addr) addr
21261 +#endif
21262
21263 /* Verify next sizeof(t) bytes can be on the same instruction */
21264 #define validate_next(t, insn, n) \
21265 @@ -49,8 +54,8 @@
21266 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
21267 {
21268 memset(insn, 0, sizeof(*insn));
21269 - insn->kaddr = kaddr;
21270 - insn->next_byte = kaddr;
21271 + insn->kaddr = ktla_ktva(kaddr);
21272 + insn->next_byte = ktla_ktva(kaddr);
21273 insn->x86_64 = x86_64 ? 1 : 0;
21274 insn->opnd_bytes = 4;
21275 if (x86_64)
21276 diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
21277 index 05a95e7..326f2fa 100644
21278 --- a/arch/x86/lib/iomap_copy_64.S
21279 +++ b/arch/x86/lib/iomap_copy_64.S
21280 @@ -17,6 +17,7 @@
21281
21282 #include <linux/linkage.h>
21283 #include <asm/dwarf2.h>
21284 +#include <asm/alternative-asm.h>
21285
21286 /*
21287 * override generic version in lib/iomap_copy.c
21288 @@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
21289 CFI_STARTPROC
21290 movl %edx,%ecx
21291 rep movsd
21292 + pax_force_retaddr
21293 ret
21294 CFI_ENDPROC
21295 ENDPROC(__iowrite32_copy)
21296 diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
21297 index efbf2a0..8893637 100644
21298 --- a/arch/x86/lib/memcpy_64.S
21299 +++ b/arch/x86/lib/memcpy_64.S
21300 @@ -34,6 +34,7 @@
21301 rep movsq
21302 movl %edx, %ecx
21303 rep movsb
21304 + pax_force_retaddr
21305 ret
21306 .Lmemcpy_e:
21307 .previous
21308 @@ -51,6 +52,7 @@
21309
21310 movl %edx, %ecx
21311 rep movsb
21312 + pax_force_retaddr
21313 ret
21314 .Lmemcpy_e_e:
21315 .previous
21316 @@ -81,13 +83,13 @@ ENTRY(memcpy)
21317 */
21318 movq 0*8(%rsi), %r8
21319 movq 1*8(%rsi), %r9
21320 - movq 2*8(%rsi), %r10
21321 + movq 2*8(%rsi), %rcx
21322 movq 3*8(%rsi), %r11
21323 leaq 4*8(%rsi), %rsi
21324
21325 movq %r8, 0*8(%rdi)
21326 movq %r9, 1*8(%rdi)
21327 - movq %r10, 2*8(%rdi)
21328 + movq %rcx, 2*8(%rdi)
21329 movq %r11, 3*8(%rdi)
21330 leaq 4*8(%rdi), %rdi
21331 jae .Lcopy_forward_loop
21332 @@ -110,12 +112,12 @@ ENTRY(memcpy)
21333 subq $0x20, %rdx
21334 movq -1*8(%rsi), %r8
21335 movq -2*8(%rsi), %r9
21336 - movq -3*8(%rsi), %r10
21337 + movq -3*8(%rsi), %rcx
21338 movq -4*8(%rsi), %r11
21339 leaq -4*8(%rsi), %rsi
21340 movq %r8, -1*8(%rdi)
21341 movq %r9, -2*8(%rdi)
21342 - movq %r10, -3*8(%rdi)
21343 + movq %rcx, -3*8(%rdi)
21344 movq %r11, -4*8(%rdi)
21345 leaq -4*8(%rdi), %rdi
21346 jae .Lcopy_backward_loop
21347 @@ -135,12 +137,13 @@ ENTRY(memcpy)
21348 */
21349 movq 0*8(%rsi), %r8
21350 movq 1*8(%rsi), %r9
21351 - movq -2*8(%rsi, %rdx), %r10
21352 + movq -2*8(%rsi, %rdx), %rcx
21353 movq -1*8(%rsi, %rdx), %r11
21354 movq %r8, 0*8(%rdi)
21355 movq %r9, 1*8(%rdi)
21356 - movq %r10, -2*8(%rdi, %rdx)
21357 + movq %rcx, -2*8(%rdi, %rdx)
21358 movq %r11, -1*8(%rdi, %rdx)
21359 + pax_force_retaddr
21360 retq
21361 .p2align 4
21362 .Lless_16bytes:
21363 @@ -153,6 +156,7 @@ ENTRY(memcpy)
21364 movq -1*8(%rsi, %rdx), %r9
21365 movq %r8, 0*8(%rdi)
21366 movq %r9, -1*8(%rdi, %rdx)
21367 + pax_force_retaddr
21368 retq
21369 .p2align 4
21370 .Lless_8bytes:
21371 @@ -166,6 +170,7 @@ ENTRY(memcpy)
21372 movl -4(%rsi, %rdx), %r8d
21373 movl %ecx, (%rdi)
21374 movl %r8d, -4(%rdi, %rdx)
21375 + pax_force_retaddr
21376 retq
21377 .p2align 4
21378 .Lless_3bytes:
21379 @@ -183,6 +188,7 @@ ENTRY(memcpy)
21380 jnz .Lloop_1
21381
21382 .Lend:
21383 + pax_force_retaddr
21384 retq
21385 CFI_ENDPROC
21386 ENDPROC(memcpy)
21387 diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
21388 index ee16461..c39c199 100644
21389 --- a/arch/x86/lib/memmove_64.S
21390 +++ b/arch/x86/lib/memmove_64.S
21391 @@ -61,13 +61,13 @@ ENTRY(memmove)
21392 5:
21393 sub $0x20, %rdx
21394 movq 0*8(%rsi), %r11
21395 - movq 1*8(%rsi), %r10
21396 + movq 1*8(%rsi), %rcx
21397 movq 2*8(%rsi), %r9
21398 movq 3*8(%rsi), %r8
21399 leaq 4*8(%rsi), %rsi
21400
21401 movq %r11, 0*8(%rdi)
21402 - movq %r10, 1*8(%rdi)
21403 + movq %rcx, 1*8(%rdi)
21404 movq %r9, 2*8(%rdi)
21405 movq %r8, 3*8(%rdi)
21406 leaq 4*8(%rdi), %rdi
21407 @@ -81,10 +81,10 @@ ENTRY(memmove)
21408 4:
21409 movq %rdx, %rcx
21410 movq -8(%rsi, %rdx), %r11
21411 - lea -8(%rdi, %rdx), %r10
21412 + lea -8(%rdi, %rdx), %r9
21413 shrq $3, %rcx
21414 rep movsq
21415 - movq %r11, (%r10)
21416 + movq %r11, (%r9)
21417 jmp 13f
21418 .Lmemmove_end_forward:
21419
21420 @@ -95,14 +95,14 @@ ENTRY(memmove)
21421 7:
21422 movq %rdx, %rcx
21423 movq (%rsi), %r11
21424 - movq %rdi, %r10
21425 + movq %rdi, %r9
21426 leaq -8(%rsi, %rdx), %rsi
21427 leaq -8(%rdi, %rdx), %rdi
21428 shrq $3, %rcx
21429 std
21430 rep movsq
21431 cld
21432 - movq %r11, (%r10)
21433 + movq %r11, (%r9)
21434 jmp 13f
21435
21436 /*
21437 @@ -127,13 +127,13 @@ ENTRY(memmove)
21438 8:
21439 subq $0x20, %rdx
21440 movq -1*8(%rsi), %r11
21441 - movq -2*8(%rsi), %r10
21442 + movq -2*8(%rsi), %rcx
21443 movq -3*8(%rsi), %r9
21444 movq -4*8(%rsi), %r8
21445 leaq -4*8(%rsi), %rsi
21446
21447 movq %r11, -1*8(%rdi)
21448 - movq %r10, -2*8(%rdi)
21449 + movq %rcx, -2*8(%rdi)
21450 movq %r9, -3*8(%rdi)
21451 movq %r8, -4*8(%rdi)
21452 leaq -4*8(%rdi), %rdi
21453 @@ -151,11 +151,11 @@ ENTRY(memmove)
21454 * Move data from 16 bytes to 31 bytes.
21455 */
21456 movq 0*8(%rsi), %r11
21457 - movq 1*8(%rsi), %r10
21458 + movq 1*8(%rsi), %rcx
21459 movq -2*8(%rsi, %rdx), %r9
21460 movq -1*8(%rsi, %rdx), %r8
21461 movq %r11, 0*8(%rdi)
21462 - movq %r10, 1*8(%rdi)
21463 + movq %rcx, 1*8(%rdi)
21464 movq %r9, -2*8(%rdi, %rdx)
21465 movq %r8, -1*8(%rdi, %rdx)
21466 jmp 13f
21467 @@ -167,9 +167,9 @@ ENTRY(memmove)
21468 * Move data from 8 bytes to 15 bytes.
21469 */
21470 movq 0*8(%rsi), %r11
21471 - movq -1*8(%rsi, %rdx), %r10
21472 + movq -1*8(%rsi, %rdx), %r9
21473 movq %r11, 0*8(%rdi)
21474 - movq %r10, -1*8(%rdi, %rdx)
21475 + movq %r9, -1*8(%rdi, %rdx)
21476 jmp 13f
21477 10:
21478 cmpq $4, %rdx
21479 @@ -178,9 +178,9 @@ ENTRY(memmove)
21480 * Move data from 4 bytes to 7 bytes.
21481 */
21482 movl (%rsi), %r11d
21483 - movl -4(%rsi, %rdx), %r10d
21484 + movl -4(%rsi, %rdx), %r9d
21485 movl %r11d, (%rdi)
21486 - movl %r10d, -4(%rdi, %rdx)
21487 + movl %r9d, -4(%rdi, %rdx)
21488 jmp 13f
21489 11:
21490 cmp $2, %rdx
21491 @@ -189,9 +189,9 @@ ENTRY(memmove)
21492 * Move data from 2 bytes to 3 bytes.
21493 */
21494 movw (%rsi), %r11w
21495 - movw -2(%rsi, %rdx), %r10w
21496 + movw -2(%rsi, %rdx), %r9w
21497 movw %r11w, (%rdi)
21498 - movw %r10w, -2(%rdi, %rdx)
21499 + movw %r9w, -2(%rdi, %rdx)
21500 jmp 13f
21501 12:
21502 cmp $1, %rdx
21503 @@ -202,6 +202,7 @@ ENTRY(memmove)
21504 movb (%rsi), %r11b
21505 movb %r11b, (%rdi)
21506 13:
21507 + pax_force_retaddr
21508 retq
21509 CFI_ENDPROC
21510
21511 @@ -210,6 +211,7 @@ ENTRY(memmove)
21512 /* Forward moving data. */
21513 movq %rdx, %rcx
21514 rep movsb
21515 + pax_force_retaddr
21516 retq
21517 .Lmemmove_end_forward_efs:
21518 .previous
21519 diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
21520 index 79bd454..dff325a 100644
21521 --- a/arch/x86/lib/memset_64.S
21522 +++ b/arch/x86/lib/memset_64.S
21523 @@ -31,6 +31,7 @@
21524 movl %r8d,%ecx
21525 rep stosb
21526 movq %r9,%rax
21527 + pax_force_retaddr
21528 ret
21529 .Lmemset_e:
21530 .previous
21531 @@ -53,6 +54,7 @@
21532 movl %edx,%ecx
21533 rep stosb
21534 movq %r9,%rax
21535 + pax_force_retaddr
21536 ret
21537 .Lmemset_e_e:
21538 .previous
21539 @@ -60,13 +62,13 @@
21540 ENTRY(memset)
21541 ENTRY(__memset)
21542 CFI_STARTPROC
21543 - movq %rdi,%r10
21544 movq %rdx,%r11
21545
21546 /* expand byte value */
21547 movzbl %sil,%ecx
21548 movabs $0x0101010101010101,%rax
21549 mul %rcx /* with rax, clobbers rdx */
21550 + movq %rdi,%rdx
21551
21552 /* align dst */
21553 movl %edi,%r9d
21554 @@ -120,7 +122,8 @@ ENTRY(__memset)
21555 jnz .Lloop_1
21556
21557 .Lende:
21558 - movq %r10,%rax
21559 + movq %rdx,%rax
21560 + pax_force_retaddr
21561 ret
21562
21563 CFI_RESTORE_STATE
21564 diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
21565 index c9f2d9b..e7fd2c0 100644
21566 --- a/arch/x86/lib/mmx_32.c
21567 +++ b/arch/x86/lib/mmx_32.c
21568 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
21569 {
21570 void *p;
21571 int i;
21572 + unsigned long cr0;
21573
21574 if (unlikely(in_interrupt()))
21575 return __memcpy(to, from, len);
21576 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
21577 kernel_fpu_begin();
21578
21579 __asm__ __volatile__ (
21580 - "1: prefetch (%0)\n" /* This set is 28 bytes */
21581 - " prefetch 64(%0)\n"
21582 - " prefetch 128(%0)\n"
21583 - " prefetch 192(%0)\n"
21584 - " prefetch 256(%0)\n"
21585 + "1: prefetch (%1)\n" /* This set is 28 bytes */
21586 + " prefetch 64(%1)\n"
21587 + " prefetch 128(%1)\n"
21588 + " prefetch 192(%1)\n"
21589 + " prefetch 256(%1)\n"
21590 "2: \n"
21591 ".section .fixup, \"ax\"\n"
21592 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
21593 + "3: \n"
21594 +
21595 +#ifdef CONFIG_PAX_KERNEXEC
21596 + " movl %%cr0, %0\n"
21597 + " movl %0, %%eax\n"
21598 + " andl $0xFFFEFFFF, %%eax\n"
21599 + " movl %%eax, %%cr0\n"
21600 +#endif
21601 +
21602 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
21603 +
21604 +#ifdef CONFIG_PAX_KERNEXEC
21605 + " movl %0, %%cr0\n"
21606 +#endif
21607 +
21608 " jmp 2b\n"
21609 ".previous\n"
21610 _ASM_EXTABLE(1b, 3b)
21611 - : : "r" (from));
21612 + : "=&r" (cr0) : "r" (from) : "ax");
21613
21614 for ( ; i > 5; i--) {
21615 __asm__ __volatile__ (
21616 - "1: prefetch 320(%0)\n"
21617 - "2: movq (%0), %%mm0\n"
21618 - " movq 8(%0), %%mm1\n"
21619 - " movq 16(%0), %%mm2\n"
21620 - " movq 24(%0), %%mm3\n"
21621 - " movq %%mm0, (%1)\n"
21622 - " movq %%mm1, 8(%1)\n"
21623 - " movq %%mm2, 16(%1)\n"
21624 - " movq %%mm3, 24(%1)\n"
21625 - " movq 32(%0), %%mm0\n"
21626 - " movq 40(%0), %%mm1\n"
21627 - " movq 48(%0), %%mm2\n"
21628 - " movq 56(%0), %%mm3\n"
21629 - " movq %%mm0, 32(%1)\n"
21630 - " movq %%mm1, 40(%1)\n"
21631 - " movq %%mm2, 48(%1)\n"
21632 - " movq %%mm3, 56(%1)\n"
21633 + "1: prefetch 320(%1)\n"
21634 + "2: movq (%1), %%mm0\n"
21635 + " movq 8(%1), %%mm1\n"
21636 + " movq 16(%1), %%mm2\n"
21637 + " movq 24(%1), %%mm3\n"
21638 + " movq %%mm0, (%2)\n"
21639 + " movq %%mm1, 8(%2)\n"
21640 + " movq %%mm2, 16(%2)\n"
21641 + " movq %%mm3, 24(%2)\n"
21642 + " movq 32(%1), %%mm0\n"
21643 + " movq 40(%1), %%mm1\n"
21644 + " movq 48(%1), %%mm2\n"
21645 + " movq 56(%1), %%mm3\n"
21646 + " movq %%mm0, 32(%2)\n"
21647 + " movq %%mm1, 40(%2)\n"
21648 + " movq %%mm2, 48(%2)\n"
21649 + " movq %%mm3, 56(%2)\n"
21650 ".section .fixup, \"ax\"\n"
21651 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
21652 + "3:\n"
21653 +
21654 +#ifdef CONFIG_PAX_KERNEXEC
21655 + " movl %%cr0, %0\n"
21656 + " movl %0, %%eax\n"
21657 + " andl $0xFFFEFFFF, %%eax\n"
21658 + " movl %%eax, %%cr0\n"
21659 +#endif
21660 +
21661 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
21662 +
21663 +#ifdef CONFIG_PAX_KERNEXEC
21664 + " movl %0, %%cr0\n"
21665 +#endif
21666 +
21667 " jmp 2b\n"
21668 ".previous\n"
21669 _ASM_EXTABLE(1b, 3b)
21670 - : : "r" (from), "r" (to) : "memory");
21671 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
21672
21673 from += 64;
21674 to += 64;
21675 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
21676 static void fast_copy_page(void *to, void *from)
21677 {
21678 int i;
21679 + unsigned long cr0;
21680
21681 kernel_fpu_begin();
21682
21683 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
21684 * but that is for later. -AV
21685 */
21686 __asm__ __volatile__(
21687 - "1: prefetch (%0)\n"
21688 - " prefetch 64(%0)\n"
21689 - " prefetch 128(%0)\n"
21690 - " prefetch 192(%0)\n"
21691 - " prefetch 256(%0)\n"
21692 + "1: prefetch (%1)\n"
21693 + " prefetch 64(%1)\n"
21694 + " prefetch 128(%1)\n"
21695 + " prefetch 192(%1)\n"
21696 + " prefetch 256(%1)\n"
21697 "2: \n"
21698 ".section .fixup, \"ax\"\n"
21699 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
21700 + "3: \n"
21701 +
21702 +#ifdef CONFIG_PAX_KERNEXEC
21703 + " movl %%cr0, %0\n"
21704 + " movl %0, %%eax\n"
21705 + " andl $0xFFFEFFFF, %%eax\n"
21706 + " movl %%eax, %%cr0\n"
21707 +#endif
21708 +
21709 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
21710 +
21711 +#ifdef CONFIG_PAX_KERNEXEC
21712 + " movl %0, %%cr0\n"
21713 +#endif
21714 +
21715 " jmp 2b\n"
21716 ".previous\n"
21717 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
21718 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
21719
21720 for (i = 0; i < (4096-320)/64; i++) {
21721 __asm__ __volatile__ (
21722 - "1: prefetch 320(%0)\n"
21723 - "2: movq (%0), %%mm0\n"
21724 - " movntq %%mm0, (%1)\n"
21725 - " movq 8(%0), %%mm1\n"
21726 - " movntq %%mm1, 8(%1)\n"
21727 - " movq 16(%0), %%mm2\n"
21728 - " movntq %%mm2, 16(%1)\n"
21729 - " movq 24(%0), %%mm3\n"
21730 - " movntq %%mm3, 24(%1)\n"
21731 - " movq 32(%0), %%mm4\n"
21732 - " movntq %%mm4, 32(%1)\n"
21733 - " movq 40(%0), %%mm5\n"
21734 - " movntq %%mm5, 40(%1)\n"
21735 - " movq 48(%0), %%mm6\n"
21736 - " movntq %%mm6, 48(%1)\n"
21737 - " movq 56(%0), %%mm7\n"
21738 - " movntq %%mm7, 56(%1)\n"
21739 + "1: prefetch 320(%1)\n"
21740 + "2: movq (%1), %%mm0\n"
21741 + " movntq %%mm0, (%2)\n"
21742 + " movq 8(%1), %%mm1\n"
21743 + " movntq %%mm1, 8(%2)\n"
21744 + " movq 16(%1), %%mm2\n"
21745 + " movntq %%mm2, 16(%2)\n"
21746 + " movq 24(%1), %%mm3\n"
21747 + " movntq %%mm3, 24(%2)\n"
21748 + " movq 32(%1), %%mm4\n"
21749 + " movntq %%mm4, 32(%2)\n"
21750 + " movq 40(%1), %%mm5\n"
21751 + " movntq %%mm5, 40(%2)\n"
21752 + " movq 48(%1), %%mm6\n"
21753 + " movntq %%mm6, 48(%2)\n"
21754 + " movq 56(%1), %%mm7\n"
21755 + " movntq %%mm7, 56(%2)\n"
21756 ".section .fixup, \"ax\"\n"
21757 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
21758 + "3:\n"
21759 +
21760 +#ifdef CONFIG_PAX_KERNEXEC
21761 + " movl %%cr0, %0\n"
21762 + " movl %0, %%eax\n"
21763 + " andl $0xFFFEFFFF, %%eax\n"
21764 + " movl %%eax, %%cr0\n"
21765 +#endif
21766 +
21767 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
21768 +
21769 +#ifdef CONFIG_PAX_KERNEXEC
21770 + " movl %0, %%cr0\n"
21771 +#endif
21772 +
21773 " jmp 2b\n"
21774 ".previous\n"
21775 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
21776 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
21777
21778 from += 64;
21779 to += 64;
21780 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
21781 static void fast_copy_page(void *to, void *from)
21782 {
21783 int i;
21784 + unsigned long cr0;
21785
21786 kernel_fpu_begin();
21787
21788 __asm__ __volatile__ (
21789 - "1: prefetch (%0)\n"
21790 - " prefetch 64(%0)\n"
21791 - " prefetch 128(%0)\n"
21792 - " prefetch 192(%0)\n"
21793 - " prefetch 256(%0)\n"
21794 + "1: prefetch (%1)\n"
21795 + " prefetch 64(%1)\n"
21796 + " prefetch 128(%1)\n"
21797 + " prefetch 192(%1)\n"
21798 + " prefetch 256(%1)\n"
21799 "2: \n"
21800 ".section .fixup, \"ax\"\n"
21801 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
21802 + "3: \n"
21803 +
21804 +#ifdef CONFIG_PAX_KERNEXEC
21805 + " movl %%cr0, %0\n"
21806 + " movl %0, %%eax\n"
21807 + " andl $0xFFFEFFFF, %%eax\n"
21808 + " movl %%eax, %%cr0\n"
21809 +#endif
21810 +
21811 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
21812 +
21813 +#ifdef CONFIG_PAX_KERNEXEC
21814 + " movl %0, %%cr0\n"
21815 +#endif
21816 +
21817 " jmp 2b\n"
21818 ".previous\n"
21819 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
21820 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
21821
21822 for (i = 0; i < 4096/64; i++) {
21823 __asm__ __volatile__ (
21824 - "1: prefetch 320(%0)\n"
21825 - "2: movq (%0), %%mm0\n"
21826 - " movq 8(%0), %%mm1\n"
21827 - " movq 16(%0), %%mm2\n"
21828 - " movq 24(%0), %%mm3\n"
21829 - " movq %%mm0, (%1)\n"
21830 - " movq %%mm1, 8(%1)\n"
21831 - " movq %%mm2, 16(%1)\n"
21832 - " movq %%mm3, 24(%1)\n"
21833 - " movq 32(%0), %%mm0\n"
21834 - " movq 40(%0), %%mm1\n"
21835 - " movq 48(%0), %%mm2\n"
21836 - " movq 56(%0), %%mm3\n"
21837 - " movq %%mm0, 32(%1)\n"
21838 - " movq %%mm1, 40(%1)\n"
21839 - " movq %%mm2, 48(%1)\n"
21840 - " movq %%mm3, 56(%1)\n"
21841 + "1: prefetch 320(%1)\n"
21842 + "2: movq (%1), %%mm0\n"
21843 + " movq 8(%1), %%mm1\n"
21844 + " movq 16(%1), %%mm2\n"
21845 + " movq 24(%1), %%mm3\n"
21846 + " movq %%mm0, (%2)\n"
21847 + " movq %%mm1, 8(%2)\n"
21848 + " movq %%mm2, 16(%2)\n"
21849 + " movq %%mm3, 24(%2)\n"
21850 + " movq 32(%1), %%mm0\n"
21851 + " movq 40(%1), %%mm1\n"
21852 + " movq 48(%1), %%mm2\n"
21853 + " movq 56(%1), %%mm3\n"
21854 + " movq %%mm0, 32(%2)\n"
21855 + " movq %%mm1, 40(%2)\n"
21856 + " movq %%mm2, 48(%2)\n"
21857 + " movq %%mm3, 56(%2)\n"
21858 ".section .fixup, \"ax\"\n"
21859 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
21860 + "3:\n"
21861 +
21862 +#ifdef CONFIG_PAX_KERNEXEC
21863 + " movl %%cr0, %0\n"
21864 + " movl %0, %%eax\n"
21865 + " andl $0xFFFEFFFF, %%eax\n"
21866 + " movl %%eax, %%cr0\n"
21867 +#endif
21868 +
21869 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
21870 +
21871 +#ifdef CONFIG_PAX_KERNEXEC
21872 + " movl %0, %%cr0\n"
21873 +#endif
21874 +
21875 " jmp 2b\n"
21876 ".previous\n"
21877 _ASM_EXTABLE(1b, 3b)
21878 - : : "r" (from), "r" (to) : "memory");
21879 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
21880
21881 from += 64;
21882 to += 64;
21883 diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
21884 index 69fa106..adda88b 100644
21885 --- a/arch/x86/lib/msr-reg.S
21886 +++ b/arch/x86/lib/msr-reg.S
21887 @@ -3,6 +3,7 @@
21888 #include <asm/dwarf2.h>
21889 #include <asm/asm.h>
21890 #include <asm/msr.h>
21891 +#include <asm/alternative-asm.h>
21892
21893 #ifdef CONFIG_X86_64
21894 /*
21895 @@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
21896 CFI_STARTPROC
21897 pushq_cfi %rbx
21898 pushq_cfi %rbp
21899 - movq %rdi, %r10 /* Save pointer */
21900 + movq %rdi, %r9 /* Save pointer */
21901 xorl %r11d, %r11d /* Return value */
21902 movl (%rdi), %eax
21903 movl 4(%rdi), %ecx
21904 @@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
21905 movl 28(%rdi), %edi
21906 CFI_REMEMBER_STATE
21907 1: \op
21908 -2: movl %eax, (%r10)
21909 +2: movl %eax, (%r9)
21910 movl %r11d, %eax /* Return value */
21911 - movl %ecx, 4(%r10)
21912 - movl %edx, 8(%r10)
21913 - movl %ebx, 12(%r10)
21914 - movl %ebp, 20(%r10)
21915 - movl %esi, 24(%r10)
21916 - movl %edi, 28(%r10)
21917 + movl %ecx, 4(%r9)
21918 + movl %edx, 8(%r9)
21919 + movl %ebx, 12(%r9)
21920 + movl %ebp, 20(%r9)
21921 + movl %esi, 24(%r9)
21922 + movl %edi, 28(%r9)
21923 popq_cfi %rbp
21924 popq_cfi %rbx
21925 + pax_force_retaddr
21926 ret
21927 3:
21928 CFI_RESTORE_STATE
21929 diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
21930 index 36b0d15..d381858 100644
21931 --- a/arch/x86/lib/putuser.S
21932 +++ b/arch/x86/lib/putuser.S
21933 @@ -15,7 +15,9 @@
21934 #include <asm/thread_info.h>
21935 #include <asm/errno.h>
21936 #include <asm/asm.h>
21937 -
21938 +#include <asm/segment.h>
21939 +#include <asm/pgtable.h>
21940 +#include <asm/alternative-asm.h>
21941
21942 /*
21943 * __put_user_X
21944 @@ -29,52 +31,119 @@
21945 * as they get called from within inline assembly.
21946 */
21947
21948 -#define ENTER CFI_STARTPROC ; \
21949 - GET_THREAD_INFO(%_ASM_BX)
21950 -#define EXIT ret ; \
21951 +#define ENTER CFI_STARTPROC
21952 +#define EXIT pax_force_retaddr; ret ; \
21953 CFI_ENDPROC
21954
21955 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21956 +#define _DEST %_ASM_CX,%_ASM_BX
21957 +#else
21958 +#define _DEST %_ASM_CX
21959 +#endif
21960 +
21961 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
21962 +#define __copyuser_seg gs;
21963 +#else
21964 +#define __copyuser_seg
21965 +#endif
21966 +
21967 .text
21968 ENTRY(__put_user_1)
21969 ENTER
21970 +
21971 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21972 + GET_THREAD_INFO(%_ASM_BX)
21973 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
21974 jae bad_put_user
21975 -1: movb %al,(%_ASM_CX)
21976 +
21977 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21978 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
21979 + cmp %_ASM_BX,%_ASM_CX
21980 + jb 1234f
21981 + xor %ebx,%ebx
21982 +1234:
21983 +#endif
21984 +
21985 +#endif
21986 +
21987 +1: __copyuser_seg movb %al,(_DEST)
21988 xor %eax,%eax
21989 EXIT
21990 ENDPROC(__put_user_1)
21991
21992 ENTRY(__put_user_2)
21993 ENTER
21994 +
21995 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21996 + GET_THREAD_INFO(%_ASM_BX)
21997 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
21998 sub $1,%_ASM_BX
21999 cmp %_ASM_BX,%_ASM_CX
22000 jae bad_put_user
22001 -2: movw %ax,(%_ASM_CX)
22002 +
22003 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22004 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22005 + cmp %_ASM_BX,%_ASM_CX
22006 + jb 1234f
22007 + xor %ebx,%ebx
22008 +1234:
22009 +#endif
22010 +
22011 +#endif
22012 +
22013 +2: __copyuser_seg movw %ax,(_DEST)
22014 xor %eax,%eax
22015 EXIT
22016 ENDPROC(__put_user_2)
22017
22018 ENTRY(__put_user_4)
22019 ENTER
22020 +
22021 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22022 + GET_THREAD_INFO(%_ASM_BX)
22023 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22024 sub $3,%_ASM_BX
22025 cmp %_ASM_BX,%_ASM_CX
22026 jae bad_put_user
22027 -3: movl %eax,(%_ASM_CX)
22028 +
22029 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22030 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22031 + cmp %_ASM_BX,%_ASM_CX
22032 + jb 1234f
22033 + xor %ebx,%ebx
22034 +1234:
22035 +#endif
22036 +
22037 +#endif
22038 +
22039 +3: __copyuser_seg movl %eax,(_DEST)
22040 xor %eax,%eax
22041 EXIT
22042 ENDPROC(__put_user_4)
22043
22044 ENTRY(__put_user_8)
22045 ENTER
22046 +
22047 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22048 + GET_THREAD_INFO(%_ASM_BX)
22049 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22050 sub $7,%_ASM_BX
22051 cmp %_ASM_BX,%_ASM_CX
22052 jae bad_put_user
22053 -4: mov %_ASM_AX,(%_ASM_CX)
22054 +
22055 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22056 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22057 + cmp %_ASM_BX,%_ASM_CX
22058 + jb 1234f
22059 + xor %ebx,%ebx
22060 +1234:
22061 +#endif
22062 +
22063 +#endif
22064 +
22065 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
22066 #ifdef CONFIG_X86_32
22067 -5: movl %edx,4(%_ASM_CX)
22068 +5: __copyuser_seg movl %edx,4(_DEST)
22069 #endif
22070 xor %eax,%eax
22071 EXIT
22072 diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
22073 index 1cad221..de671ee 100644
22074 --- a/arch/x86/lib/rwlock.S
22075 +++ b/arch/x86/lib/rwlock.S
22076 @@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
22077 FRAME
22078 0: LOCK_PREFIX
22079 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
22080 +
22081 +#ifdef CONFIG_PAX_REFCOUNT
22082 + jno 1234f
22083 + LOCK_PREFIX
22084 + WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
22085 + int $4
22086 +1234:
22087 + _ASM_EXTABLE(1234b, 1234b)
22088 +#endif
22089 +
22090 1: rep; nop
22091 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
22092 jne 1b
22093 LOCK_PREFIX
22094 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
22095 +
22096 +#ifdef CONFIG_PAX_REFCOUNT
22097 + jno 1234f
22098 + LOCK_PREFIX
22099 + WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
22100 + int $4
22101 +1234:
22102 + _ASM_EXTABLE(1234b, 1234b)
22103 +#endif
22104 +
22105 jnz 0b
22106 ENDFRAME
22107 + pax_force_retaddr
22108 ret
22109 CFI_ENDPROC
22110 END(__write_lock_failed)
22111 @@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
22112 FRAME
22113 0: LOCK_PREFIX
22114 READ_LOCK_SIZE(inc) (%__lock_ptr)
22115 +
22116 +#ifdef CONFIG_PAX_REFCOUNT
22117 + jno 1234f
22118 + LOCK_PREFIX
22119 + READ_LOCK_SIZE(dec) (%__lock_ptr)
22120 + int $4
22121 +1234:
22122 + _ASM_EXTABLE(1234b, 1234b)
22123 +#endif
22124 +
22125 1: rep; nop
22126 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
22127 js 1b
22128 LOCK_PREFIX
22129 READ_LOCK_SIZE(dec) (%__lock_ptr)
22130 +
22131 +#ifdef CONFIG_PAX_REFCOUNT
22132 + jno 1234f
22133 + LOCK_PREFIX
22134 + READ_LOCK_SIZE(inc) (%__lock_ptr)
22135 + int $4
22136 +1234:
22137 + _ASM_EXTABLE(1234b, 1234b)
22138 +#endif
22139 +
22140 js 0b
22141 ENDFRAME
22142 + pax_force_retaddr
22143 ret
22144 CFI_ENDPROC
22145 END(__read_lock_failed)
22146 diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
22147 index 5dff5f0..cadebf4 100644
22148 --- a/arch/x86/lib/rwsem.S
22149 +++ b/arch/x86/lib/rwsem.S
22150 @@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
22151 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
22152 CFI_RESTORE __ASM_REG(dx)
22153 restore_common_regs
22154 + pax_force_retaddr
22155 ret
22156 CFI_ENDPROC
22157 ENDPROC(call_rwsem_down_read_failed)
22158 @@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
22159 movq %rax,%rdi
22160 call rwsem_down_write_failed
22161 restore_common_regs
22162 + pax_force_retaddr
22163 ret
22164 CFI_ENDPROC
22165 ENDPROC(call_rwsem_down_write_failed)
22166 @@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
22167 movq %rax,%rdi
22168 call rwsem_wake
22169 restore_common_regs
22170 -1: ret
22171 +1: pax_force_retaddr
22172 + ret
22173 CFI_ENDPROC
22174 ENDPROC(call_rwsem_wake)
22175
22176 @@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
22177 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
22178 CFI_RESTORE __ASM_REG(dx)
22179 restore_common_regs
22180 + pax_force_retaddr
22181 ret
22182 CFI_ENDPROC
22183 ENDPROC(call_rwsem_downgrade_wake)
22184 diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
22185 index a63efd6..ccecad8 100644
22186 --- a/arch/x86/lib/thunk_64.S
22187 +++ b/arch/x86/lib/thunk_64.S
22188 @@ -8,6 +8,7 @@
22189 #include <linux/linkage.h>
22190 #include <asm/dwarf2.h>
22191 #include <asm/calling.h>
22192 +#include <asm/alternative-asm.h>
22193
22194 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
22195 .macro THUNK name, func, put_ret_addr_in_rdi=0
22196 @@ -41,5 +42,6 @@
22197 SAVE_ARGS
22198 restore:
22199 RESTORE_ARGS
22200 + pax_force_retaddr
22201 ret
22202 CFI_ENDPROC
22203 diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
22204 index e218d5d..35679b4 100644
22205 --- a/arch/x86/lib/usercopy_32.c
22206 +++ b/arch/x86/lib/usercopy_32.c
22207 @@ -43,7 +43,7 @@ do { \
22208 __asm__ __volatile__( \
22209 " testl %1,%1\n" \
22210 " jz 2f\n" \
22211 - "0: lodsb\n" \
22212 + "0: "__copyuser_seg"lodsb\n" \
22213 " stosb\n" \
22214 " testb %%al,%%al\n" \
22215 " jz 1f\n" \
22216 @@ -128,10 +128,12 @@ do { \
22217 int __d0; \
22218 might_fault(); \
22219 __asm__ __volatile__( \
22220 + __COPYUSER_SET_ES \
22221 "0: rep; stosl\n" \
22222 " movl %2,%0\n" \
22223 "1: rep; stosb\n" \
22224 "2:\n" \
22225 + __COPYUSER_RESTORE_ES \
22226 ".section .fixup,\"ax\"\n" \
22227 "3: lea 0(%2,%0,4),%0\n" \
22228 " jmp 2b\n" \
22229 @@ -200,6 +202,7 @@ long strnlen_user(const char __user *s, long n)
22230 might_fault();
22231
22232 __asm__ __volatile__(
22233 + __COPYUSER_SET_ES
22234 " testl %0, %0\n"
22235 " jz 3f\n"
22236 " andl %0,%%ecx\n"
22237 @@ -208,6 +211,7 @@ long strnlen_user(const char __user *s, long n)
22238 " subl %%ecx,%0\n"
22239 " addl %0,%%eax\n"
22240 "1:\n"
22241 + __COPYUSER_RESTORE_ES
22242 ".section .fixup,\"ax\"\n"
22243 "2: xorl %%eax,%%eax\n"
22244 " jmp 1b\n"
22245 @@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
22246
22247 #ifdef CONFIG_X86_INTEL_USERCOPY
22248 static unsigned long
22249 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
22250 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
22251 {
22252 int d0, d1;
22253 __asm__ __volatile__(
22254 @@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
22255 " .align 2,0x90\n"
22256 "3: movl 0(%4), %%eax\n"
22257 "4: movl 4(%4), %%edx\n"
22258 - "5: movl %%eax, 0(%3)\n"
22259 - "6: movl %%edx, 4(%3)\n"
22260 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
22261 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
22262 "7: movl 8(%4), %%eax\n"
22263 "8: movl 12(%4),%%edx\n"
22264 - "9: movl %%eax, 8(%3)\n"
22265 - "10: movl %%edx, 12(%3)\n"
22266 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
22267 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
22268 "11: movl 16(%4), %%eax\n"
22269 "12: movl 20(%4), %%edx\n"
22270 - "13: movl %%eax, 16(%3)\n"
22271 - "14: movl %%edx, 20(%3)\n"
22272 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
22273 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
22274 "15: movl 24(%4), %%eax\n"
22275 "16: movl 28(%4), %%edx\n"
22276 - "17: movl %%eax, 24(%3)\n"
22277 - "18: movl %%edx, 28(%3)\n"
22278 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
22279 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
22280 "19: movl 32(%4), %%eax\n"
22281 "20: movl 36(%4), %%edx\n"
22282 - "21: movl %%eax, 32(%3)\n"
22283 - "22: movl %%edx, 36(%3)\n"
22284 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
22285 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
22286 "23: movl 40(%4), %%eax\n"
22287 "24: movl 44(%4), %%edx\n"
22288 - "25: movl %%eax, 40(%3)\n"
22289 - "26: movl %%edx, 44(%3)\n"
22290 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
22291 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
22292 "27: movl 48(%4), %%eax\n"
22293 "28: movl 52(%4), %%edx\n"
22294 - "29: movl %%eax, 48(%3)\n"
22295 - "30: movl %%edx, 52(%3)\n"
22296 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
22297 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
22298 "31: movl 56(%4), %%eax\n"
22299 "32: movl 60(%4), %%edx\n"
22300 - "33: movl %%eax, 56(%3)\n"
22301 - "34: movl %%edx, 60(%3)\n"
22302 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
22303 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
22304 " addl $-64, %0\n"
22305 " addl $64, %4\n"
22306 " addl $64, %3\n"
22307 @@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
22308 " shrl $2, %0\n"
22309 " andl $3, %%eax\n"
22310 " cld\n"
22311 + __COPYUSER_SET_ES
22312 "99: rep; movsl\n"
22313 "36: movl %%eax, %0\n"
22314 "37: rep; movsb\n"
22315 "100:\n"
22316 + __COPYUSER_RESTORE_ES
22317 + ".section .fixup,\"ax\"\n"
22318 + "101: lea 0(%%eax,%0,4),%0\n"
22319 + " jmp 100b\n"
22320 + ".previous\n"
22321 + ".section __ex_table,\"a\"\n"
22322 + " .align 4\n"
22323 + " .long 1b,100b\n"
22324 + " .long 2b,100b\n"
22325 + " .long 3b,100b\n"
22326 + " .long 4b,100b\n"
22327 + " .long 5b,100b\n"
22328 + " .long 6b,100b\n"
22329 + " .long 7b,100b\n"
22330 + " .long 8b,100b\n"
22331 + " .long 9b,100b\n"
22332 + " .long 10b,100b\n"
22333 + " .long 11b,100b\n"
22334 + " .long 12b,100b\n"
22335 + " .long 13b,100b\n"
22336 + " .long 14b,100b\n"
22337 + " .long 15b,100b\n"
22338 + " .long 16b,100b\n"
22339 + " .long 17b,100b\n"
22340 + " .long 18b,100b\n"
22341 + " .long 19b,100b\n"
22342 + " .long 20b,100b\n"
22343 + " .long 21b,100b\n"
22344 + " .long 22b,100b\n"
22345 + " .long 23b,100b\n"
22346 + " .long 24b,100b\n"
22347 + " .long 25b,100b\n"
22348 + " .long 26b,100b\n"
22349 + " .long 27b,100b\n"
22350 + " .long 28b,100b\n"
22351 + " .long 29b,100b\n"
22352 + " .long 30b,100b\n"
22353 + " .long 31b,100b\n"
22354 + " .long 32b,100b\n"
22355 + " .long 33b,100b\n"
22356 + " .long 34b,100b\n"
22357 + " .long 35b,100b\n"
22358 + " .long 36b,100b\n"
22359 + " .long 37b,100b\n"
22360 + " .long 99b,101b\n"
22361 + ".previous"
22362 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
22363 + : "1"(to), "2"(from), "0"(size)
22364 + : "eax", "edx", "memory");
22365 + return size;
22366 +}
22367 +
22368 +static unsigned long
22369 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
22370 +{
22371 + int d0, d1;
22372 + __asm__ __volatile__(
22373 + " .align 2,0x90\n"
22374 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
22375 + " cmpl $67, %0\n"
22376 + " jbe 3f\n"
22377 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
22378 + " .align 2,0x90\n"
22379 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
22380 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
22381 + "5: movl %%eax, 0(%3)\n"
22382 + "6: movl %%edx, 4(%3)\n"
22383 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
22384 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
22385 + "9: movl %%eax, 8(%3)\n"
22386 + "10: movl %%edx, 12(%3)\n"
22387 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
22388 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
22389 + "13: movl %%eax, 16(%3)\n"
22390 + "14: movl %%edx, 20(%3)\n"
22391 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
22392 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
22393 + "17: movl %%eax, 24(%3)\n"
22394 + "18: movl %%edx, 28(%3)\n"
22395 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
22396 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
22397 + "21: movl %%eax, 32(%3)\n"
22398 + "22: movl %%edx, 36(%3)\n"
22399 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
22400 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
22401 + "25: movl %%eax, 40(%3)\n"
22402 + "26: movl %%edx, 44(%3)\n"
22403 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
22404 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
22405 + "29: movl %%eax, 48(%3)\n"
22406 + "30: movl %%edx, 52(%3)\n"
22407 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
22408 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
22409 + "33: movl %%eax, 56(%3)\n"
22410 + "34: movl %%edx, 60(%3)\n"
22411 + " addl $-64, %0\n"
22412 + " addl $64, %4\n"
22413 + " addl $64, %3\n"
22414 + " cmpl $63, %0\n"
22415 + " ja 1b\n"
22416 + "35: movl %0, %%eax\n"
22417 + " shrl $2, %0\n"
22418 + " andl $3, %%eax\n"
22419 + " cld\n"
22420 + "99: rep; "__copyuser_seg" movsl\n"
22421 + "36: movl %%eax, %0\n"
22422 + "37: rep; "__copyuser_seg" movsb\n"
22423 + "100:\n"
22424 ".section .fixup,\"ax\"\n"
22425 "101: lea 0(%%eax,%0,4),%0\n"
22426 " jmp 100b\n"
22427 @@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
22428 int d0, d1;
22429 __asm__ __volatile__(
22430 " .align 2,0x90\n"
22431 - "0: movl 32(%4), %%eax\n"
22432 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
22433 " cmpl $67, %0\n"
22434 " jbe 2f\n"
22435 - "1: movl 64(%4), %%eax\n"
22436 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
22437 " .align 2,0x90\n"
22438 - "2: movl 0(%4), %%eax\n"
22439 - "21: movl 4(%4), %%edx\n"
22440 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
22441 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
22442 " movl %%eax, 0(%3)\n"
22443 " movl %%edx, 4(%3)\n"
22444 - "3: movl 8(%4), %%eax\n"
22445 - "31: movl 12(%4),%%edx\n"
22446 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
22447 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
22448 " movl %%eax, 8(%3)\n"
22449 " movl %%edx, 12(%3)\n"
22450 - "4: movl 16(%4), %%eax\n"
22451 - "41: movl 20(%4), %%edx\n"
22452 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
22453 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
22454 " movl %%eax, 16(%3)\n"
22455 " movl %%edx, 20(%3)\n"
22456 - "10: movl 24(%4), %%eax\n"
22457 - "51: movl 28(%4), %%edx\n"
22458 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
22459 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
22460 " movl %%eax, 24(%3)\n"
22461 " movl %%edx, 28(%3)\n"
22462 - "11: movl 32(%4), %%eax\n"
22463 - "61: movl 36(%4), %%edx\n"
22464 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
22465 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
22466 " movl %%eax, 32(%3)\n"
22467 " movl %%edx, 36(%3)\n"
22468 - "12: movl 40(%4), %%eax\n"
22469 - "71: movl 44(%4), %%edx\n"
22470 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
22471 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
22472 " movl %%eax, 40(%3)\n"
22473 " movl %%edx, 44(%3)\n"
22474 - "13: movl 48(%4), %%eax\n"
22475 - "81: movl 52(%4), %%edx\n"
22476 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
22477 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
22478 " movl %%eax, 48(%3)\n"
22479 " movl %%edx, 52(%3)\n"
22480 - "14: movl 56(%4), %%eax\n"
22481 - "91: movl 60(%4), %%edx\n"
22482 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
22483 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
22484 " movl %%eax, 56(%3)\n"
22485 " movl %%edx, 60(%3)\n"
22486 " addl $-64, %0\n"
22487 @@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
22488 " shrl $2, %0\n"
22489 " andl $3, %%eax\n"
22490 " cld\n"
22491 - "6: rep; movsl\n"
22492 + "6: rep; "__copyuser_seg" movsl\n"
22493 " movl %%eax,%0\n"
22494 - "7: rep; movsb\n"
22495 + "7: rep; "__copyuser_seg" movsb\n"
22496 "8:\n"
22497 ".section .fixup,\"ax\"\n"
22498 "9: lea 0(%%eax,%0,4),%0\n"
22499 @@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
22500
22501 __asm__ __volatile__(
22502 " .align 2,0x90\n"
22503 - "0: movl 32(%4), %%eax\n"
22504 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
22505 " cmpl $67, %0\n"
22506 " jbe 2f\n"
22507 - "1: movl 64(%4), %%eax\n"
22508 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
22509 " .align 2,0x90\n"
22510 - "2: movl 0(%4), %%eax\n"
22511 - "21: movl 4(%4), %%edx\n"
22512 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
22513 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
22514 " movnti %%eax, 0(%3)\n"
22515 " movnti %%edx, 4(%3)\n"
22516 - "3: movl 8(%4), %%eax\n"
22517 - "31: movl 12(%4),%%edx\n"
22518 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
22519 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
22520 " movnti %%eax, 8(%3)\n"
22521 " movnti %%edx, 12(%3)\n"
22522 - "4: movl 16(%4), %%eax\n"
22523 - "41: movl 20(%4), %%edx\n"
22524 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
22525 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
22526 " movnti %%eax, 16(%3)\n"
22527 " movnti %%edx, 20(%3)\n"
22528 - "10: movl 24(%4), %%eax\n"
22529 - "51: movl 28(%4), %%edx\n"
22530 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
22531 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
22532 " movnti %%eax, 24(%3)\n"
22533 " movnti %%edx, 28(%3)\n"
22534 - "11: movl 32(%4), %%eax\n"
22535 - "61: movl 36(%4), %%edx\n"
22536 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
22537 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
22538 " movnti %%eax, 32(%3)\n"
22539 " movnti %%edx, 36(%3)\n"
22540 - "12: movl 40(%4), %%eax\n"
22541 - "71: movl 44(%4), %%edx\n"
22542 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
22543 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
22544 " movnti %%eax, 40(%3)\n"
22545 " movnti %%edx, 44(%3)\n"
22546 - "13: movl 48(%4), %%eax\n"
22547 - "81: movl 52(%4), %%edx\n"
22548 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
22549 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
22550 " movnti %%eax, 48(%3)\n"
22551 " movnti %%edx, 52(%3)\n"
22552 - "14: movl 56(%4), %%eax\n"
22553 - "91: movl 60(%4), %%edx\n"
22554 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
22555 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
22556 " movnti %%eax, 56(%3)\n"
22557 " movnti %%edx, 60(%3)\n"
22558 " addl $-64, %0\n"
22559 @@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
22560 " shrl $2, %0\n"
22561 " andl $3, %%eax\n"
22562 " cld\n"
22563 - "6: rep; movsl\n"
22564 + "6: rep; "__copyuser_seg" movsl\n"
22565 " movl %%eax,%0\n"
22566 - "7: rep; movsb\n"
22567 + "7: rep; "__copyuser_seg" movsb\n"
22568 "8:\n"
22569 ".section .fixup,\"ax\"\n"
22570 "9: lea 0(%%eax,%0,4),%0\n"
22571 @@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
22572
22573 __asm__ __volatile__(
22574 " .align 2,0x90\n"
22575 - "0: movl 32(%4), %%eax\n"
22576 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
22577 " cmpl $67, %0\n"
22578 " jbe 2f\n"
22579 - "1: movl 64(%4), %%eax\n"
22580 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
22581 " .align 2,0x90\n"
22582 - "2: movl 0(%4), %%eax\n"
22583 - "21: movl 4(%4), %%edx\n"
22584 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
22585 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
22586 " movnti %%eax, 0(%3)\n"
22587 " movnti %%edx, 4(%3)\n"
22588 - "3: movl 8(%4), %%eax\n"
22589 - "31: movl 12(%4),%%edx\n"
22590 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
22591 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
22592 " movnti %%eax, 8(%3)\n"
22593 " movnti %%edx, 12(%3)\n"
22594 - "4: movl 16(%4), %%eax\n"
22595 - "41: movl 20(%4), %%edx\n"
22596 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
22597 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
22598 " movnti %%eax, 16(%3)\n"
22599 " movnti %%edx, 20(%3)\n"
22600 - "10: movl 24(%4), %%eax\n"
22601 - "51: movl 28(%4), %%edx\n"
22602 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
22603 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
22604 " movnti %%eax, 24(%3)\n"
22605 " movnti %%edx, 28(%3)\n"
22606 - "11: movl 32(%4), %%eax\n"
22607 - "61: movl 36(%4), %%edx\n"
22608 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
22609 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
22610 " movnti %%eax, 32(%3)\n"
22611 " movnti %%edx, 36(%3)\n"
22612 - "12: movl 40(%4), %%eax\n"
22613 - "71: movl 44(%4), %%edx\n"
22614 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
22615 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
22616 " movnti %%eax, 40(%3)\n"
22617 " movnti %%edx, 44(%3)\n"
22618 - "13: movl 48(%4), %%eax\n"
22619 - "81: movl 52(%4), %%edx\n"
22620 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
22621 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
22622 " movnti %%eax, 48(%3)\n"
22623 " movnti %%edx, 52(%3)\n"
22624 - "14: movl 56(%4), %%eax\n"
22625 - "91: movl 60(%4), %%edx\n"
22626 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
22627 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
22628 " movnti %%eax, 56(%3)\n"
22629 " movnti %%edx, 60(%3)\n"
22630 " addl $-64, %0\n"
22631 @@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
22632 " shrl $2, %0\n"
22633 " andl $3, %%eax\n"
22634 " cld\n"
22635 - "6: rep; movsl\n"
22636 + "6: rep; "__copyuser_seg" movsl\n"
22637 " movl %%eax,%0\n"
22638 - "7: rep; movsb\n"
22639 + "7: rep; "__copyuser_seg" movsb\n"
22640 "8:\n"
22641 ".section .fixup,\"ax\"\n"
22642 "9: lea 0(%%eax,%0,4),%0\n"
22643 @@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
22644 */
22645 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
22646 unsigned long size);
22647 -unsigned long __copy_user_intel(void __user *to, const void *from,
22648 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
22649 + unsigned long size);
22650 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
22651 unsigned long size);
22652 unsigned long __copy_user_zeroing_intel_nocache(void *to,
22653 const void __user *from, unsigned long size);
22654 #endif /* CONFIG_X86_INTEL_USERCOPY */
22655
22656 /* Generic arbitrary sized copy. */
22657 -#define __copy_user(to, from, size) \
22658 +#define __copy_user(to, from, size, prefix, set, restore) \
22659 do { \
22660 int __d0, __d1, __d2; \
22661 __asm__ __volatile__( \
22662 + set \
22663 " cmp $7,%0\n" \
22664 " jbe 1f\n" \
22665 " movl %1,%0\n" \
22666 " negl %0\n" \
22667 " andl $7,%0\n" \
22668 " subl %0,%3\n" \
22669 - "4: rep; movsb\n" \
22670 + "4: rep; "prefix"movsb\n" \
22671 " movl %3,%0\n" \
22672 " shrl $2,%0\n" \
22673 " andl $3,%3\n" \
22674 " .align 2,0x90\n" \
22675 - "0: rep; movsl\n" \
22676 + "0: rep; "prefix"movsl\n" \
22677 " movl %3,%0\n" \
22678 - "1: rep; movsb\n" \
22679 + "1: rep; "prefix"movsb\n" \
22680 "2:\n" \
22681 + restore \
22682 ".section .fixup,\"ax\"\n" \
22683 "5: addl %3,%0\n" \
22684 " jmp 2b\n" \
22685 @@ -682,14 +799,14 @@ do { \
22686 " negl %0\n" \
22687 " andl $7,%0\n" \
22688 " subl %0,%3\n" \
22689 - "4: rep; movsb\n" \
22690 + "4: rep; "__copyuser_seg"movsb\n" \
22691 " movl %3,%0\n" \
22692 " shrl $2,%0\n" \
22693 " andl $3,%3\n" \
22694 " .align 2,0x90\n" \
22695 - "0: rep; movsl\n" \
22696 + "0: rep; "__copyuser_seg"movsl\n" \
22697 " movl %3,%0\n" \
22698 - "1: rep; movsb\n" \
22699 + "1: rep; "__copyuser_seg"movsb\n" \
22700 "2:\n" \
22701 ".section .fixup,\"ax\"\n" \
22702 "5: addl %3,%0\n" \
22703 @@ -775,9 +892,9 @@ survive:
22704 }
22705 #endif
22706 if (movsl_is_ok(to, from, n))
22707 - __copy_user(to, from, n);
22708 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
22709 else
22710 - n = __copy_user_intel(to, from, n);
22711 + n = __generic_copy_to_user_intel(to, from, n);
22712 return n;
22713 }
22714 EXPORT_SYMBOL(__copy_to_user_ll);
22715 @@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
22716 unsigned long n)
22717 {
22718 if (movsl_is_ok(to, from, n))
22719 - __copy_user(to, from, n);
22720 + __copy_user(to, from, n, __copyuser_seg, "", "");
22721 else
22722 - n = __copy_user_intel((void __user *)to,
22723 - (const void *)from, n);
22724 + n = __generic_copy_from_user_intel(to, from, n);
22725 return n;
22726 }
22727 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
22728 @@ -827,65 +943,50 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
22729 if (n > 64 && cpu_has_xmm2)
22730 n = __copy_user_intel_nocache(to, from, n);
22731 else
22732 - __copy_user(to, from, n);
22733 + __copy_user(to, from, n, __copyuser_seg, "", "");
22734 #else
22735 - __copy_user(to, from, n);
22736 + __copy_user(to, from, n, __copyuser_seg, "", "");
22737 #endif
22738 return n;
22739 }
22740 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
22741
22742 -/**
22743 - * copy_to_user: - Copy a block of data into user space.
22744 - * @to: Destination address, in user space.
22745 - * @from: Source address, in kernel space.
22746 - * @n: Number of bytes to copy.
22747 - *
22748 - * Context: User context only. This function may sleep.
22749 - *
22750 - * Copy data from kernel space to user space.
22751 - *
22752 - * Returns number of bytes that could not be copied.
22753 - * On success, this will be zero.
22754 - */
22755 -unsigned long
22756 -copy_to_user(void __user *to, const void *from, unsigned long n)
22757 -{
22758 - if (access_ok(VERIFY_WRITE, to, n))
22759 - n = __copy_to_user(to, from, n);
22760 - return n;
22761 -}
22762 -EXPORT_SYMBOL(copy_to_user);
22763 -
22764 -/**
22765 - * copy_from_user: - Copy a block of data from user space.
22766 - * @to: Destination address, in kernel space.
22767 - * @from: Source address, in user space.
22768 - * @n: Number of bytes to copy.
22769 - *
22770 - * Context: User context only. This function may sleep.
22771 - *
22772 - * Copy data from user space to kernel space.
22773 - *
22774 - * Returns number of bytes that could not be copied.
22775 - * On success, this will be zero.
22776 - *
22777 - * If some data could not be copied, this function will pad the copied
22778 - * data to the requested size using zero bytes.
22779 - */
22780 -unsigned long
22781 -_copy_from_user(void *to, const void __user *from, unsigned long n)
22782 -{
22783 - if (access_ok(VERIFY_READ, from, n))
22784 - n = __copy_from_user(to, from, n);
22785 - else
22786 - memset(to, 0, n);
22787 - return n;
22788 -}
22789 -EXPORT_SYMBOL(_copy_from_user);
22790 -
22791 void copy_from_user_overflow(void)
22792 {
22793 WARN(1, "Buffer overflow detected!\n");
22794 }
22795 EXPORT_SYMBOL(copy_from_user_overflow);
22796 +
22797 +void copy_to_user_overflow(void)
22798 +{
22799 + WARN(1, "Buffer overflow detected!\n");
22800 +}
22801 +EXPORT_SYMBOL(copy_to_user_overflow);
22802 +
22803 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22804 +void __set_fs(mm_segment_t x)
22805 +{
22806 + switch (x.seg) {
22807 + case 0:
22808 + loadsegment(gs, 0);
22809 + break;
22810 + case TASK_SIZE_MAX:
22811 + loadsegment(gs, __USER_DS);
22812 + break;
22813 + case -1UL:
22814 + loadsegment(gs, __KERNEL_DS);
22815 + break;
22816 + default:
22817 + BUG();
22818 + }
22819 + return;
22820 +}
22821 +EXPORT_SYMBOL(__set_fs);
22822 +
22823 +void set_fs(mm_segment_t x)
22824 +{
22825 + current_thread_info()->addr_limit = x;
22826 + __set_fs(x);
22827 +}
22828 +EXPORT_SYMBOL(set_fs);
22829 +#endif
22830 diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
22831 index b7c2849..8633ad8 100644
22832 --- a/arch/x86/lib/usercopy_64.c
22833 +++ b/arch/x86/lib/usercopy_64.c
22834 @@ -42,6 +42,12 @@ long
22835 __strncpy_from_user(char *dst, const char __user *src, long count)
22836 {
22837 long res;
22838 +
22839 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22840 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
22841 + src += PAX_USER_SHADOW_BASE;
22842 +#endif
22843 +
22844 __do_strncpy_from_user(dst, src, count, res);
22845 return res;
22846 }
22847 @@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
22848 {
22849 long __d0;
22850 might_fault();
22851 +
22852 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22853 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
22854 + addr += PAX_USER_SHADOW_BASE;
22855 +#endif
22856 +
22857 /* no memory constraint because it doesn't change any memory gcc knows
22858 about */
22859 asm volatile(
22860 @@ -149,12 +161,20 @@ long strlen_user(const char __user *s)
22861 }
22862 EXPORT_SYMBOL(strlen_user);
22863
22864 -unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
22865 +unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
22866 {
22867 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
22868 - return copy_user_generic((__force void *)to, (__force void *)from, len);
22869 - }
22870 - return len;
22871 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
22872 +
22873 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22874 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
22875 + to += PAX_USER_SHADOW_BASE;
22876 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
22877 + from += PAX_USER_SHADOW_BASE;
22878 +#endif
22879 +
22880 + return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
22881 + }
22882 + return len;
22883 }
22884 EXPORT_SYMBOL(copy_in_user);
22885
22886 @@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
22887 * it is not necessary to optimize tail handling.
22888 */
22889 unsigned long
22890 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
22891 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
22892 {
22893 char c;
22894 unsigned zero_len;
22895 diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
22896 index 1fb85db..8b3540b 100644
22897 --- a/arch/x86/mm/extable.c
22898 +++ b/arch/x86/mm/extable.c
22899 @@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs)
22900 const struct exception_table_entry *fixup;
22901
22902 #ifdef CONFIG_PNPBIOS
22903 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
22904 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
22905 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
22906 extern u32 pnp_bios_is_utter_crap;
22907 pnp_bios_is_utter_crap = 1;
22908 diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
22909 index f0b4caf..d92fd42 100644
22910 --- a/arch/x86/mm/fault.c
22911 +++ b/arch/x86/mm/fault.c
22912 @@ -13,11 +13,18 @@
22913 #include <linux/perf_event.h> /* perf_sw_event */
22914 #include <linux/hugetlb.h> /* hstate_index_to_shift */
22915 #include <linux/prefetch.h> /* prefetchw */
22916 +#include <linux/unistd.h>
22917 +#include <linux/compiler.h>
22918
22919 #include <asm/traps.h> /* dotraplinkage, ... */
22920 #include <asm/pgalloc.h> /* pgd_*(), ... */
22921 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
22922 #include <asm/fixmap.h> /* VSYSCALL_START */
22923 +#include <asm/tlbflush.h>
22924 +
22925 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22926 +#include <asm/stacktrace.h>
22927 +#endif
22928
22929 /*
22930 * Page fault error code bits:
22931 @@ -55,7 +62,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
22932 int ret = 0;
22933
22934 /* kprobe_running() needs smp_processor_id() */
22935 - if (kprobes_built_in() && !user_mode_vm(regs)) {
22936 + if (kprobes_built_in() && !user_mode(regs)) {
22937 preempt_disable();
22938 if (kprobe_running() && kprobe_fault_handler(regs, 14))
22939 ret = 1;
22940 @@ -116,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
22941 return !instr_lo || (instr_lo>>1) == 1;
22942 case 0x00:
22943 /* Prefetch instruction is 0x0F0D or 0x0F18 */
22944 - if (probe_kernel_address(instr, opcode))
22945 + if (user_mode(regs)) {
22946 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
22947 + return 0;
22948 + } else if (probe_kernel_address(instr, opcode))
22949 return 0;
22950
22951 *prefetch = (instr_lo == 0xF) &&
22952 @@ -150,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
22953 while (instr < max_instr) {
22954 unsigned char opcode;
22955
22956 - if (probe_kernel_address(instr, opcode))
22957 + if (user_mode(regs)) {
22958 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
22959 + break;
22960 + } else if (probe_kernel_address(instr, opcode))
22961 break;
22962
22963 instr++;
22964 @@ -181,6 +194,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
22965 force_sig_info(si_signo, &info, tsk);
22966 }
22967
22968 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
22969 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
22970 +#endif
22971 +
22972 +#ifdef CONFIG_PAX_EMUTRAMP
22973 +static int pax_handle_fetch_fault(struct pt_regs *regs);
22974 +#endif
22975 +
22976 +#ifdef CONFIG_PAX_PAGEEXEC
22977 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
22978 +{
22979 + pgd_t *pgd;
22980 + pud_t *pud;
22981 + pmd_t *pmd;
22982 +
22983 + pgd = pgd_offset(mm, address);
22984 + if (!pgd_present(*pgd))
22985 + return NULL;
22986 + pud = pud_offset(pgd, address);
22987 + if (!pud_present(*pud))
22988 + return NULL;
22989 + pmd = pmd_offset(pud, address);
22990 + if (!pmd_present(*pmd))
22991 + return NULL;
22992 + return pmd;
22993 +}
22994 +#endif
22995 +
22996 DEFINE_SPINLOCK(pgd_lock);
22997 LIST_HEAD(pgd_list);
22998
22999 @@ -231,10 +272,22 @@ void vmalloc_sync_all(void)
23000 for (address = VMALLOC_START & PMD_MASK;
23001 address >= TASK_SIZE && address < FIXADDR_TOP;
23002 address += PMD_SIZE) {
23003 +
23004 +#ifdef CONFIG_PAX_PER_CPU_PGD
23005 + unsigned long cpu;
23006 +#else
23007 struct page *page;
23008 +#endif
23009
23010 spin_lock(&pgd_lock);
23011 +
23012 +#ifdef CONFIG_PAX_PER_CPU_PGD
23013 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
23014 + pgd_t *pgd = get_cpu_pgd(cpu);
23015 + pmd_t *ret;
23016 +#else
23017 list_for_each_entry(page, &pgd_list, lru) {
23018 + pgd_t *pgd = page_address(page);
23019 spinlock_t *pgt_lock;
23020 pmd_t *ret;
23021
23022 @@ -242,8 +295,13 @@ void vmalloc_sync_all(void)
23023 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
23024
23025 spin_lock(pgt_lock);
23026 - ret = vmalloc_sync_one(page_address(page), address);
23027 +#endif
23028 +
23029 + ret = vmalloc_sync_one(pgd, address);
23030 +
23031 +#ifndef CONFIG_PAX_PER_CPU_PGD
23032 spin_unlock(pgt_lock);
23033 +#endif
23034
23035 if (!ret)
23036 break;
23037 @@ -277,6 +335,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
23038 * an interrupt in the middle of a task switch..
23039 */
23040 pgd_paddr = read_cr3();
23041 +
23042 +#ifdef CONFIG_PAX_PER_CPU_PGD
23043 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
23044 +#endif
23045 +
23046 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
23047 if (!pmd_k)
23048 return -1;
23049 @@ -372,7 +435,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
23050 * happen within a race in page table update. In the later
23051 * case just flush:
23052 */
23053 +
23054 +#ifdef CONFIG_PAX_PER_CPU_PGD
23055 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
23056 + pgd = pgd_offset_cpu(smp_processor_id(), address);
23057 +#else
23058 pgd = pgd_offset(current->active_mm, address);
23059 +#endif
23060 +
23061 pgd_ref = pgd_offset_k(address);
23062 if (pgd_none(*pgd_ref))
23063 return -1;
23064 @@ -540,7 +610,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
23065 static int is_errata100(struct pt_regs *regs, unsigned long address)
23066 {
23067 #ifdef CONFIG_X86_64
23068 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
23069 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
23070 return 1;
23071 #endif
23072 return 0;
23073 @@ -567,7 +637,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
23074 }
23075
23076 static const char nx_warning[] = KERN_CRIT
23077 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
23078 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
23079
23080 static void
23081 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
23082 @@ -576,15 +646,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
23083 if (!oops_may_print())
23084 return;
23085
23086 - if (error_code & PF_INSTR) {
23087 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
23088 unsigned int level;
23089
23090 pte_t *pte = lookup_address(address, &level);
23091
23092 if (pte && pte_present(*pte) && !pte_exec(*pte))
23093 - printk(nx_warning, current_uid());
23094 + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
23095 }
23096
23097 +#ifdef CONFIG_PAX_KERNEXEC
23098 + if (init_mm.start_code <= address && address < init_mm.end_code) {
23099 + if (current->signal->curr_ip)
23100 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
23101 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
23102 + else
23103 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
23104 + current->comm, task_pid_nr(current), current_uid(), current_euid());
23105 + }
23106 +#endif
23107 +
23108 printk(KERN_ALERT "BUG: unable to handle kernel ");
23109 if (address < PAGE_SIZE)
23110 printk(KERN_CONT "NULL pointer dereference");
23111 @@ -748,6 +829,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
23112 }
23113 #endif
23114
23115 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23116 + if (pax_is_fetch_fault(regs, error_code, address)) {
23117 +
23118 +#ifdef CONFIG_PAX_EMUTRAMP
23119 + switch (pax_handle_fetch_fault(regs)) {
23120 + case 2:
23121 + return;
23122 + }
23123 +#endif
23124 +
23125 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
23126 + do_group_exit(SIGKILL);
23127 + }
23128 +#endif
23129 +
23130 if (unlikely(show_unhandled_signals))
23131 show_signal_msg(regs, error_code, address, tsk);
23132
23133 @@ -844,7 +940,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
23134 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
23135 printk(KERN_ERR
23136 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
23137 - tsk->comm, tsk->pid, address);
23138 + tsk->comm, task_pid_nr(tsk), address);
23139 code = BUS_MCEERR_AR;
23140 }
23141 #endif
23142 @@ -900,6 +996,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
23143 return 1;
23144 }
23145
23146 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
23147 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
23148 +{
23149 + pte_t *pte;
23150 + pmd_t *pmd;
23151 + spinlock_t *ptl;
23152 + unsigned char pte_mask;
23153 +
23154 + if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
23155 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
23156 + return 0;
23157 +
23158 + /* PaX: it's our fault, let's handle it if we can */
23159 +
23160 + /* PaX: take a look at read faults before acquiring any locks */
23161 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
23162 + /* instruction fetch attempt from a protected page in user mode */
23163 + up_read(&mm->mmap_sem);
23164 +
23165 +#ifdef CONFIG_PAX_EMUTRAMP
23166 + switch (pax_handle_fetch_fault(regs)) {
23167 + case 2:
23168 + return 1;
23169 + }
23170 +#endif
23171 +
23172 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
23173 + do_group_exit(SIGKILL);
23174 + }
23175 +
23176 + pmd = pax_get_pmd(mm, address);
23177 + if (unlikely(!pmd))
23178 + return 0;
23179 +
23180 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
23181 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
23182 + pte_unmap_unlock(pte, ptl);
23183 + return 0;
23184 + }
23185 +
23186 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
23187 + /* write attempt to a protected page in user mode */
23188 + pte_unmap_unlock(pte, ptl);
23189 + return 0;
23190 + }
23191 +
23192 +#ifdef CONFIG_SMP
23193 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
23194 +#else
23195 + if (likely(address > get_limit(regs->cs)))
23196 +#endif
23197 + {
23198 + set_pte(pte, pte_mkread(*pte));
23199 + __flush_tlb_one(address);
23200 + pte_unmap_unlock(pte, ptl);
23201 + up_read(&mm->mmap_sem);
23202 + return 1;
23203 + }
23204 +
23205 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
23206 +
23207 + /*
23208 + * PaX: fill DTLB with user rights and retry
23209 + */
23210 + __asm__ __volatile__ (
23211 + "orb %2,(%1)\n"
23212 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
23213 +/*
23214 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
23215 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
23216 + * page fault when examined during a TLB load attempt. this is true not only
23217 + * for PTEs holding a non-present entry but also present entries that will
23218 + * raise a page fault (such as those set up by PaX, or the copy-on-write
23219 + * mechanism). in effect it means that we do *not* need to flush the TLBs
23220 + * for our target pages since their PTEs are simply not in the TLBs at all.
23221 +
23222 + * the best thing in omitting it is that we gain around 15-20% speed in the
23223 + * fast path of the page fault handler and can get rid of tracing since we
23224 + * can no longer flush unintended entries.
23225 + */
23226 + "invlpg (%0)\n"
23227 +#endif
23228 + __copyuser_seg"testb $0,(%0)\n"
23229 + "xorb %3,(%1)\n"
23230 + :
23231 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
23232 + : "memory", "cc");
23233 + pte_unmap_unlock(pte, ptl);
23234 + up_read(&mm->mmap_sem);
23235 + return 1;
23236 +}
23237 +#endif
23238 +
23239 /*
23240 * Handle a spurious fault caused by a stale TLB entry.
23241 *
23242 @@ -972,6 +1161,9 @@ int show_unhandled_signals = 1;
23243 static inline int
23244 access_error(unsigned long error_code, struct vm_area_struct *vma)
23245 {
23246 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
23247 + return 1;
23248 +
23249 if (error_code & PF_WRITE) {
23250 /* write, present and write, not present: */
23251 if (unlikely(!(vma->vm_flags & VM_WRITE)))
23252 @@ -1005,18 +1197,32 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
23253 {
23254 struct vm_area_struct *vma;
23255 struct task_struct *tsk;
23256 - unsigned long address;
23257 struct mm_struct *mm;
23258 int fault;
23259 int write = error_code & PF_WRITE;
23260 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
23261 (write ? FAULT_FLAG_WRITE : 0);
23262
23263 - tsk = current;
23264 - mm = tsk->mm;
23265 -
23266 /* Get the faulting address: */
23267 - address = read_cr2();
23268 + unsigned long address = read_cr2();
23269 +
23270 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23271 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
23272 + if (!search_exception_tables(regs->ip)) {
23273 + bad_area_nosemaphore(regs, error_code, address);
23274 + return;
23275 + }
23276 + if (address < PAX_USER_SHADOW_BASE) {
23277 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
23278 + printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
23279 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
23280 + } else
23281 + address -= PAX_USER_SHADOW_BASE;
23282 + }
23283 +#endif
23284 +
23285 + tsk = current;
23286 + mm = tsk->mm;
23287
23288 /*
23289 * Detect and handle instructions that would cause a page fault for
23290 @@ -1077,7 +1283,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
23291 * User-mode registers count as a user access even for any
23292 * potential system fault or CPU buglet:
23293 */
23294 - if (user_mode_vm(regs)) {
23295 + if (user_mode(regs)) {
23296 local_irq_enable();
23297 error_code |= PF_USER;
23298 } else {
23299 @@ -1132,6 +1338,11 @@ retry:
23300 might_sleep();
23301 }
23302
23303 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
23304 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
23305 + return;
23306 +#endif
23307 +
23308 vma = find_vma(mm, address);
23309 if (unlikely(!vma)) {
23310 bad_area(regs, error_code, address);
23311 @@ -1143,18 +1354,24 @@ retry:
23312 bad_area(regs, error_code, address);
23313 return;
23314 }
23315 - if (error_code & PF_USER) {
23316 - /*
23317 - * Accessing the stack below %sp is always a bug.
23318 - * The large cushion allows instructions like enter
23319 - * and pusha to work. ("enter $65535, $31" pushes
23320 - * 32 pointers and then decrements %sp by 65535.)
23321 - */
23322 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
23323 - bad_area(regs, error_code, address);
23324 - return;
23325 - }
23326 + /*
23327 + * Accessing the stack below %sp is always a bug.
23328 + * The large cushion allows instructions like enter
23329 + * and pusha to work. ("enter $65535, $31" pushes
23330 + * 32 pointers and then decrements %sp by 65535.)
23331 + */
23332 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
23333 + bad_area(regs, error_code, address);
23334 + return;
23335 }
23336 +
23337 +#ifdef CONFIG_PAX_SEGMEXEC
23338 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
23339 + bad_area(regs, error_code, address);
23340 + return;
23341 + }
23342 +#endif
23343 +
23344 if (unlikely(expand_stack(vma, address))) {
23345 bad_area(regs, error_code, address);
23346 return;
23347 @@ -1209,3 +1426,292 @@ good_area:
23348
23349 up_read(&mm->mmap_sem);
23350 }
23351 +
23352 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23353 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
23354 +{
23355 + struct mm_struct *mm = current->mm;
23356 + unsigned long ip = regs->ip;
23357 +
23358 + if (v8086_mode(regs))
23359 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
23360 +
23361 +#ifdef CONFIG_PAX_PAGEEXEC
23362 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
23363 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
23364 + return true;
23365 + if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
23366 + return true;
23367 + return false;
23368 + }
23369 +#endif
23370 +
23371 +#ifdef CONFIG_PAX_SEGMEXEC
23372 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
23373 + if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
23374 + return true;
23375 + return false;
23376 + }
23377 +#endif
23378 +
23379 + return false;
23380 +}
23381 +#endif
23382 +
23383 +#ifdef CONFIG_PAX_EMUTRAMP
23384 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
23385 +{
23386 + int err;
23387 +
23388 + do { /* PaX: libffi trampoline emulation */
23389 + unsigned char mov, jmp;
23390 + unsigned int addr1, addr2;
23391 +
23392 +#ifdef CONFIG_X86_64
23393 + if ((regs->ip + 9) >> 32)
23394 + break;
23395 +#endif
23396 +
23397 + err = get_user(mov, (unsigned char __user *)regs->ip);
23398 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
23399 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
23400 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
23401 +
23402 + if (err)
23403 + break;
23404 +
23405 + if (mov == 0xB8 && jmp == 0xE9) {
23406 + regs->ax = addr1;
23407 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
23408 + return 2;
23409 + }
23410 + } while (0);
23411 +
23412 + do { /* PaX: gcc trampoline emulation #1 */
23413 + unsigned char mov1, mov2;
23414 + unsigned short jmp;
23415 + unsigned int addr1, addr2;
23416 +
23417 +#ifdef CONFIG_X86_64
23418 + if ((regs->ip + 11) >> 32)
23419 + break;
23420 +#endif
23421 +
23422 + err = get_user(mov1, (unsigned char __user *)regs->ip);
23423 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
23424 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
23425 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
23426 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
23427 +
23428 + if (err)
23429 + break;
23430 +
23431 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
23432 + regs->cx = addr1;
23433 + regs->ax = addr2;
23434 + regs->ip = addr2;
23435 + return 2;
23436 + }
23437 + } while (0);
23438 +
23439 + do { /* PaX: gcc trampoline emulation #2 */
23440 + unsigned char mov, jmp;
23441 + unsigned int addr1, addr2;
23442 +
23443 +#ifdef CONFIG_X86_64
23444 + if ((regs->ip + 9) >> 32)
23445 + break;
23446 +#endif
23447 +
23448 + err = get_user(mov, (unsigned char __user *)regs->ip);
23449 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
23450 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
23451 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
23452 +
23453 + if (err)
23454 + break;
23455 +
23456 + if (mov == 0xB9 && jmp == 0xE9) {
23457 + regs->cx = addr1;
23458 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
23459 + return 2;
23460 + }
23461 + } while (0);
23462 +
23463 + return 1; /* PaX in action */
23464 +}
23465 +
23466 +#ifdef CONFIG_X86_64
23467 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
23468 +{
23469 + int err;
23470 +
23471 + do { /* PaX: libffi trampoline emulation */
23472 + unsigned short mov1, mov2, jmp1;
23473 + unsigned char stcclc, jmp2;
23474 + unsigned long addr1, addr2;
23475 +
23476 + err = get_user(mov1, (unsigned short __user *)regs->ip);
23477 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
23478 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
23479 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
23480 + err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
23481 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
23482 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
23483 +
23484 + if (err)
23485 + break;
23486 +
23487 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
23488 + regs->r11 = addr1;
23489 + regs->r10 = addr2;
23490 + if (stcclc == 0xF8)
23491 + regs->flags &= ~X86_EFLAGS_CF;
23492 + else
23493 + regs->flags |= X86_EFLAGS_CF;
23494 + regs->ip = addr1;
23495 + return 2;
23496 + }
23497 + } while (0);
23498 +
23499 + do { /* PaX: gcc trampoline emulation #1 */
23500 + unsigned short mov1, mov2, jmp1;
23501 + unsigned char jmp2;
23502 + unsigned int addr1;
23503 + unsigned long addr2;
23504 +
23505 + err = get_user(mov1, (unsigned short __user *)regs->ip);
23506 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
23507 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
23508 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
23509 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
23510 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
23511 +
23512 + if (err)
23513 + break;
23514 +
23515 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
23516 + regs->r11 = addr1;
23517 + regs->r10 = addr2;
23518 + regs->ip = addr1;
23519 + return 2;
23520 + }
23521 + } while (0);
23522 +
23523 + do { /* PaX: gcc trampoline emulation #2 */
23524 + unsigned short mov1, mov2, jmp1;
23525 + unsigned char jmp2;
23526 + unsigned long addr1, addr2;
23527 +
23528 + err = get_user(mov1, (unsigned short __user *)regs->ip);
23529 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
23530 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
23531 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
23532 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
23533 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
23534 +
23535 + if (err)
23536 + break;
23537 +
23538 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
23539 + regs->r11 = addr1;
23540 + regs->r10 = addr2;
23541 + regs->ip = addr1;
23542 + return 2;
23543 + }
23544 + } while (0);
23545 +
23546 + return 1; /* PaX in action */
23547 +}
23548 +#endif
23549 +
23550 +/*
23551 + * PaX: decide what to do with offenders (regs->ip = fault address)
23552 + *
23553 + * returns 1 when task should be killed
23554 + * 2 when gcc trampoline was detected
23555 + */
23556 +static int pax_handle_fetch_fault(struct pt_regs *regs)
23557 +{
23558 + if (v8086_mode(regs))
23559 + return 1;
23560 +
23561 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
23562 + return 1;
23563 +
23564 +#ifdef CONFIG_X86_32
23565 + return pax_handle_fetch_fault_32(regs);
23566 +#else
23567 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
23568 + return pax_handle_fetch_fault_32(regs);
23569 + else
23570 + return pax_handle_fetch_fault_64(regs);
23571 +#endif
23572 +}
23573 +#endif
23574 +
23575 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23576 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
23577 +{
23578 + long i;
23579 +
23580 + printk(KERN_ERR "PAX: bytes at PC: ");
23581 + for (i = 0; i < 20; i++) {
23582 + unsigned char c;
23583 + if (get_user(c, (unsigned char __force_user *)pc+i))
23584 + printk(KERN_CONT "?? ");
23585 + else
23586 + printk(KERN_CONT "%02x ", c);
23587 + }
23588 + printk("\n");
23589 +
23590 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
23591 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
23592 + unsigned long c;
23593 + if (get_user(c, (unsigned long __force_user *)sp+i)) {
23594 +#ifdef CONFIG_X86_32
23595 + printk(KERN_CONT "???????? ");
23596 +#else
23597 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
23598 + printk(KERN_CONT "???????? ???????? ");
23599 + else
23600 + printk(KERN_CONT "???????????????? ");
23601 +#endif
23602 + } else {
23603 +#ifdef CONFIG_X86_64
23604 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
23605 + printk(KERN_CONT "%08x ", (unsigned int)c);
23606 + printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
23607 + } else
23608 +#endif
23609 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
23610 + }
23611 + }
23612 + printk("\n");
23613 +}
23614 +#endif
23615 +
23616 +/**
23617 + * probe_kernel_write(): safely attempt to write to a location
23618 + * @dst: address to write to
23619 + * @src: pointer to the data that shall be written
23620 + * @size: size of the data chunk
23621 + *
23622 + * Safely write to address @dst from the buffer at @src. If a kernel fault
23623 + * happens, handle that and return -EFAULT.
23624 + */
23625 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
23626 +{
23627 + long ret;
23628 + mm_segment_t old_fs = get_fs();
23629 +
23630 + set_fs(KERNEL_DS);
23631 + pagefault_disable();
23632 + pax_open_kernel();
23633 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
23634 + pax_close_kernel();
23635 + pagefault_enable();
23636 + set_fs(old_fs);
23637 +
23638 + return ret ? -EFAULT : 0;
23639 +}
23640 diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
23641 index dd74e46..7d26398 100644
23642 --- a/arch/x86/mm/gup.c
23643 +++ b/arch/x86/mm/gup.c
23644 @@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
23645 addr = start;
23646 len = (unsigned long) nr_pages << PAGE_SHIFT;
23647 end = start + len;
23648 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
23649 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
23650 (void __user *)start, len)))
23651 return 0;
23652
23653 diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
23654 index f4f29b1..5cac4fb 100644
23655 --- a/arch/x86/mm/highmem_32.c
23656 +++ b/arch/x86/mm/highmem_32.c
23657 @@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
23658 idx = type + KM_TYPE_NR*smp_processor_id();
23659 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
23660 BUG_ON(!pte_none(*(kmap_pte-idx)));
23661 +
23662 + pax_open_kernel();
23663 set_pte(kmap_pte-idx, mk_pte(page, prot));
23664 + pax_close_kernel();
23665 +
23666 arch_flush_lazy_mmu_mode();
23667
23668 return (void *)vaddr;
23669 diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
23670 index 8ecbb4b..29efd37 100644
23671 --- a/arch/x86/mm/hugetlbpage.c
23672 +++ b/arch/x86/mm/hugetlbpage.c
23673 @@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
23674 struct hstate *h = hstate_file(file);
23675 struct mm_struct *mm = current->mm;
23676 struct vm_area_struct *vma;
23677 - unsigned long start_addr;
23678 + unsigned long start_addr, pax_task_size = TASK_SIZE;
23679 +
23680 +#ifdef CONFIG_PAX_SEGMEXEC
23681 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
23682 + pax_task_size = SEGMEXEC_TASK_SIZE;
23683 +#endif
23684 +
23685 + pax_task_size -= PAGE_SIZE;
23686
23687 if (len > mm->cached_hole_size) {
23688 - start_addr = mm->free_area_cache;
23689 + start_addr = mm->free_area_cache;
23690 } else {
23691 - start_addr = TASK_UNMAPPED_BASE;
23692 - mm->cached_hole_size = 0;
23693 + start_addr = mm->mmap_base;
23694 + mm->cached_hole_size = 0;
23695 }
23696
23697 full_search:
23698 @@ -280,26 +287,27 @@ full_search:
23699
23700 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
23701 /* At this point: (!vma || addr < vma->vm_end). */
23702 - if (TASK_SIZE - len < addr) {
23703 + if (pax_task_size - len < addr) {
23704 /*
23705 * Start a new search - just in case we missed
23706 * some holes.
23707 */
23708 - if (start_addr != TASK_UNMAPPED_BASE) {
23709 - start_addr = TASK_UNMAPPED_BASE;
23710 + if (start_addr != mm->mmap_base) {
23711 + start_addr = mm->mmap_base;
23712 mm->cached_hole_size = 0;
23713 goto full_search;
23714 }
23715 return -ENOMEM;
23716 }
23717 - if (!vma || addr + len <= vma->vm_start) {
23718 - mm->free_area_cache = addr + len;
23719 - return addr;
23720 - }
23721 + if (check_heap_stack_gap(vma, addr, len))
23722 + break;
23723 if (addr + mm->cached_hole_size < vma->vm_start)
23724 mm->cached_hole_size = vma->vm_start - addr;
23725 addr = ALIGN(vma->vm_end, huge_page_size(h));
23726 }
23727 +
23728 + mm->free_area_cache = addr + len;
23729 + return addr;
23730 }
23731
23732 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
23733 @@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
23734 {
23735 struct hstate *h = hstate_file(file);
23736 struct mm_struct *mm = current->mm;
23737 - struct vm_area_struct *vma, *prev_vma;
23738 - unsigned long base = mm->mmap_base, addr = addr0;
23739 + struct vm_area_struct *vma;
23740 + unsigned long base = mm->mmap_base, addr;
23741 unsigned long largest_hole = mm->cached_hole_size;
23742 - int first_time = 1;
23743
23744 /* don't allow allocations above current base */
23745 if (mm->free_area_cache > base)
23746 @@ -321,66 +328,63 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
23747 largest_hole = 0;
23748 mm->free_area_cache = base;
23749 }
23750 -try_again:
23751 +
23752 /* make sure it can fit in the remaining address space */
23753 if (mm->free_area_cache < len)
23754 goto fail;
23755
23756 /* either no address requested or can't fit in requested address hole */
23757 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
23758 + addr = (mm->free_area_cache - len);
23759 do {
23760 + addr &= huge_page_mask(h);
23761 + vma = find_vma(mm, addr);
23762 /*
23763 * Lookup failure means no vma is above this address,
23764 * i.e. return with success:
23765 - */
23766 - vma = find_vma(mm, addr);
23767 - if (!vma)
23768 - return addr;
23769 -
23770 - /*
23771 * new region fits between prev_vma->vm_end and
23772 * vma->vm_start, use it:
23773 */
23774 - prev_vma = vma->vm_prev;
23775 - if (addr + len <= vma->vm_start &&
23776 - (!prev_vma || (addr >= prev_vma->vm_end))) {
23777 + if (check_heap_stack_gap(vma, addr, len)) {
23778 /* remember the address as a hint for next time */
23779 - mm->cached_hole_size = largest_hole;
23780 - return (mm->free_area_cache = addr);
23781 - } else {
23782 - /* pull free_area_cache down to the first hole */
23783 - if (mm->free_area_cache == vma->vm_end) {
23784 - mm->free_area_cache = vma->vm_start;
23785 - mm->cached_hole_size = largest_hole;
23786 - }
23787 + mm->cached_hole_size = largest_hole;
23788 + return (mm->free_area_cache = addr);
23789 + }
23790 + /* pull free_area_cache down to the first hole */
23791 + if (mm->free_area_cache == vma->vm_end) {
23792 + mm->free_area_cache = vma->vm_start;
23793 + mm->cached_hole_size = largest_hole;
23794 }
23795
23796 /* remember the largest hole we saw so far */
23797 if (addr + largest_hole < vma->vm_start)
23798 - largest_hole = vma->vm_start - addr;
23799 + largest_hole = vma->vm_start - addr;
23800
23801 /* try just below the current vma->vm_start */
23802 - addr = (vma->vm_start - len) & huge_page_mask(h);
23803 - } while (len <= vma->vm_start);
23804 + addr = skip_heap_stack_gap(vma, len);
23805 + } while (!IS_ERR_VALUE(addr));
23806
23807 fail:
23808 /*
23809 - * if hint left us with no space for the requested
23810 - * mapping then try again:
23811 - */
23812 - if (first_time) {
23813 - mm->free_area_cache = base;
23814 - largest_hole = 0;
23815 - first_time = 0;
23816 - goto try_again;
23817 - }
23818 - /*
23819 * A failed mmap() very likely causes application failure,
23820 * so fall back to the bottom-up function here. This scenario
23821 * can happen with large stack limits and large mmap()
23822 * allocations.
23823 */
23824 - mm->free_area_cache = TASK_UNMAPPED_BASE;
23825 +
23826 +#ifdef CONFIG_PAX_SEGMEXEC
23827 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
23828 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
23829 + else
23830 +#endif
23831 +
23832 + mm->mmap_base = TASK_UNMAPPED_BASE;
23833 +
23834 +#ifdef CONFIG_PAX_RANDMMAP
23835 + if (mm->pax_flags & MF_PAX_RANDMMAP)
23836 + mm->mmap_base += mm->delta_mmap;
23837 +#endif
23838 +
23839 + mm->free_area_cache = mm->mmap_base;
23840 mm->cached_hole_size = ~0UL;
23841 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
23842 len, pgoff, flags);
23843 @@ -388,6 +392,7 @@ fail:
23844 /*
23845 * Restore the topdown base:
23846 */
23847 + mm->mmap_base = base;
23848 mm->free_area_cache = base;
23849 mm->cached_hole_size = ~0UL;
23850
23851 @@ -401,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
23852 struct hstate *h = hstate_file(file);
23853 struct mm_struct *mm = current->mm;
23854 struct vm_area_struct *vma;
23855 + unsigned long pax_task_size = TASK_SIZE;
23856
23857 if (len & ~huge_page_mask(h))
23858 return -EINVAL;
23859 - if (len > TASK_SIZE)
23860 +
23861 +#ifdef CONFIG_PAX_SEGMEXEC
23862 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
23863 + pax_task_size = SEGMEXEC_TASK_SIZE;
23864 +#endif
23865 +
23866 + pax_task_size -= PAGE_SIZE;
23867 +
23868 + if (len > pax_task_size)
23869 return -ENOMEM;
23870
23871 if (flags & MAP_FIXED) {
23872 @@ -416,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
23873 if (addr) {
23874 addr = ALIGN(addr, huge_page_size(h));
23875 vma = find_vma(mm, addr);
23876 - if (TASK_SIZE - len >= addr &&
23877 - (!vma || addr + len <= vma->vm_start))
23878 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
23879 return addr;
23880 }
23881 if (mm->get_unmapped_area == arch_get_unmapped_area)
23882 diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
23883 index 6cabf65..77e9c1c 100644
23884 --- a/arch/x86/mm/init.c
23885 +++ b/arch/x86/mm/init.c
23886 @@ -17,6 +17,7 @@
23887 #include <asm/tlb.h>
23888 #include <asm/proto.h>
23889 #include <asm/dma.h> /* for MAX_DMA_PFN */
23890 +#include <asm/desc.h>
23891
23892 unsigned long __initdata pgt_buf_start;
23893 unsigned long __meminitdata pgt_buf_end;
23894 @@ -33,7 +34,7 @@ int direct_gbpages
23895 static void __init find_early_table_space(unsigned long end, int use_pse,
23896 int use_gbpages)
23897 {
23898 - unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
23899 + unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
23900 phys_addr_t base;
23901
23902 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
23903 @@ -314,8 +315,29 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
23904 */
23905 int devmem_is_allowed(unsigned long pagenr)
23906 {
23907 +#ifdef CONFIG_GRKERNSEC_KMEM
23908 + /* allow BDA */
23909 + if (!pagenr)
23910 + return 1;
23911 + /* allow EBDA */
23912 + if ((0x9f000 >> PAGE_SHIFT) == pagenr)
23913 + return 1;
23914 +#else
23915 + if (!pagenr)
23916 + return 1;
23917 +#ifdef CONFIG_VM86
23918 + if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
23919 + return 1;
23920 +#endif
23921 +#endif
23922 +
23923 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
23924 + return 1;
23925 +#ifdef CONFIG_GRKERNSEC_KMEM
23926 + /* throw out everything else below 1MB */
23927 if (pagenr <= 256)
23928 - return 1;
23929 + return 0;
23930 +#endif
23931 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
23932 return 0;
23933 if (!page_is_ram(pagenr))
23934 @@ -374,6 +396,86 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
23935
23936 void free_initmem(void)
23937 {
23938 +
23939 +#ifdef CONFIG_PAX_KERNEXEC
23940 +#ifdef CONFIG_X86_32
23941 + /* PaX: limit KERNEL_CS to actual size */
23942 + unsigned long addr, limit;
23943 + struct desc_struct d;
23944 + int cpu;
23945 +
23946 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
23947 + limit = (limit - 1UL) >> PAGE_SHIFT;
23948 +
23949 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
23950 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
23951 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
23952 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
23953 + }
23954 +
23955 + /* PaX: make KERNEL_CS read-only */
23956 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
23957 + if (!paravirt_enabled())
23958 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
23959 +/*
23960 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
23961 + pgd = pgd_offset_k(addr);
23962 + pud = pud_offset(pgd, addr);
23963 + pmd = pmd_offset(pud, addr);
23964 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
23965 + }
23966 +*/
23967 +#ifdef CONFIG_X86_PAE
23968 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
23969 +/*
23970 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
23971 + pgd = pgd_offset_k(addr);
23972 + pud = pud_offset(pgd, addr);
23973 + pmd = pmd_offset(pud, addr);
23974 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
23975 + }
23976 +*/
23977 +#endif
23978 +
23979 +#ifdef CONFIG_MODULES
23980 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
23981 +#endif
23982 +
23983 +#else
23984 + pgd_t *pgd;
23985 + pud_t *pud;
23986 + pmd_t *pmd;
23987 + unsigned long addr, end;
23988 +
23989 + /* PaX: make kernel code/rodata read-only, rest non-executable */
23990 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
23991 + pgd = pgd_offset_k(addr);
23992 + pud = pud_offset(pgd, addr);
23993 + pmd = pmd_offset(pud, addr);
23994 + if (!pmd_present(*pmd))
23995 + continue;
23996 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
23997 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
23998 + else
23999 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
24000 + }
24001 +
24002 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
24003 + end = addr + KERNEL_IMAGE_SIZE;
24004 + for (; addr < end; addr += PMD_SIZE) {
24005 + pgd = pgd_offset_k(addr);
24006 + pud = pud_offset(pgd, addr);
24007 + pmd = pmd_offset(pud, addr);
24008 + if (!pmd_present(*pmd))
24009 + continue;
24010 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
24011 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24012 + }
24013 +#endif
24014 +
24015 + flush_tlb_all();
24016 +#endif
24017 +
24018 free_init_pages("unused kernel memory",
24019 (unsigned long)(&__init_begin),
24020 (unsigned long)(&__init_end));
24021 diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
24022 index 8663f6c..829ae76 100644
24023 --- a/arch/x86/mm/init_32.c
24024 +++ b/arch/x86/mm/init_32.c
24025 @@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
24026 }
24027
24028 /*
24029 - * Creates a middle page table and puts a pointer to it in the
24030 - * given global directory entry. This only returns the gd entry
24031 - * in non-PAE compilation mode, since the middle layer is folded.
24032 - */
24033 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
24034 -{
24035 - pud_t *pud;
24036 - pmd_t *pmd_table;
24037 -
24038 -#ifdef CONFIG_X86_PAE
24039 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
24040 - if (after_bootmem)
24041 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
24042 - else
24043 - pmd_table = (pmd_t *)alloc_low_page();
24044 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
24045 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
24046 - pud = pud_offset(pgd, 0);
24047 - BUG_ON(pmd_table != pmd_offset(pud, 0));
24048 -
24049 - return pmd_table;
24050 - }
24051 -#endif
24052 - pud = pud_offset(pgd, 0);
24053 - pmd_table = pmd_offset(pud, 0);
24054 -
24055 - return pmd_table;
24056 -}
24057 -
24058 -/*
24059 * Create a page table and place a pointer to it in a middle page
24060 * directory entry:
24061 */
24062 @@ -123,13 +93,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
24063 page_table = (pte_t *)alloc_low_page();
24064
24065 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
24066 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24067 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
24068 +#else
24069 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
24070 +#endif
24071 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
24072 }
24073
24074 return pte_offset_kernel(pmd, 0);
24075 }
24076
24077 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
24078 +{
24079 + pud_t *pud;
24080 + pmd_t *pmd_table;
24081 +
24082 + pud = pud_offset(pgd, 0);
24083 + pmd_table = pmd_offset(pud, 0);
24084 +
24085 + return pmd_table;
24086 +}
24087 +
24088 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
24089 {
24090 int pgd_idx = pgd_index(vaddr);
24091 @@ -203,6 +188,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24092 int pgd_idx, pmd_idx;
24093 unsigned long vaddr;
24094 pgd_t *pgd;
24095 + pud_t *pud;
24096 pmd_t *pmd;
24097 pte_t *pte = NULL;
24098
24099 @@ -212,8 +198,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24100 pgd = pgd_base + pgd_idx;
24101
24102 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
24103 - pmd = one_md_table_init(pgd);
24104 - pmd = pmd + pmd_index(vaddr);
24105 + pud = pud_offset(pgd, vaddr);
24106 + pmd = pmd_offset(pud, vaddr);
24107 +
24108 +#ifdef CONFIG_X86_PAE
24109 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
24110 +#endif
24111 +
24112 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
24113 pmd++, pmd_idx++) {
24114 pte = page_table_kmap_check(one_page_table_init(pmd),
24115 @@ -225,11 +216,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24116 }
24117 }
24118
24119 -static inline int is_kernel_text(unsigned long addr)
24120 +static inline int is_kernel_text(unsigned long start, unsigned long end)
24121 {
24122 - if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
24123 - return 1;
24124 - return 0;
24125 + if ((start > ktla_ktva((unsigned long)_etext) ||
24126 + end <= ktla_ktva((unsigned long)_stext)) &&
24127 + (start > ktla_ktva((unsigned long)_einittext) ||
24128 + end <= ktla_ktva((unsigned long)_sinittext)) &&
24129 +
24130 +#ifdef CONFIG_ACPI_SLEEP
24131 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
24132 +#endif
24133 +
24134 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
24135 + return 0;
24136 + return 1;
24137 }
24138
24139 /*
24140 @@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned long start,
24141 unsigned long last_map_addr = end;
24142 unsigned long start_pfn, end_pfn;
24143 pgd_t *pgd_base = swapper_pg_dir;
24144 - int pgd_idx, pmd_idx, pte_ofs;
24145 + unsigned int pgd_idx, pmd_idx, pte_ofs;
24146 unsigned long pfn;
24147 pgd_t *pgd;
24148 + pud_t *pud;
24149 pmd_t *pmd;
24150 pte_t *pte;
24151 unsigned pages_2m, pages_4k;
24152 @@ -281,8 +282,13 @@ repeat:
24153 pfn = start_pfn;
24154 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
24155 pgd = pgd_base + pgd_idx;
24156 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
24157 - pmd = one_md_table_init(pgd);
24158 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
24159 + pud = pud_offset(pgd, 0);
24160 + pmd = pmd_offset(pud, 0);
24161 +
24162 +#ifdef CONFIG_X86_PAE
24163 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
24164 +#endif
24165
24166 if (pfn >= end_pfn)
24167 continue;
24168 @@ -294,14 +300,13 @@ repeat:
24169 #endif
24170 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
24171 pmd++, pmd_idx++) {
24172 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
24173 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
24174
24175 /*
24176 * Map with big pages if possible, otherwise
24177 * create normal page tables:
24178 */
24179 if (use_pse) {
24180 - unsigned int addr2;
24181 pgprot_t prot = PAGE_KERNEL_LARGE;
24182 /*
24183 * first pass will use the same initial
24184 @@ -311,11 +316,7 @@ repeat:
24185 __pgprot(PTE_IDENT_ATTR |
24186 _PAGE_PSE);
24187
24188 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
24189 - PAGE_OFFSET + PAGE_SIZE-1;
24190 -
24191 - if (is_kernel_text(addr) ||
24192 - is_kernel_text(addr2))
24193 + if (is_kernel_text(address, address + PMD_SIZE))
24194 prot = PAGE_KERNEL_LARGE_EXEC;
24195
24196 pages_2m++;
24197 @@ -332,7 +333,7 @@ repeat:
24198 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
24199 pte += pte_ofs;
24200 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
24201 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
24202 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
24203 pgprot_t prot = PAGE_KERNEL;
24204 /*
24205 * first pass will use the same initial
24206 @@ -340,7 +341,7 @@ repeat:
24207 */
24208 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
24209
24210 - if (is_kernel_text(addr))
24211 + if (is_kernel_text(address, address + PAGE_SIZE))
24212 prot = PAGE_KERNEL_EXEC;
24213
24214 pages_4k++;
24215 @@ -466,7 +467,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
24216
24217 pud = pud_offset(pgd, va);
24218 pmd = pmd_offset(pud, va);
24219 - if (!pmd_present(*pmd))
24220 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
24221 break;
24222
24223 pte = pte_offset_kernel(pmd, va);
24224 @@ -518,12 +519,10 @@ void __init early_ioremap_page_table_range_init(void)
24225
24226 static void __init pagetable_init(void)
24227 {
24228 - pgd_t *pgd_base = swapper_pg_dir;
24229 -
24230 - permanent_kmaps_init(pgd_base);
24231 + permanent_kmaps_init(swapper_pg_dir);
24232 }
24233
24234 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
24235 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
24236 EXPORT_SYMBOL_GPL(__supported_pte_mask);
24237
24238 /* user-defined highmem size */
24239 @@ -735,6 +734,12 @@ void __init mem_init(void)
24240
24241 pci_iommu_alloc();
24242
24243 +#ifdef CONFIG_PAX_PER_CPU_PGD
24244 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
24245 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
24246 + KERNEL_PGD_PTRS);
24247 +#endif
24248 +
24249 #ifdef CONFIG_FLATMEM
24250 BUG_ON(!mem_map);
24251 #endif
24252 @@ -761,7 +766,7 @@ void __init mem_init(void)
24253 reservedpages++;
24254
24255 codesize = (unsigned long) &_etext - (unsigned long) &_text;
24256 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
24257 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
24258 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
24259
24260 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
24261 @@ -802,10 +807,10 @@ void __init mem_init(void)
24262 ((unsigned long)&__init_end -
24263 (unsigned long)&__init_begin) >> 10,
24264
24265 - (unsigned long)&_etext, (unsigned long)&_edata,
24266 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
24267 + (unsigned long)&_sdata, (unsigned long)&_edata,
24268 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
24269
24270 - (unsigned long)&_text, (unsigned long)&_etext,
24271 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
24272 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
24273
24274 /*
24275 @@ -883,6 +888,7 @@ void set_kernel_text_rw(void)
24276 if (!kernel_set_to_readonly)
24277 return;
24278
24279 + start = ktla_ktva(start);
24280 pr_debug("Set kernel text: %lx - %lx for read write\n",
24281 start, start+size);
24282
24283 @@ -897,6 +903,7 @@ void set_kernel_text_ro(void)
24284 if (!kernel_set_to_readonly)
24285 return;
24286
24287 + start = ktla_ktva(start);
24288 pr_debug("Set kernel text: %lx - %lx for read only\n",
24289 start, start+size);
24290
24291 @@ -925,6 +932,7 @@ void mark_rodata_ro(void)
24292 unsigned long start = PFN_ALIGN(_text);
24293 unsigned long size = PFN_ALIGN(_etext) - start;
24294
24295 + start = ktla_ktva(start);
24296 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
24297 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
24298 size >> 10);
24299 diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
24300 index 436a030..b8596b9 100644
24301 --- a/arch/x86/mm/init_64.c
24302 +++ b/arch/x86/mm/init_64.c
24303 @@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpages_on);
24304 * around without checking the pgd every time.
24305 */
24306
24307 -pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
24308 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
24309 EXPORT_SYMBOL_GPL(__supported_pte_mask);
24310
24311 int force_personality32;
24312 @@ -108,12 +108,22 @@ void sync_global_pgds(unsigned long start, unsigned long end)
24313
24314 for (address = start; address <= end; address += PGDIR_SIZE) {
24315 const pgd_t *pgd_ref = pgd_offset_k(address);
24316 +
24317 +#ifdef CONFIG_PAX_PER_CPU_PGD
24318 + unsigned long cpu;
24319 +#else
24320 struct page *page;
24321 +#endif
24322
24323 if (pgd_none(*pgd_ref))
24324 continue;
24325
24326 spin_lock(&pgd_lock);
24327 +
24328 +#ifdef CONFIG_PAX_PER_CPU_PGD
24329 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
24330 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
24331 +#else
24332 list_for_each_entry(page, &pgd_list, lru) {
24333 pgd_t *pgd;
24334 spinlock_t *pgt_lock;
24335 @@ -122,6 +132,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
24336 /* the pgt_lock only for Xen */
24337 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
24338 spin_lock(pgt_lock);
24339 +#endif
24340
24341 if (pgd_none(*pgd))
24342 set_pgd(pgd, *pgd_ref);
24343 @@ -129,7 +140,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
24344 BUG_ON(pgd_page_vaddr(*pgd)
24345 != pgd_page_vaddr(*pgd_ref));
24346
24347 +#ifndef CONFIG_PAX_PER_CPU_PGD
24348 spin_unlock(pgt_lock);
24349 +#endif
24350 +
24351 }
24352 spin_unlock(&pgd_lock);
24353 }
24354 @@ -203,7 +217,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
24355 pmd = fill_pmd(pud, vaddr);
24356 pte = fill_pte(pmd, vaddr);
24357
24358 + pax_open_kernel();
24359 set_pte(pte, new_pte);
24360 + pax_close_kernel();
24361
24362 /*
24363 * It's enough to flush this one mapping.
24364 @@ -262,14 +278,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
24365 pgd = pgd_offset_k((unsigned long)__va(phys));
24366 if (pgd_none(*pgd)) {
24367 pud = (pud_t *) spp_getpage();
24368 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
24369 - _PAGE_USER));
24370 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
24371 }
24372 pud = pud_offset(pgd, (unsigned long)__va(phys));
24373 if (pud_none(*pud)) {
24374 pmd = (pmd_t *) spp_getpage();
24375 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
24376 - _PAGE_USER));
24377 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
24378 }
24379 pmd = pmd_offset(pud, phys);
24380 BUG_ON(!pmd_none(*pmd));
24381 @@ -330,7 +344,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
24382 if (pfn >= pgt_buf_top)
24383 panic("alloc_low_page: ran out of memory");
24384
24385 - adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
24386 + adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
24387 clear_page(adr);
24388 *phys = pfn * PAGE_SIZE;
24389 return adr;
24390 @@ -346,7 +360,7 @@ static __ref void *map_low_page(void *virt)
24391
24392 phys = __pa(virt);
24393 left = phys & (PAGE_SIZE - 1);
24394 - adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
24395 + adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
24396 adr = (void *)(((unsigned long)adr) | left);
24397
24398 return adr;
24399 @@ -684,6 +698,12 @@ void __init mem_init(void)
24400
24401 pci_iommu_alloc();
24402
24403 +#ifdef CONFIG_PAX_PER_CPU_PGD
24404 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
24405 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
24406 + KERNEL_PGD_PTRS);
24407 +#endif
24408 +
24409 /* clear_bss() already clear the empty_zero_page */
24410
24411 reservedpages = 0;
24412 @@ -844,8 +864,8 @@ int kern_addr_valid(unsigned long addr)
24413 static struct vm_area_struct gate_vma = {
24414 .vm_start = VSYSCALL_START,
24415 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
24416 - .vm_page_prot = PAGE_READONLY_EXEC,
24417 - .vm_flags = VM_READ | VM_EXEC
24418 + .vm_page_prot = PAGE_READONLY,
24419 + .vm_flags = VM_READ
24420 };
24421
24422 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
24423 @@ -879,7 +899,7 @@ int in_gate_area_no_mm(unsigned long addr)
24424
24425 const char *arch_vma_name(struct vm_area_struct *vma)
24426 {
24427 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
24428 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
24429 return "[vdso]";
24430 if (vma == &gate_vma)
24431 return "[vsyscall]";
24432 diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
24433 index 7b179b4..6bd1777 100644
24434 --- a/arch/x86/mm/iomap_32.c
24435 +++ b/arch/x86/mm/iomap_32.c
24436 @@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
24437 type = kmap_atomic_idx_push();
24438 idx = type + KM_TYPE_NR * smp_processor_id();
24439 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
24440 +
24441 + pax_open_kernel();
24442 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
24443 + pax_close_kernel();
24444 +
24445 arch_flush_lazy_mmu_mode();
24446
24447 return (void *)vaddr;
24448 diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
24449 index be1ef57..55f0160 100644
24450 --- a/arch/x86/mm/ioremap.c
24451 +++ b/arch/x86/mm/ioremap.c
24452 @@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
24453 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
24454 int is_ram = page_is_ram(pfn);
24455
24456 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
24457 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
24458 return NULL;
24459 WARN_ON_ONCE(is_ram);
24460 }
24461 @@ -315,6 +315,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
24462
24463 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
24464 if (page_is_ram(start >> PAGE_SHIFT))
24465 +#ifdef CONFIG_HIGHMEM
24466 + if ((start >> PAGE_SHIFT) < max_low_pfn)
24467 +#endif
24468 return __va(phys);
24469
24470 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
24471 @@ -344,7 +347,7 @@ static int __init early_ioremap_debug_setup(char *str)
24472 early_param("early_ioremap_debug", early_ioremap_debug_setup);
24473
24474 static __initdata int after_paging_init;
24475 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
24476 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
24477
24478 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
24479 {
24480 @@ -381,8 +384,7 @@ void __init early_ioremap_init(void)
24481 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
24482
24483 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
24484 - memset(bm_pte, 0, sizeof(bm_pte));
24485 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
24486 + pmd_populate_user(&init_mm, pmd, bm_pte);
24487
24488 /*
24489 * The boot-ioremap range spans multiple pmds, for which
24490 diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
24491 index d87dd6d..bf3fa66 100644
24492 --- a/arch/x86/mm/kmemcheck/kmemcheck.c
24493 +++ b/arch/x86/mm/kmemcheck/kmemcheck.c
24494 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
24495 * memory (e.g. tracked pages)? For now, we need this to avoid
24496 * invoking kmemcheck for PnP BIOS calls.
24497 */
24498 - if (regs->flags & X86_VM_MASK)
24499 + if (v8086_mode(regs))
24500 return false;
24501 - if (regs->cs != __KERNEL_CS)
24502 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
24503 return false;
24504
24505 pte = kmemcheck_pte_lookup(address);
24506 diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
24507 index 845df68..1d8d29f 100644
24508 --- a/arch/x86/mm/mmap.c
24509 +++ b/arch/x86/mm/mmap.c
24510 @@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
24511 * Leave an at least ~128 MB hole with possible stack randomization.
24512 */
24513 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
24514 -#define MAX_GAP (TASK_SIZE/6*5)
24515 +#define MAX_GAP (pax_task_size/6*5)
24516
24517 static int mmap_is_legacy(void)
24518 {
24519 @@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
24520 return rnd << PAGE_SHIFT;
24521 }
24522
24523 -static unsigned long mmap_base(void)
24524 +static unsigned long mmap_base(struct mm_struct *mm)
24525 {
24526 unsigned long gap = rlimit(RLIMIT_STACK);
24527 + unsigned long pax_task_size = TASK_SIZE;
24528 +
24529 +#ifdef CONFIG_PAX_SEGMEXEC
24530 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
24531 + pax_task_size = SEGMEXEC_TASK_SIZE;
24532 +#endif
24533
24534 if (gap < MIN_GAP)
24535 gap = MIN_GAP;
24536 else if (gap > MAX_GAP)
24537 gap = MAX_GAP;
24538
24539 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
24540 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
24541 }
24542
24543 /*
24544 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
24545 * does, but not when emulating X86_32
24546 */
24547 -static unsigned long mmap_legacy_base(void)
24548 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
24549 {
24550 - if (mmap_is_ia32())
24551 + if (mmap_is_ia32()) {
24552 +
24553 +#ifdef CONFIG_PAX_SEGMEXEC
24554 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
24555 + return SEGMEXEC_TASK_UNMAPPED_BASE;
24556 + else
24557 +#endif
24558 +
24559 return TASK_UNMAPPED_BASE;
24560 - else
24561 + } else
24562 return TASK_UNMAPPED_BASE + mmap_rnd();
24563 }
24564
24565 @@ -113,11 +126,23 @@ static unsigned long mmap_legacy_base(void)
24566 void arch_pick_mmap_layout(struct mm_struct *mm)
24567 {
24568 if (mmap_is_legacy()) {
24569 - mm->mmap_base = mmap_legacy_base();
24570 + mm->mmap_base = mmap_legacy_base(mm);
24571 +
24572 +#ifdef CONFIG_PAX_RANDMMAP
24573 + if (mm->pax_flags & MF_PAX_RANDMMAP)
24574 + mm->mmap_base += mm->delta_mmap;
24575 +#endif
24576 +
24577 mm->get_unmapped_area = arch_get_unmapped_area;
24578 mm->unmap_area = arch_unmap_area;
24579 } else {
24580 - mm->mmap_base = mmap_base();
24581 + mm->mmap_base = mmap_base(mm);
24582 +
24583 +#ifdef CONFIG_PAX_RANDMMAP
24584 + if (mm->pax_flags & MF_PAX_RANDMMAP)
24585 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
24586 +#endif
24587 +
24588 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
24589 mm->unmap_area = arch_unmap_area_topdown;
24590 }
24591 diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
24592 index dc0b727..dc9d71a 100644
24593 --- a/arch/x86/mm/mmio-mod.c
24594 +++ b/arch/x86/mm/mmio-mod.c
24595 @@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
24596 break;
24597 default:
24598 {
24599 - unsigned char *ip = (unsigned char *)instptr;
24600 + unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
24601 my_trace->opcode = MMIO_UNKNOWN_OP;
24602 my_trace->width = 0;
24603 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
24604 @@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
24605 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
24606 void __iomem *addr)
24607 {
24608 - static atomic_t next_id;
24609 + static atomic_unchecked_t next_id;
24610 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
24611 /* These are page-unaligned. */
24612 struct mmiotrace_map map = {
24613 @@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
24614 .private = trace
24615 },
24616 .phys = offset,
24617 - .id = atomic_inc_return(&next_id)
24618 + .id = atomic_inc_return_unchecked(&next_id)
24619 };
24620 map.map_id = trace->id;
24621
24622 diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
24623 index b008656..773eac2 100644
24624 --- a/arch/x86/mm/pageattr-test.c
24625 +++ b/arch/x86/mm/pageattr-test.c
24626 @@ -36,7 +36,7 @@ enum {
24627
24628 static int pte_testbit(pte_t pte)
24629 {
24630 - return pte_flags(pte) & _PAGE_UNUSED1;
24631 + return pte_flags(pte) & _PAGE_CPA_TEST;
24632 }
24633
24634 struct split_state {
24635 diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
24636 index e1ebde3..b1e1db38 100644
24637 --- a/arch/x86/mm/pageattr.c
24638 +++ b/arch/x86/mm/pageattr.c
24639 @@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
24640 */
24641 #ifdef CONFIG_PCI_BIOS
24642 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
24643 - pgprot_val(forbidden) |= _PAGE_NX;
24644 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
24645 #endif
24646
24647 /*
24648 @@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
24649 * Does not cover __inittext since that is gone later on. On
24650 * 64bit we do not enforce !NX on the low mapping
24651 */
24652 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
24653 - pgprot_val(forbidden) |= _PAGE_NX;
24654 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
24655 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
24656
24657 +#ifdef CONFIG_DEBUG_RODATA
24658 /*
24659 * The .rodata section needs to be read-only. Using the pfn
24660 * catches all aliases.
24661 @@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
24662 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
24663 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
24664 pgprot_val(forbidden) |= _PAGE_RW;
24665 +#endif
24666
24667 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
24668 /*
24669 @@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
24670 }
24671 #endif
24672
24673 +#ifdef CONFIG_PAX_KERNEXEC
24674 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
24675 + pgprot_val(forbidden) |= _PAGE_RW;
24676 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
24677 + }
24678 +#endif
24679 +
24680 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
24681
24682 return prot;
24683 @@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
24684 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
24685 {
24686 /* change init_mm */
24687 + pax_open_kernel();
24688 set_pte_atomic(kpte, pte);
24689 +
24690 #ifdef CONFIG_X86_32
24691 if (!SHARED_KERNEL_PMD) {
24692 +
24693 +#ifdef CONFIG_PAX_PER_CPU_PGD
24694 + unsigned long cpu;
24695 +#else
24696 struct page *page;
24697 +#endif
24698
24699 +#ifdef CONFIG_PAX_PER_CPU_PGD
24700 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
24701 + pgd_t *pgd = get_cpu_pgd(cpu);
24702 +#else
24703 list_for_each_entry(page, &pgd_list, lru) {
24704 - pgd_t *pgd;
24705 + pgd_t *pgd = (pgd_t *)page_address(page);
24706 +#endif
24707 +
24708 pud_t *pud;
24709 pmd_t *pmd;
24710
24711 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
24712 + pgd += pgd_index(address);
24713 pud = pud_offset(pgd, address);
24714 pmd = pmd_offset(pud, address);
24715 set_pte_atomic((pte_t *)pmd, pte);
24716 }
24717 }
24718 #endif
24719 + pax_close_kernel();
24720 }
24721
24722 static int
24723 diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
24724 index f6ff57b..481690f 100644
24725 --- a/arch/x86/mm/pat.c
24726 +++ b/arch/x86/mm/pat.c
24727 @@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
24728
24729 if (!entry) {
24730 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
24731 - current->comm, current->pid, start, end);
24732 + current->comm, task_pid_nr(current), start, end);
24733 return -EINVAL;
24734 }
24735
24736 @@ -492,8 +492,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
24737 while (cursor < to) {
24738 if (!devmem_is_allowed(pfn)) {
24739 printk(KERN_INFO
24740 - "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
24741 - current->comm, from, to);
24742 + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
24743 + current->comm, from, to, cursor);
24744 return 0;
24745 }
24746 cursor += PAGE_SIZE;
24747 @@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
24748 printk(KERN_INFO
24749 "%s:%d ioremap_change_attr failed %s "
24750 "for %Lx-%Lx\n",
24751 - current->comm, current->pid,
24752 + current->comm, task_pid_nr(current),
24753 cattr_name(flags),
24754 base, (unsigned long long)(base + size));
24755 return -EINVAL;
24756 @@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
24757 if (want_flags != flags) {
24758 printk(KERN_WARNING
24759 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
24760 - current->comm, current->pid,
24761 + current->comm, task_pid_nr(current),
24762 cattr_name(want_flags),
24763 (unsigned long long)paddr,
24764 (unsigned long long)(paddr + size),
24765 @@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
24766 free_memtype(paddr, paddr + size);
24767 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
24768 " for %Lx-%Lx, got %s\n",
24769 - current->comm, current->pid,
24770 + current->comm, task_pid_nr(current),
24771 cattr_name(want_flags),
24772 (unsigned long long)paddr,
24773 (unsigned long long)(paddr + size),
24774 diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
24775 index 9f0614d..92ae64a 100644
24776 --- a/arch/x86/mm/pf_in.c
24777 +++ b/arch/x86/mm/pf_in.c
24778 @@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
24779 int i;
24780 enum reason_type rv = OTHERS;
24781
24782 - p = (unsigned char *)ins_addr;
24783 + p = (unsigned char *)ktla_ktva(ins_addr);
24784 p += skip_prefix(p, &prf);
24785 p += get_opcode(p, &opcode);
24786
24787 @@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
24788 struct prefix_bits prf;
24789 int i;
24790
24791 - p = (unsigned char *)ins_addr;
24792 + p = (unsigned char *)ktla_ktva(ins_addr);
24793 p += skip_prefix(p, &prf);
24794 p += get_opcode(p, &opcode);
24795
24796 @@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
24797 struct prefix_bits prf;
24798 int i;
24799
24800 - p = (unsigned char *)ins_addr;
24801 + p = (unsigned char *)ktla_ktva(ins_addr);
24802 p += skip_prefix(p, &prf);
24803 p += get_opcode(p, &opcode);
24804
24805 @@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
24806 struct prefix_bits prf;
24807 int i;
24808
24809 - p = (unsigned char *)ins_addr;
24810 + p = (unsigned char *)ktla_ktva(ins_addr);
24811 p += skip_prefix(p, &prf);
24812 p += get_opcode(p, &opcode);
24813 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
24814 @@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
24815 struct prefix_bits prf;
24816 int i;
24817
24818 - p = (unsigned char *)ins_addr;
24819 + p = (unsigned char *)ktla_ktva(ins_addr);
24820 p += skip_prefix(p, &prf);
24821 p += get_opcode(p, &opcode);
24822 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
24823 diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
24824 index 8573b83..c3b1a30 100644
24825 --- a/arch/x86/mm/pgtable.c
24826 +++ b/arch/x86/mm/pgtable.c
24827 @@ -84,10 +84,52 @@ static inline void pgd_list_del(pgd_t *pgd)
24828 list_del(&page->lru);
24829 }
24830
24831 -#define UNSHARED_PTRS_PER_PGD \
24832 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
24833 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24834 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
24835
24836 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
24837 +{
24838 + while (count--)
24839 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
24840 +}
24841 +#endif
24842
24843 +#ifdef CONFIG_PAX_PER_CPU_PGD
24844 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
24845 +{
24846 + while (count--)
24847 +
24848 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24849 + *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
24850 +#else
24851 + *dst++ = *src++;
24852 +#endif
24853 +
24854 +}
24855 +#endif
24856 +
24857 +#ifdef CONFIG_X86_64
24858 +#define pxd_t pud_t
24859 +#define pyd_t pgd_t
24860 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
24861 +#define pxd_free(mm, pud) pud_free((mm), (pud))
24862 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
24863 +#define pyd_offset(mm, address) pgd_offset((mm), (address))
24864 +#define PYD_SIZE PGDIR_SIZE
24865 +#else
24866 +#define pxd_t pmd_t
24867 +#define pyd_t pud_t
24868 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
24869 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
24870 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
24871 +#define pyd_offset(mm, address) pud_offset((mm), (address))
24872 +#define PYD_SIZE PUD_SIZE
24873 +#endif
24874 +
24875 +#ifdef CONFIG_PAX_PER_CPU_PGD
24876 +static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
24877 +static inline void pgd_dtor(pgd_t *pgd) {}
24878 +#else
24879 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
24880 {
24881 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
24882 @@ -128,6 +170,7 @@ static void pgd_dtor(pgd_t *pgd)
24883 pgd_list_del(pgd);
24884 spin_unlock(&pgd_lock);
24885 }
24886 +#endif
24887
24888 /*
24889 * List of all pgd's needed for non-PAE so it can invalidate entries
24890 @@ -140,7 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
24891 * -- wli
24892 */
24893
24894 -#ifdef CONFIG_X86_PAE
24895 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
24896 /*
24897 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
24898 * updating the top-level pagetable entries to guarantee the
24899 @@ -152,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
24900 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
24901 * and initialize the kernel pmds here.
24902 */
24903 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
24904 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
24905
24906 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
24907 {
24908 @@ -170,36 +213,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
24909 */
24910 flush_tlb_mm(mm);
24911 }
24912 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
24913 +#define PREALLOCATED_PXDS USER_PGD_PTRS
24914 #else /* !CONFIG_X86_PAE */
24915
24916 /* No need to prepopulate any pagetable entries in non-PAE modes. */
24917 -#define PREALLOCATED_PMDS 0
24918 +#define PREALLOCATED_PXDS 0
24919
24920 #endif /* CONFIG_X86_PAE */
24921
24922 -static void free_pmds(pmd_t *pmds[])
24923 +static void free_pxds(pxd_t *pxds[])
24924 {
24925 int i;
24926
24927 - for(i = 0; i < PREALLOCATED_PMDS; i++)
24928 - if (pmds[i])
24929 - free_page((unsigned long)pmds[i]);
24930 + for(i = 0; i < PREALLOCATED_PXDS; i++)
24931 + if (pxds[i])
24932 + free_page((unsigned long)pxds[i]);
24933 }
24934
24935 -static int preallocate_pmds(pmd_t *pmds[])
24936 +static int preallocate_pxds(pxd_t *pxds[])
24937 {
24938 int i;
24939 bool failed = false;
24940
24941 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
24942 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
24943 - if (pmd == NULL)
24944 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
24945 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
24946 + if (pxd == NULL)
24947 failed = true;
24948 - pmds[i] = pmd;
24949 + pxds[i] = pxd;
24950 }
24951
24952 if (failed) {
24953 - free_pmds(pmds);
24954 + free_pxds(pxds);
24955 return -ENOMEM;
24956 }
24957
24958 @@ -212,51 +257,55 @@ static int preallocate_pmds(pmd_t *pmds[])
24959 * preallocate which never got a corresponding vma will need to be
24960 * freed manually.
24961 */
24962 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
24963 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
24964 {
24965 int i;
24966
24967 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
24968 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
24969 pgd_t pgd = pgdp[i];
24970
24971 if (pgd_val(pgd) != 0) {
24972 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
24973 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
24974
24975 - pgdp[i] = native_make_pgd(0);
24976 + set_pgd(pgdp + i, native_make_pgd(0));
24977
24978 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
24979 - pmd_free(mm, pmd);
24980 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
24981 + pxd_free(mm, pxd);
24982 }
24983 }
24984 }
24985
24986 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
24987 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
24988 {
24989 - pud_t *pud;
24990 + pyd_t *pyd;
24991 unsigned long addr;
24992 int i;
24993
24994 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
24995 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
24996 return;
24997
24998 - pud = pud_offset(pgd, 0);
24999 +#ifdef CONFIG_X86_64
25000 + pyd = pyd_offset(mm, 0L);
25001 +#else
25002 + pyd = pyd_offset(pgd, 0L);
25003 +#endif
25004
25005 - for (addr = i = 0; i < PREALLOCATED_PMDS;
25006 - i++, pud++, addr += PUD_SIZE) {
25007 - pmd_t *pmd = pmds[i];
25008 + for (addr = i = 0; i < PREALLOCATED_PXDS;
25009 + i++, pyd++, addr += PYD_SIZE) {
25010 + pxd_t *pxd = pxds[i];
25011
25012 if (i >= KERNEL_PGD_BOUNDARY)
25013 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
25014 - sizeof(pmd_t) * PTRS_PER_PMD);
25015 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
25016 + sizeof(pxd_t) * PTRS_PER_PMD);
25017
25018 - pud_populate(mm, pud, pmd);
25019 + pyd_populate(mm, pyd, pxd);
25020 }
25021 }
25022
25023 pgd_t *pgd_alloc(struct mm_struct *mm)
25024 {
25025 pgd_t *pgd;
25026 - pmd_t *pmds[PREALLOCATED_PMDS];
25027 + pxd_t *pxds[PREALLOCATED_PXDS];
25028
25029 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
25030
25031 @@ -265,11 +314,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
25032
25033 mm->pgd = pgd;
25034
25035 - if (preallocate_pmds(pmds) != 0)
25036 + if (preallocate_pxds(pxds) != 0)
25037 goto out_free_pgd;
25038
25039 if (paravirt_pgd_alloc(mm) != 0)
25040 - goto out_free_pmds;
25041 + goto out_free_pxds;
25042
25043 /*
25044 * Make sure that pre-populating the pmds is atomic with
25045 @@ -279,14 +328,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
25046 spin_lock(&pgd_lock);
25047
25048 pgd_ctor(mm, pgd);
25049 - pgd_prepopulate_pmd(mm, pgd, pmds);
25050 + pgd_prepopulate_pxd(mm, pgd, pxds);
25051
25052 spin_unlock(&pgd_lock);
25053
25054 return pgd;
25055
25056 -out_free_pmds:
25057 - free_pmds(pmds);
25058 +out_free_pxds:
25059 + free_pxds(pxds);
25060 out_free_pgd:
25061 free_page((unsigned long)pgd);
25062 out:
25063 @@ -295,7 +344,7 @@ out:
25064
25065 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
25066 {
25067 - pgd_mop_up_pmds(mm, pgd);
25068 + pgd_mop_up_pxds(mm, pgd);
25069 pgd_dtor(pgd);
25070 paravirt_pgd_free(mm, pgd);
25071 free_page((unsigned long)pgd);
25072 diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
25073 index cac7184..09a39fa 100644
25074 --- a/arch/x86/mm/pgtable_32.c
25075 +++ b/arch/x86/mm/pgtable_32.c
25076 @@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
25077 return;
25078 }
25079 pte = pte_offset_kernel(pmd, vaddr);
25080 +
25081 + pax_open_kernel();
25082 if (pte_val(pteval))
25083 set_pte_at(&init_mm, vaddr, pte, pteval);
25084 else
25085 pte_clear(&init_mm, vaddr, pte);
25086 + pax_close_kernel();
25087
25088 /*
25089 * It's enough to flush this one mapping.
25090 diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
25091 index 410531d..0f16030 100644
25092 --- a/arch/x86/mm/setup_nx.c
25093 +++ b/arch/x86/mm/setup_nx.c
25094 @@ -5,8 +5,10 @@
25095 #include <asm/pgtable.h>
25096 #include <asm/proto.h>
25097
25098 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
25099 static int disable_nx __cpuinitdata;
25100
25101 +#ifndef CONFIG_PAX_PAGEEXEC
25102 /*
25103 * noexec = on|off
25104 *
25105 @@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
25106 return 0;
25107 }
25108 early_param("noexec", noexec_setup);
25109 +#endif
25110 +
25111 +#endif
25112
25113 void __cpuinit x86_configure_nx(void)
25114 {
25115 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
25116 if (cpu_has_nx && !disable_nx)
25117 __supported_pte_mask |= _PAGE_NX;
25118 else
25119 +#endif
25120 __supported_pte_mask &= ~_PAGE_NX;
25121 }
25122
25123 diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
25124 index d6c0418..06a0ad5 100644
25125 --- a/arch/x86/mm/tlb.c
25126 +++ b/arch/x86/mm/tlb.c
25127 @@ -65,7 +65,11 @@ void leave_mm(int cpu)
25128 BUG();
25129 cpumask_clear_cpu(cpu,
25130 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
25131 +
25132 +#ifndef CONFIG_PAX_PER_CPU_PGD
25133 load_cr3(swapper_pg_dir);
25134 +#endif
25135 +
25136 }
25137 EXPORT_SYMBOL_GPL(leave_mm);
25138
25139 diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
25140 index 6687022..ceabcfa 100644
25141 --- a/arch/x86/net/bpf_jit.S
25142 +++ b/arch/x86/net/bpf_jit.S
25143 @@ -9,6 +9,7 @@
25144 */
25145 #include <linux/linkage.h>
25146 #include <asm/dwarf2.h>
25147 +#include <asm/alternative-asm.h>
25148
25149 /*
25150 * Calling convention :
25151 @@ -35,6 +36,7 @@ sk_load_word:
25152 jle bpf_slow_path_word
25153 mov (SKBDATA,%rsi),%eax
25154 bswap %eax /* ntohl() */
25155 + pax_force_retaddr
25156 ret
25157
25158
25159 @@ -53,6 +55,7 @@ sk_load_half:
25160 jle bpf_slow_path_half
25161 movzwl (SKBDATA,%rsi),%eax
25162 rol $8,%ax # ntohs()
25163 + pax_force_retaddr
25164 ret
25165
25166 sk_load_byte_ind:
25167 @@ -66,6 +69,7 @@ sk_load_byte:
25168 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
25169 jle bpf_slow_path_byte
25170 movzbl (SKBDATA,%rsi),%eax
25171 + pax_force_retaddr
25172 ret
25173
25174 /**
25175 @@ -82,6 +86,7 @@ ENTRY(sk_load_byte_msh)
25176 movzbl (SKBDATA,%rsi),%ebx
25177 and $15,%bl
25178 shl $2,%bl
25179 + pax_force_retaddr
25180 ret
25181 CFI_ENDPROC
25182 ENDPROC(sk_load_byte_msh)
25183 @@ -91,6 +96,7 @@ bpf_error:
25184 xor %eax,%eax
25185 mov -8(%rbp),%rbx
25186 leaveq
25187 + pax_force_retaddr
25188 ret
25189
25190 /* rsi contains offset and can be scratched */
25191 @@ -113,6 +119,7 @@ bpf_slow_path_word:
25192 js bpf_error
25193 mov -12(%rbp),%eax
25194 bswap %eax
25195 + pax_force_retaddr
25196 ret
25197
25198 bpf_slow_path_half:
25199 @@ -121,12 +128,14 @@ bpf_slow_path_half:
25200 mov -12(%rbp),%ax
25201 rol $8,%ax
25202 movzwl %ax,%eax
25203 + pax_force_retaddr
25204 ret
25205
25206 bpf_slow_path_byte:
25207 bpf_slow_path_common(1)
25208 js bpf_error
25209 movzbl -12(%rbp),%eax
25210 + pax_force_retaddr
25211 ret
25212
25213 bpf_slow_path_byte_msh:
25214 @@ -137,4 +146,5 @@ bpf_slow_path_byte_msh:
25215 and $15,%al
25216 shl $2,%al
25217 xchg %eax,%ebx
25218 + pax_force_retaddr
25219 ret
25220 diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
25221 index 7c1b765..8c072c6 100644
25222 --- a/arch/x86/net/bpf_jit_comp.c
25223 +++ b/arch/x86/net/bpf_jit_comp.c
25224 @@ -117,6 +117,10 @@ static inline void bpf_flush_icache(void *start, void *end)
25225 set_fs(old_fs);
25226 }
25227
25228 +struct bpf_jit_work {
25229 + struct work_struct work;
25230 + void *image;
25231 +};
25232
25233 void bpf_jit_compile(struct sk_filter *fp)
25234 {
25235 @@ -141,6 +145,10 @@ void bpf_jit_compile(struct sk_filter *fp)
25236 if (addrs == NULL)
25237 return;
25238
25239 + fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
25240 + if (!fp->work)
25241 + goto out;
25242 +
25243 /* Before first pass, make a rough estimation of addrs[]
25244 * each bpf instruction is translated to less than 64 bytes
25245 */
25246 @@ -476,7 +484,7 @@ void bpf_jit_compile(struct sk_filter *fp)
25247 func = sk_load_word;
25248 common_load: seen |= SEEN_DATAREF;
25249 if ((int)K < 0)
25250 - goto out;
25251 + goto error;
25252 t_offset = func - (image + addrs[i]);
25253 EMIT1_off32(0xbe, K); /* mov imm32,%esi */
25254 EMIT1_off32(0xe8, t_offset); /* call */
25255 @@ -586,17 +594,18 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
25256 break;
25257 default:
25258 /* hmm, too complex filter, give up with jit compiler */
25259 - goto out;
25260 + goto error;
25261 }
25262 ilen = prog - temp;
25263 if (image) {
25264 if (unlikely(proglen + ilen > oldproglen)) {
25265 pr_err("bpb_jit_compile fatal error\n");
25266 - kfree(addrs);
25267 - module_free(NULL, image);
25268 - return;
25269 + module_free_exec(NULL, image);
25270 + goto error;
25271 }
25272 + pax_open_kernel();
25273 memcpy(image + proglen, temp, ilen);
25274 + pax_close_kernel();
25275 }
25276 proglen += ilen;
25277 addrs[i] = proglen;
25278 @@ -617,11 +626,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
25279 break;
25280 }
25281 if (proglen == oldproglen) {
25282 - image = module_alloc(max_t(unsigned int,
25283 - proglen,
25284 - sizeof(struct work_struct)));
25285 + image = module_alloc_exec(proglen);
25286 if (!image)
25287 - goto out;
25288 + goto error;
25289 }
25290 oldproglen = proglen;
25291 }
25292 @@ -637,7 +644,10 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
25293 bpf_flush_icache(image, image + proglen);
25294
25295 fp->bpf_func = (void *)image;
25296 - }
25297 + } else
25298 +error:
25299 + kfree(fp->work);
25300 +
25301 out:
25302 kfree(addrs);
25303 return;
25304 @@ -645,18 +655,20 @@ out:
25305
25306 static void jit_free_defer(struct work_struct *arg)
25307 {
25308 - module_free(NULL, arg);
25309 + module_free_exec(NULL, ((struct bpf_jit_work *)arg)->image);
25310 + kfree(arg);
25311 }
25312
25313 /* run from softirq, we must use a work_struct to call
25314 - * module_free() from process context
25315 + * module_free_exec() from process context
25316 */
25317 void bpf_jit_free(struct sk_filter *fp)
25318 {
25319 if (fp->bpf_func != sk_run_filter) {
25320 - struct work_struct *work = (struct work_struct *)fp->bpf_func;
25321 + struct work_struct *work = &fp->work->work;
25322
25323 INIT_WORK(work, jit_free_defer);
25324 + fp->work->image = fp->bpf_func;
25325 schedule_work(work);
25326 }
25327 }
25328 diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
25329 index bff89df..377758a 100644
25330 --- a/arch/x86/oprofile/backtrace.c
25331 +++ b/arch/x86/oprofile/backtrace.c
25332 @@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
25333 struct stack_frame_ia32 *fp;
25334 unsigned long bytes;
25335
25336 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
25337 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
25338 if (bytes != sizeof(bufhead))
25339 return NULL;
25340
25341 - fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
25342 + fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
25343
25344 oprofile_add_trace(bufhead[0].return_address);
25345
25346 @@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
25347 struct stack_frame bufhead[2];
25348 unsigned long bytes;
25349
25350 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
25351 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
25352 if (bytes != sizeof(bufhead))
25353 return NULL;
25354
25355 @@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
25356 {
25357 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
25358
25359 - if (!user_mode_vm(regs)) {
25360 + if (!user_mode(regs)) {
25361 unsigned long stack = kernel_stack_pointer(regs);
25362 if (depth)
25363 dump_trace(NULL, regs, (unsigned long *)stack, 0,
25364 diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
25365 index cb29191..036766d 100644
25366 --- a/arch/x86/pci/mrst.c
25367 +++ b/arch/x86/pci/mrst.c
25368 @@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
25369 printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
25370 pci_mmcfg_late_init();
25371 pcibios_enable_irq = mrst_pci_irq_enable;
25372 - pci_root_ops = pci_mrst_ops;
25373 + pax_open_kernel();
25374 + memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
25375 + pax_close_kernel();
25376 /* Continue with standard init */
25377 return 1;
25378 }
25379 diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
25380 index da8fe05..7ee6704 100644
25381 --- a/arch/x86/pci/pcbios.c
25382 +++ b/arch/x86/pci/pcbios.c
25383 @@ -79,50 +79,93 @@ union bios32 {
25384 static struct {
25385 unsigned long address;
25386 unsigned short segment;
25387 -} bios32_indirect = { 0, __KERNEL_CS };
25388 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
25389
25390 /*
25391 * Returns the entry point for the given service, NULL on error
25392 */
25393
25394 -static unsigned long bios32_service(unsigned long service)
25395 +static unsigned long __devinit bios32_service(unsigned long service)
25396 {
25397 unsigned char return_code; /* %al */
25398 unsigned long address; /* %ebx */
25399 unsigned long length; /* %ecx */
25400 unsigned long entry; /* %edx */
25401 unsigned long flags;
25402 + struct desc_struct d, *gdt;
25403
25404 local_irq_save(flags);
25405 - __asm__("lcall *(%%edi); cld"
25406 +
25407 + gdt = get_cpu_gdt_table(smp_processor_id());
25408 +
25409 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
25410 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
25411 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
25412 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
25413 +
25414 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
25415 : "=a" (return_code),
25416 "=b" (address),
25417 "=c" (length),
25418 "=d" (entry)
25419 : "0" (service),
25420 "1" (0),
25421 - "D" (&bios32_indirect));
25422 + "D" (&bios32_indirect),
25423 + "r"(__PCIBIOS_DS)
25424 + : "memory");
25425 +
25426 + pax_open_kernel();
25427 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
25428 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
25429 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
25430 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
25431 + pax_close_kernel();
25432 +
25433 local_irq_restore(flags);
25434
25435 switch (return_code) {
25436 - case 0:
25437 - return address + entry;
25438 - case 0x80: /* Not present */
25439 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
25440 - return 0;
25441 - default: /* Shouldn't happen */
25442 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
25443 - service, return_code);
25444 + case 0: {
25445 + int cpu;
25446 + unsigned char flags;
25447 +
25448 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
25449 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
25450 + printk(KERN_WARNING "bios32_service: not valid\n");
25451 return 0;
25452 + }
25453 + address = address + PAGE_OFFSET;
25454 + length += 16UL; /* some BIOSs underreport this... */
25455 + flags = 4;
25456 + if (length >= 64*1024*1024) {
25457 + length >>= PAGE_SHIFT;
25458 + flags |= 8;
25459 + }
25460 +
25461 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
25462 + gdt = get_cpu_gdt_table(cpu);
25463 + pack_descriptor(&d, address, length, 0x9b, flags);
25464 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
25465 + pack_descriptor(&d, address, length, 0x93, flags);
25466 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
25467 + }
25468 + return entry;
25469 + }
25470 + case 0x80: /* Not present */
25471 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
25472 + return 0;
25473 + default: /* Shouldn't happen */
25474 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
25475 + service, return_code);
25476 + return 0;
25477 }
25478 }
25479
25480 static struct {
25481 unsigned long address;
25482 unsigned short segment;
25483 -} pci_indirect = { 0, __KERNEL_CS };
25484 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
25485
25486 -static int pci_bios_present;
25487 +static int pci_bios_present __read_only;
25488
25489 static int __devinit check_pcibios(void)
25490 {
25491 @@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
25492 unsigned long flags, pcibios_entry;
25493
25494 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
25495 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
25496 + pci_indirect.address = pcibios_entry;
25497
25498 local_irq_save(flags);
25499 - __asm__(
25500 - "lcall *(%%edi); cld\n\t"
25501 + __asm__("movw %w6, %%ds\n\t"
25502 + "lcall *%%ss:(%%edi); cld\n\t"
25503 + "push %%ss\n\t"
25504 + "pop %%ds\n\t"
25505 "jc 1f\n\t"
25506 "xor %%ah, %%ah\n"
25507 "1:"
25508 @@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
25509 "=b" (ebx),
25510 "=c" (ecx)
25511 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
25512 - "D" (&pci_indirect)
25513 + "D" (&pci_indirect),
25514 + "r" (__PCIBIOS_DS)
25515 : "memory");
25516 local_irq_restore(flags);
25517
25518 @@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
25519
25520 switch (len) {
25521 case 1:
25522 - __asm__("lcall *(%%esi); cld\n\t"
25523 + __asm__("movw %w6, %%ds\n\t"
25524 + "lcall *%%ss:(%%esi); cld\n\t"
25525 + "push %%ss\n\t"
25526 + "pop %%ds\n\t"
25527 "jc 1f\n\t"
25528 "xor %%ah, %%ah\n"
25529 "1:"
25530 @@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
25531 : "1" (PCIBIOS_READ_CONFIG_BYTE),
25532 "b" (bx),
25533 "D" ((long)reg),
25534 - "S" (&pci_indirect));
25535 + "S" (&pci_indirect),
25536 + "r" (__PCIBIOS_DS));
25537 /*
25538 * Zero-extend the result beyond 8 bits, do not trust the
25539 * BIOS having done it:
25540 @@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
25541 *value &= 0xff;
25542 break;
25543 case 2:
25544 - __asm__("lcall *(%%esi); cld\n\t"
25545 + __asm__("movw %w6, %%ds\n\t"
25546 + "lcall *%%ss:(%%esi); cld\n\t"
25547 + "push %%ss\n\t"
25548 + "pop %%ds\n\t"
25549 "jc 1f\n\t"
25550 "xor %%ah, %%ah\n"
25551 "1:"
25552 @@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
25553 : "1" (PCIBIOS_READ_CONFIG_WORD),
25554 "b" (bx),
25555 "D" ((long)reg),
25556 - "S" (&pci_indirect));
25557 + "S" (&pci_indirect),
25558 + "r" (__PCIBIOS_DS));
25559 /*
25560 * Zero-extend the result beyond 16 bits, do not trust the
25561 * BIOS having done it:
25562 @@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
25563 *value &= 0xffff;
25564 break;
25565 case 4:
25566 - __asm__("lcall *(%%esi); cld\n\t"
25567 + __asm__("movw %w6, %%ds\n\t"
25568 + "lcall *%%ss:(%%esi); cld\n\t"
25569 + "push %%ss\n\t"
25570 + "pop %%ds\n\t"
25571 "jc 1f\n\t"
25572 "xor %%ah, %%ah\n"
25573 "1:"
25574 @@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
25575 : "1" (PCIBIOS_READ_CONFIG_DWORD),
25576 "b" (bx),
25577 "D" ((long)reg),
25578 - "S" (&pci_indirect));
25579 + "S" (&pci_indirect),
25580 + "r" (__PCIBIOS_DS));
25581 break;
25582 }
25583
25584 @@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
25585
25586 switch (len) {
25587 case 1:
25588 - __asm__("lcall *(%%esi); cld\n\t"
25589 + __asm__("movw %w6, %%ds\n\t"
25590 + "lcall *%%ss:(%%esi); cld\n\t"
25591 + "push %%ss\n\t"
25592 + "pop %%ds\n\t"
25593 "jc 1f\n\t"
25594 "xor %%ah, %%ah\n"
25595 "1:"
25596 @@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
25597 "c" (value),
25598 "b" (bx),
25599 "D" ((long)reg),
25600 - "S" (&pci_indirect));
25601 + "S" (&pci_indirect),
25602 + "r" (__PCIBIOS_DS));
25603 break;
25604 case 2:
25605 - __asm__("lcall *(%%esi); cld\n\t"
25606 + __asm__("movw %w6, %%ds\n\t"
25607 + "lcall *%%ss:(%%esi); cld\n\t"
25608 + "push %%ss\n\t"
25609 + "pop %%ds\n\t"
25610 "jc 1f\n\t"
25611 "xor %%ah, %%ah\n"
25612 "1:"
25613 @@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
25614 "c" (value),
25615 "b" (bx),
25616 "D" ((long)reg),
25617 - "S" (&pci_indirect));
25618 + "S" (&pci_indirect),
25619 + "r" (__PCIBIOS_DS));
25620 break;
25621 case 4:
25622 - __asm__("lcall *(%%esi); cld\n\t"
25623 + __asm__("movw %w6, %%ds\n\t"
25624 + "lcall *%%ss:(%%esi); cld\n\t"
25625 + "push %%ss\n\t"
25626 + "pop %%ds\n\t"
25627 "jc 1f\n\t"
25628 "xor %%ah, %%ah\n"
25629 "1:"
25630 @@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
25631 "c" (value),
25632 "b" (bx),
25633 "D" ((long)reg),
25634 - "S" (&pci_indirect));
25635 + "S" (&pci_indirect),
25636 + "r" (__PCIBIOS_DS));
25637 break;
25638 }
25639
25640 @@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
25641
25642 DBG("PCI: Fetching IRQ routing table... ");
25643 __asm__("push %%es\n\t"
25644 + "movw %w8, %%ds\n\t"
25645 "push %%ds\n\t"
25646 "pop %%es\n\t"
25647 - "lcall *(%%esi); cld\n\t"
25648 + "lcall *%%ss:(%%esi); cld\n\t"
25649 "pop %%es\n\t"
25650 + "push %%ss\n\t"
25651 + "pop %%ds\n"
25652 "jc 1f\n\t"
25653 "xor %%ah, %%ah\n"
25654 "1:"
25655 @@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
25656 "1" (0),
25657 "D" ((long) &opt),
25658 "S" (&pci_indirect),
25659 - "m" (opt)
25660 + "m" (opt),
25661 + "r" (__PCIBIOS_DS)
25662 : "memory");
25663 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
25664 if (ret & 0xff00)
25665 @@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
25666 {
25667 int ret;
25668
25669 - __asm__("lcall *(%%esi); cld\n\t"
25670 + __asm__("movw %w5, %%ds\n\t"
25671 + "lcall *%%ss:(%%esi); cld\n\t"
25672 + "push %%ss\n\t"
25673 + "pop %%ds\n"
25674 "jc 1f\n\t"
25675 "xor %%ah, %%ah\n"
25676 "1:"
25677 @@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
25678 : "0" (PCIBIOS_SET_PCI_HW_INT),
25679 "b" ((dev->bus->number << 8) | dev->devfn),
25680 "c" ((irq << 8) | (pin + 10)),
25681 - "S" (&pci_indirect));
25682 + "S" (&pci_indirect),
25683 + "r" (__PCIBIOS_DS));
25684 return !(ret & 0xff00);
25685 }
25686 EXPORT_SYMBOL(pcibios_set_irq_routing);
25687 diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
25688 index 40e4469..1ab536e 100644
25689 --- a/arch/x86/platform/efi/efi_32.c
25690 +++ b/arch/x86/platform/efi/efi_32.c
25691 @@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
25692 {
25693 struct desc_ptr gdt_descr;
25694
25695 +#ifdef CONFIG_PAX_KERNEXEC
25696 + struct desc_struct d;
25697 +#endif
25698 +
25699 local_irq_save(efi_rt_eflags);
25700
25701 load_cr3(initial_page_table);
25702 __flush_tlb_all();
25703
25704 +#ifdef CONFIG_PAX_KERNEXEC
25705 + pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
25706 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
25707 + pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
25708 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
25709 +#endif
25710 +
25711 gdt_descr.address = __pa(get_cpu_gdt_table(0));
25712 gdt_descr.size = GDT_SIZE - 1;
25713 load_gdt(&gdt_descr);
25714 @@ -58,6 +69,14 @@ void efi_call_phys_epilog(void)
25715 {
25716 struct desc_ptr gdt_descr;
25717
25718 +#ifdef CONFIG_PAX_KERNEXEC
25719 + struct desc_struct d;
25720 +
25721 + memset(&d, 0, sizeof d);
25722 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
25723 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
25724 +#endif
25725 +
25726 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
25727 gdt_descr.size = GDT_SIZE - 1;
25728 load_gdt(&gdt_descr);
25729 diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
25730 index fbe66e6..c5c0dd2 100644
25731 --- a/arch/x86/platform/efi/efi_stub_32.S
25732 +++ b/arch/x86/platform/efi/efi_stub_32.S
25733 @@ -6,7 +6,9 @@
25734 */
25735
25736 #include <linux/linkage.h>
25737 +#include <linux/init.h>
25738 #include <asm/page_types.h>
25739 +#include <asm/segment.h>
25740
25741 /*
25742 * efi_call_phys(void *, ...) is a function with variable parameters.
25743 @@ -20,7 +22,7 @@
25744 * service functions will comply with gcc calling convention, too.
25745 */
25746
25747 -.text
25748 +__INIT
25749 ENTRY(efi_call_phys)
25750 /*
25751 * 0. The function can only be called in Linux kernel. So CS has been
25752 @@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
25753 * The mapping of lower virtual memory has been created in prelog and
25754 * epilog.
25755 */
25756 - movl $1f, %edx
25757 - subl $__PAGE_OFFSET, %edx
25758 - jmp *%edx
25759 + movl $(__KERNEXEC_EFI_DS), %edx
25760 + mov %edx, %ds
25761 + mov %edx, %es
25762 + mov %edx, %ss
25763 + ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
25764 1:
25765
25766 /*
25767 @@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
25768 * parameter 2, ..., param n. To make things easy, we save the return
25769 * address of efi_call_phys in a global variable.
25770 */
25771 - popl %edx
25772 - movl %edx, saved_return_addr
25773 - /* get the function pointer into ECX*/
25774 - popl %ecx
25775 - movl %ecx, efi_rt_function_ptr
25776 - movl $2f, %edx
25777 - subl $__PAGE_OFFSET, %edx
25778 - pushl %edx
25779 + popl (saved_return_addr)
25780 + popl (efi_rt_function_ptr)
25781
25782 /*
25783 * 3. Clear PG bit in %CR0.
25784 @@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
25785 /*
25786 * 5. Call the physical function.
25787 */
25788 - jmp *%ecx
25789 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
25790
25791 -2:
25792 /*
25793 * 6. After EFI runtime service returns, control will return to
25794 * following instruction. We'd better readjust stack pointer first.
25795 @@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
25796 movl %cr0, %edx
25797 orl $0x80000000, %edx
25798 movl %edx, %cr0
25799 - jmp 1f
25800 -1:
25801 +
25802 /*
25803 * 8. Now restore the virtual mode from flat mode by
25804 * adding EIP with PAGE_OFFSET.
25805 */
25806 - movl $1f, %edx
25807 - jmp *%edx
25808 + ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
25809 1:
25810 + movl $(__KERNEL_DS), %edx
25811 + mov %edx, %ds
25812 + mov %edx, %es
25813 + mov %edx, %ss
25814
25815 /*
25816 * 9. Balance the stack. And because EAX contain the return value,
25817 * we'd better not clobber it.
25818 */
25819 - leal efi_rt_function_ptr, %edx
25820 - movl (%edx), %ecx
25821 - pushl %ecx
25822 + pushl (efi_rt_function_ptr)
25823
25824 /*
25825 - * 10. Push the saved return address onto the stack and return.
25826 + * 10. Return to the saved return address.
25827 */
25828 - leal saved_return_addr, %edx
25829 - movl (%edx), %ecx
25830 - pushl %ecx
25831 - ret
25832 + jmpl *(saved_return_addr)
25833 ENDPROC(efi_call_phys)
25834 .previous
25835
25836 -.data
25837 +__INITDATA
25838 saved_return_addr:
25839 .long 0
25840 efi_rt_function_ptr:
25841 diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
25842 index 4c07cca..2c8427d 100644
25843 --- a/arch/x86/platform/efi/efi_stub_64.S
25844 +++ b/arch/x86/platform/efi/efi_stub_64.S
25845 @@ -7,6 +7,7 @@
25846 */
25847
25848 #include <linux/linkage.h>
25849 +#include <asm/alternative-asm.h>
25850
25851 #define SAVE_XMM \
25852 mov %rsp, %rax; \
25853 @@ -40,6 +41,7 @@ ENTRY(efi_call0)
25854 call *%rdi
25855 addq $32, %rsp
25856 RESTORE_XMM
25857 + pax_force_retaddr 0, 1
25858 ret
25859 ENDPROC(efi_call0)
25860
25861 @@ -50,6 +52,7 @@ ENTRY(efi_call1)
25862 call *%rdi
25863 addq $32, %rsp
25864 RESTORE_XMM
25865 + pax_force_retaddr 0, 1
25866 ret
25867 ENDPROC(efi_call1)
25868
25869 @@ -60,6 +63,7 @@ ENTRY(efi_call2)
25870 call *%rdi
25871 addq $32, %rsp
25872 RESTORE_XMM
25873 + pax_force_retaddr 0, 1
25874 ret
25875 ENDPROC(efi_call2)
25876
25877 @@ -71,6 +75,7 @@ ENTRY(efi_call3)
25878 call *%rdi
25879 addq $32, %rsp
25880 RESTORE_XMM
25881 + pax_force_retaddr 0, 1
25882 ret
25883 ENDPROC(efi_call3)
25884
25885 @@ -83,6 +88,7 @@ ENTRY(efi_call4)
25886 call *%rdi
25887 addq $32, %rsp
25888 RESTORE_XMM
25889 + pax_force_retaddr 0, 1
25890 ret
25891 ENDPROC(efi_call4)
25892
25893 @@ -96,6 +102,7 @@ ENTRY(efi_call5)
25894 call *%rdi
25895 addq $48, %rsp
25896 RESTORE_XMM
25897 + pax_force_retaddr 0, 1
25898 ret
25899 ENDPROC(efi_call5)
25900
25901 @@ -112,5 +119,6 @@ ENTRY(efi_call6)
25902 call *%rdi
25903 addq $48, %rsp
25904 RESTORE_XMM
25905 + pax_force_retaddr 0, 1
25906 ret
25907 ENDPROC(efi_call6)
25908 diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
25909 index 475e2cd..1b8e708 100644
25910 --- a/arch/x86/platform/mrst/mrst.c
25911 +++ b/arch/x86/platform/mrst/mrst.c
25912 @@ -76,18 +76,20 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
25913 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
25914 int sfi_mrtc_num;
25915
25916 -static void mrst_power_off(void)
25917 +static __noreturn void mrst_power_off(void)
25918 {
25919 if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
25920 intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 1);
25921 + BUG();
25922 }
25923
25924 -static void mrst_reboot(void)
25925 +static __noreturn void mrst_reboot(void)
25926 {
25927 if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
25928 intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 0);
25929 else
25930 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
25931 + BUG();
25932 }
25933
25934 /* parse all the mtimer info to a static mtimer array */
25935 diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
25936 index f10c0af..3ec1f95 100644
25937 --- a/arch/x86/power/cpu.c
25938 +++ b/arch/x86/power/cpu.c
25939 @@ -131,7 +131,7 @@ static void do_fpu_end(void)
25940 static void fix_processor_context(void)
25941 {
25942 int cpu = smp_processor_id();
25943 - struct tss_struct *t = &per_cpu(init_tss, cpu);
25944 + struct tss_struct *t = init_tss + cpu;
25945
25946 set_tss_desc(cpu, t); /*
25947 * This just modifies memory; should not be
25948 @@ -141,7 +141,9 @@ static void fix_processor_context(void)
25949 */
25950
25951 #ifdef CONFIG_X86_64
25952 + pax_open_kernel();
25953 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
25954 + pax_close_kernel();
25955
25956 syscall_init(); /* This sets MSR_*STAR and related */
25957 #endif
25958 diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
25959 index 5d17950..2253fc9 100644
25960 --- a/arch/x86/vdso/Makefile
25961 +++ b/arch/x86/vdso/Makefile
25962 @@ -137,7 +137,7 @@ quiet_cmd_vdso = VDSO $@
25963 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
25964 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
25965
25966 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
25967 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
25968 GCOV_PROFILE := n
25969
25970 #
25971 diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
25972 index 468d591..8e80a0a 100644
25973 --- a/arch/x86/vdso/vdso32-setup.c
25974 +++ b/arch/x86/vdso/vdso32-setup.c
25975 @@ -25,6 +25,7 @@
25976 #include <asm/tlbflush.h>
25977 #include <asm/vdso.h>
25978 #include <asm/proto.h>
25979 +#include <asm/mman.h>
25980
25981 enum {
25982 VDSO_DISABLED = 0,
25983 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
25984 void enable_sep_cpu(void)
25985 {
25986 int cpu = get_cpu();
25987 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
25988 + struct tss_struct *tss = init_tss + cpu;
25989
25990 if (!boot_cpu_has(X86_FEATURE_SEP)) {
25991 put_cpu();
25992 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
25993 gate_vma.vm_start = FIXADDR_USER_START;
25994 gate_vma.vm_end = FIXADDR_USER_END;
25995 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
25996 - gate_vma.vm_page_prot = __P101;
25997 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
25998 /*
25999 * Make sure the vDSO gets into every core dump.
26000 * Dumping its contents makes post-mortem fully interpretable later
26001 @@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26002 if (compat)
26003 addr = VDSO_HIGH_BASE;
26004 else {
26005 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
26006 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
26007 if (IS_ERR_VALUE(addr)) {
26008 ret = addr;
26009 goto up_fail;
26010 }
26011 }
26012
26013 - current->mm->context.vdso = (void *)addr;
26014 + current->mm->context.vdso = addr;
26015
26016 if (compat_uses_vma || !compat) {
26017 /*
26018 @@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26019 }
26020
26021 current_thread_info()->sysenter_return =
26022 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
26023 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
26024
26025 up_fail:
26026 if (ret)
26027 - current->mm->context.vdso = NULL;
26028 + current->mm->context.vdso = 0;
26029
26030 up_write(&mm->mmap_sem);
26031
26032 @@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
26033
26034 const char *arch_vma_name(struct vm_area_struct *vma)
26035 {
26036 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
26037 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
26038 return "[vdso]";
26039 +
26040 +#ifdef CONFIG_PAX_SEGMEXEC
26041 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
26042 + return "[vdso]";
26043 +#endif
26044 +
26045 return NULL;
26046 }
26047
26048 @@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
26049 * Check to see if the corresponding task was created in compat vdso
26050 * mode.
26051 */
26052 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
26053 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
26054 return &gate_vma;
26055 return NULL;
26056 }
26057 diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
26058 index 153407c..611cba9 100644
26059 --- a/arch/x86/vdso/vma.c
26060 +++ b/arch/x86/vdso/vma.c
26061 @@ -16,8 +16,6 @@
26062 #include <asm/vdso.h>
26063 #include <asm/page.h>
26064
26065 -unsigned int __read_mostly vdso_enabled = 1;
26066 -
26067 extern char vdso_start[], vdso_end[];
26068 extern unsigned short vdso_sync_cpuid;
26069
26070 @@ -96,7 +94,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
26071 * unaligned here as a result of stack start randomization.
26072 */
26073 addr = PAGE_ALIGN(addr);
26074 - addr = align_addr(addr, NULL, ALIGN_VDSO);
26075
26076 return addr;
26077 }
26078 @@ -106,40 +103,35 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
26079 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26080 {
26081 struct mm_struct *mm = current->mm;
26082 - unsigned long addr;
26083 + unsigned long addr = 0;
26084 int ret;
26085
26086 - if (!vdso_enabled)
26087 - return 0;
26088 -
26089 down_write(&mm->mmap_sem);
26090 +
26091 +#ifdef CONFIG_PAX_RANDMMAP
26092 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
26093 +#endif
26094 +
26095 addr = vdso_addr(mm->start_stack, vdso_size);
26096 + addr = align_addr(addr, NULL, ALIGN_VDSO);
26097 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
26098 if (IS_ERR_VALUE(addr)) {
26099 ret = addr;
26100 goto up_fail;
26101 }
26102
26103 - current->mm->context.vdso = (void *)addr;
26104 + mm->context.vdso = addr;
26105
26106 ret = install_special_mapping(mm, addr, vdso_size,
26107 VM_READ|VM_EXEC|
26108 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
26109 VM_ALWAYSDUMP,
26110 vdso_pages);
26111 - if (ret) {
26112 - current->mm->context.vdso = NULL;
26113 - goto up_fail;
26114 - }
26115 +
26116 + if (ret)
26117 + mm->context.vdso = 0;
26118
26119 up_fail:
26120 up_write(&mm->mmap_sem);
26121 return ret;
26122 }
26123 -
26124 -static __init int vdso_setup(char *s)
26125 -{
26126 - vdso_enabled = simple_strtoul(s, NULL, 0);
26127 - return 0;
26128 -}
26129 -__setup("vdso=", vdso_setup);
26130 diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
26131 index 4172af8..2c8ed7f 100644
26132 --- a/arch/x86/xen/enlighten.c
26133 +++ b/arch/x86/xen/enlighten.c
26134 @@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
26135
26136 struct shared_info xen_dummy_shared_info;
26137
26138 -void *xen_initial_gdt;
26139 -
26140 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
26141 __read_mostly int xen_have_vector_callback;
26142 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
26143 @@ -1029,30 +1027,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
26144 #endif
26145 };
26146
26147 -static void xen_reboot(int reason)
26148 +static __noreturn void xen_reboot(int reason)
26149 {
26150 struct sched_shutdown r = { .reason = reason };
26151
26152 - if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
26153 - BUG();
26154 + HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
26155 + BUG();
26156 }
26157
26158 -static void xen_restart(char *msg)
26159 +static __noreturn void xen_restart(char *msg)
26160 {
26161 xen_reboot(SHUTDOWN_reboot);
26162 }
26163
26164 -static void xen_emergency_restart(void)
26165 +static __noreturn void xen_emergency_restart(void)
26166 {
26167 xen_reboot(SHUTDOWN_reboot);
26168 }
26169
26170 -static void xen_machine_halt(void)
26171 +static __noreturn void xen_machine_halt(void)
26172 {
26173 xen_reboot(SHUTDOWN_poweroff);
26174 }
26175
26176 -static void xen_machine_power_off(void)
26177 +static __noreturn void xen_machine_power_off(void)
26178 {
26179 if (pm_power_off)
26180 pm_power_off();
26181 @@ -1155,7 +1153,17 @@ asmlinkage void __init xen_start_kernel(void)
26182 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
26183
26184 /* Work out if we support NX */
26185 - x86_configure_nx();
26186 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
26187 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
26188 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
26189 + unsigned l, h;
26190 +
26191 + __supported_pte_mask |= _PAGE_NX;
26192 + rdmsr(MSR_EFER, l, h);
26193 + l |= EFER_NX;
26194 + wrmsr(MSR_EFER, l, h);
26195 + }
26196 +#endif
26197
26198 xen_setup_features();
26199
26200 @@ -1186,13 +1194,6 @@ asmlinkage void __init xen_start_kernel(void)
26201
26202 machine_ops = xen_machine_ops;
26203
26204 - /*
26205 - * The only reliable way to retain the initial address of the
26206 - * percpu gdt_page is to remember it here, so we can go and
26207 - * mark it RW later, when the initial percpu area is freed.
26208 - */
26209 - xen_initial_gdt = &per_cpu(gdt_page, 0);
26210 -
26211 xen_smp_init();
26212
26213 #ifdef CONFIG_ACPI_NUMA
26214 diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
26215 index 95c1cf6..4bfa5be 100644
26216 --- a/arch/x86/xen/mmu.c
26217 +++ b/arch/x86/xen/mmu.c
26218 @@ -1733,6 +1733,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
26219 convert_pfn_mfn(init_level4_pgt);
26220 convert_pfn_mfn(level3_ident_pgt);
26221 convert_pfn_mfn(level3_kernel_pgt);
26222 + convert_pfn_mfn(level3_vmalloc_start_pgt);
26223 + convert_pfn_mfn(level3_vmalloc_end_pgt);
26224 + convert_pfn_mfn(level3_vmemmap_pgt);
26225
26226 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
26227 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
26228 @@ -1751,7 +1754,11 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
26229 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
26230 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
26231 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
26232 + set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
26233 + set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
26234 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
26235 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
26236 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
26237 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
26238 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
26239
26240 @@ -1958,6 +1965,7 @@ static void __init xen_post_allocator_init(void)
26241 pv_mmu_ops.set_pud = xen_set_pud;
26242 #if PAGETABLE_LEVELS == 4
26243 pv_mmu_ops.set_pgd = xen_set_pgd;
26244 + pv_mmu_ops.set_pgd_batched = xen_set_pgd;
26245 #endif
26246
26247 /* This will work as long as patching hasn't happened yet
26248 @@ -2039,6 +2047,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
26249 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
26250 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
26251 .set_pgd = xen_set_pgd_hyper,
26252 + .set_pgd_batched = xen_set_pgd_hyper,
26253
26254 .alloc_pud = xen_alloc_pmd_init,
26255 .release_pud = xen_release_pmd_init,
26256 diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
26257 index 501d4e0..e877605 100644
26258 --- a/arch/x86/xen/smp.c
26259 +++ b/arch/x86/xen/smp.c
26260 @@ -194,11 +194,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
26261 {
26262 BUG_ON(smp_processor_id() != 0);
26263 native_smp_prepare_boot_cpu();
26264 -
26265 - /* We've switched to the "real" per-cpu gdt, so make sure the
26266 - old memory can be recycled */
26267 - make_lowmem_page_readwrite(xen_initial_gdt);
26268 -
26269 xen_filter_cpu_maps();
26270 xen_setup_vcpu_info_placement();
26271 }
26272 @@ -275,12 +270,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
26273 gdt = get_cpu_gdt_table(cpu);
26274
26275 ctxt->flags = VGCF_IN_KERNEL;
26276 - ctxt->user_regs.ds = __USER_DS;
26277 - ctxt->user_regs.es = __USER_DS;
26278 + ctxt->user_regs.ds = __KERNEL_DS;
26279 + ctxt->user_regs.es = __KERNEL_DS;
26280 ctxt->user_regs.ss = __KERNEL_DS;
26281 #ifdef CONFIG_X86_32
26282 ctxt->user_regs.fs = __KERNEL_PERCPU;
26283 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
26284 + savesegment(gs, ctxt->user_regs.gs);
26285 #else
26286 ctxt->gs_base_kernel = per_cpu_offset(cpu);
26287 #endif
26288 @@ -331,13 +326,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
26289 int rc;
26290
26291 per_cpu(current_task, cpu) = idle;
26292 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
26293 #ifdef CONFIG_X86_32
26294 irq_ctx_init(cpu);
26295 #else
26296 clear_tsk_thread_flag(idle, TIF_FORK);
26297 - per_cpu(kernel_stack, cpu) =
26298 - (unsigned long)task_stack_page(idle) -
26299 - KERNEL_STACK_OFFSET + THREAD_SIZE;
26300 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
26301 #endif
26302 xen_setup_runstate_info(cpu);
26303 xen_setup_timer(cpu);
26304 diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
26305 index b040b0e..8cc4fe0 100644
26306 --- a/arch/x86/xen/xen-asm_32.S
26307 +++ b/arch/x86/xen/xen-asm_32.S
26308 @@ -83,14 +83,14 @@ ENTRY(xen_iret)
26309 ESP_OFFSET=4 # bytes pushed onto stack
26310
26311 /*
26312 - * Store vcpu_info pointer for easy access. Do it this way to
26313 - * avoid having to reload %fs
26314 + * Store vcpu_info pointer for easy access.
26315 */
26316 #ifdef CONFIG_SMP
26317 - GET_THREAD_INFO(%eax)
26318 - movl TI_cpu(%eax), %eax
26319 - movl __per_cpu_offset(,%eax,4), %eax
26320 - mov xen_vcpu(%eax), %eax
26321 + push %fs
26322 + mov $(__KERNEL_PERCPU), %eax
26323 + mov %eax, %fs
26324 + mov PER_CPU_VAR(xen_vcpu), %eax
26325 + pop %fs
26326 #else
26327 movl xen_vcpu, %eax
26328 #endif
26329 diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
26330 index aaa7291..3f77960 100644
26331 --- a/arch/x86/xen/xen-head.S
26332 +++ b/arch/x86/xen/xen-head.S
26333 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
26334 #ifdef CONFIG_X86_32
26335 mov %esi,xen_start_info
26336 mov $init_thread_union+THREAD_SIZE,%esp
26337 +#ifdef CONFIG_SMP
26338 + movl $cpu_gdt_table,%edi
26339 + movl $__per_cpu_load,%eax
26340 + movw %ax,__KERNEL_PERCPU + 2(%edi)
26341 + rorl $16,%eax
26342 + movb %al,__KERNEL_PERCPU + 4(%edi)
26343 + movb %ah,__KERNEL_PERCPU + 7(%edi)
26344 + movl $__per_cpu_end - 1,%eax
26345 + subl $__per_cpu_start,%eax
26346 + movw %ax,__KERNEL_PERCPU + 0(%edi)
26347 +#endif
26348 #else
26349 mov %rsi,xen_start_info
26350 mov $init_thread_union+THREAD_SIZE,%rsp
26351 diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
26352 index b095739..8c17bcd 100644
26353 --- a/arch/x86/xen/xen-ops.h
26354 +++ b/arch/x86/xen/xen-ops.h
26355 @@ -10,8 +10,6 @@
26356 extern const char xen_hypervisor_callback[];
26357 extern const char xen_failsafe_callback[];
26358
26359 -extern void *xen_initial_gdt;
26360 -
26361 struct trap_info;
26362 void xen_copy_trap_info(struct trap_info *traps);
26363
26364 diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
26365 index 525bd3d..ef888b1 100644
26366 --- a/arch/xtensa/variants/dc232b/include/variant/core.h
26367 +++ b/arch/xtensa/variants/dc232b/include/variant/core.h
26368 @@ -119,9 +119,9 @@
26369 ----------------------------------------------------------------------*/
26370
26371 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
26372 -#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
26373 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
26374 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
26375 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
26376
26377 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
26378 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
26379 diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
26380 index 2f33760..835e50a 100644
26381 --- a/arch/xtensa/variants/fsf/include/variant/core.h
26382 +++ b/arch/xtensa/variants/fsf/include/variant/core.h
26383 @@ -11,6 +11,7 @@
26384 #ifndef _XTENSA_CORE_H
26385 #define _XTENSA_CORE_H
26386
26387 +#include <linux/const.h>
26388
26389 /****************************************************************************
26390 Parameters Useful for Any Code, USER or PRIVILEGED
26391 @@ -112,9 +113,9 @@
26392 ----------------------------------------------------------------------*/
26393
26394 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
26395 -#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
26396 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
26397 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
26398 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
26399
26400 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
26401 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
26402 diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
26403 index af00795..2bb8105 100644
26404 --- a/arch/xtensa/variants/s6000/include/variant/core.h
26405 +++ b/arch/xtensa/variants/s6000/include/variant/core.h
26406 @@ -11,6 +11,7 @@
26407 #ifndef _XTENSA_CORE_CONFIGURATION_H
26408 #define _XTENSA_CORE_CONFIGURATION_H
26409
26410 +#include <linux/const.h>
26411
26412 /****************************************************************************
26413 Parameters Useful for Any Code, USER or PRIVILEGED
26414 @@ -118,9 +119,9 @@
26415 ----------------------------------------------------------------------*/
26416
26417 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
26418 -#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
26419 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
26420 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
26421 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
26422
26423 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
26424 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
26425 diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
26426 index 58916af..9cb880b 100644
26427 --- a/block/blk-iopoll.c
26428 +++ b/block/blk-iopoll.c
26429 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
26430 }
26431 EXPORT_SYMBOL(blk_iopoll_complete);
26432
26433 -static void blk_iopoll_softirq(struct softirq_action *h)
26434 +static void blk_iopoll_softirq(void)
26435 {
26436 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
26437 int rearm = 0, budget = blk_iopoll_budget;
26438 diff --git a/block/blk-map.c b/block/blk-map.c
26439 index 623e1cd..ca1e109 100644
26440 --- a/block/blk-map.c
26441 +++ b/block/blk-map.c
26442 @@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
26443 if (!len || !kbuf)
26444 return -EINVAL;
26445
26446 - do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
26447 + do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
26448 if (do_copy)
26449 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
26450 else
26451 diff --git a/block/blk-softirq.c b/block/blk-softirq.c
26452 index 1366a89..e17f54b 100644
26453 --- a/block/blk-softirq.c
26454 +++ b/block/blk-softirq.c
26455 @@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
26456 * Softirq action handler - move entries to local list and loop over them
26457 * while passing them to the queue registered handler.
26458 */
26459 -static void blk_done_softirq(struct softirq_action *h)
26460 +static void blk_done_softirq(void)
26461 {
26462 struct list_head *cpu_list, local_list;
26463
26464 diff --git a/block/bsg.c b/block/bsg.c
26465 index ff64ae3..593560c 100644
26466 --- a/block/bsg.c
26467 +++ b/block/bsg.c
26468 @@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
26469 struct sg_io_v4 *hdr, struct bsg_device *bd,
26470 fmode_t has_write_perm)
26471 {
26472 + unsigned char tmpcmd[sizeof(rq->__cmd)];
26473 + unsigned char *cmdptr;
26474 +
26475 if (hdr->request_len > BLK_MAX_CDB) {
26476 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
26477 if (!rq->cmd)
26478 return -ENOMEM;
26479 - }
26480 + cmdptr = rq->cmd;
26481 + } else
26482 + cmdptr = tmpcmd;
26483
26484 - if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
26485 + if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
26486 hdr->request_len))
26487 return -EFAULT;
26488
26489 + if (cmdptr != rq->cmd)
26490 + memcpy(rq->cmd, cmdptr, hdr->request_len);
26491 +
26492 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
26493 if (blk_verify_command(rq->cmd, has_write_perm))
26494 return -EPERM;
26495 diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
26496 index 7c668c8..db3521c 100644
26497 --- a/block/compat_ioctl.c
26498 +++ b/block/compat_ioctl.c
26499 @@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
26500 err |= __get_user(f->spec1, &uf->spec1);
26501 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
26502 err |= __get_user(name, &uf->name);
26503 - f->name = compat_ptr(name);
26504 + f->name = (void __force_kernel *)compat_ptr(name);
26505 if (err) {
26506 err = -EFAULT;
26507 goto out;
26508 diff --git a/block/partitions/efi.c b/block/partitions/efi.c
26509 index 6296b40..417c00f 100644
26510 --- a/block/partitions/efi.c
26511 +++ b/block/partitions/efi.c
26512 @@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
26513 if (!gpt)
26514 return NULL;
26515
26516 + if (!le32_to_cpu(gpt->num_partition_entries))
26517 + return NULL;
26518 + pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
26519 + if (!pte)
26520 + return NULL;
26521 +
26522 count = le32_to_cpu(gpt->num_partition_entries) *
26523 le32_to_cpu(gpt->sizeof_partition_entry);
26524 - if (!count)
26525 - return NULL;
26526 - pte = kzalloc(count, GFP_KERNEL);
26527 - if (!pte)
26528 - return NULL;
26529 -
26530 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
26531 (u8 *) pte,
26532 count) < count) {
26533 diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
26534 index 260fa80..e8f3caf 100644
26535 --- a/block/scsi_ioctl.c
26536 +++ b/block/scsi_ioctl.c
26537 @@ -223,8 +223,20 @@ EXPORT_SYMBOL(blk_verify_command);
26538 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
26539 struct sg_io_hdr *hdr, fmode_t mode)
26540 {
26541 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
26542 + unsigned char tmpcmd[sizeof(rq->__cmd)];
26543 + unsigned char *cmdptr;
26544 +
26545 + if (rq->cmd != rq->__cmd)
26546 + cmdptr = rq->cmd;
26547 + else
26548 + cmdptr = tmpcmd;
26549 +
26550 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
26551 return -EFAULT;
26552 +
26553 + if (cmdptr != rq->cmd)
26554 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
26555 +
26556 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
26557 return -EPERM;
26558
26559 @@ -433,6 +445,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
26560 int err;
26561 unsigned int in_len, out_len, bytes, opcode, cmdlen;
26562 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
26563 + unsigned char tmpcmd[sizeof(rq->__cmd)];
26564 + unsigned char *cmdptr;
26565
26566 if (!sic)
26567 return -EINVAL;
26568 @@ -466,9 +480,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
26569 */
26570 err = -EFAULT;
26571 rq->cmd_len = cmdlen;
26572 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
26573 +
26574 + if (rq->cmd != rq->__cmd)
26575 + cmdptr = rq->cmd;
26576 + else
26577 + cmdptr = tmpcmd;
26578 +
26579 + if (copy_from_user(cmdptr, sic->data, cmdlen))
26580 goto error;
26581
26582 + if (rq->cmd != cmdptr)
26583 + memcpy(rq->cmd, cmdptr, cmdlen);
26584 +
26585 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
26586 goto error;
26587
26588 diff --git a/crypto/cryptd.c b/crypto/cryptd.c
26589 index 671d4d6..5f24030 100644
26590 --- a/crypto/cryptd.c
26591 +++ b/crypto/cryptd.c
26592 @@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
26593
26594 struct cryptd_blkcipher_request_ctx {
26595 crypto_completion_t complete;
26596 -};
26597 +} __no_const;
26598
26599 struct cryptd_hash_ctx {
26600 struct crypto_shash *child;
26601 @@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
26602
26603 struct cryptd_aead_request_ctx {
26604 crypto_completion_t complete;
26605 -};
26606 +} __no_const;
26607
26608 static void cryptd_queue_worker(struct work_struct *work);
26609
26610 diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
26611 index 5d41894..22021e4 100644
26612 --- a/drivers/acpi/apei/cper.c
26613 +++ b/drivers/acpi/apei/cper.c
26614 @@ -38,12 +38,12 @@
26615 */
26616 u64 cper_next_record_id(void)
26617 {
26618 - static atomic64_t seq;
26619 + static atomic64_unchecked_t seq;
26620
26621 - if (!atomic64_read(&seq))
26622 - atomic64_set(&seq, ((u64)get_seconds()) << 32);
26623 + if (!atomic64_read_unchecked(&seq))
26624 + atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
26625
26626 - return atomic64_inc_return(&seq);
26627 + return atomic64_inc_return_unchecked(&seq);
26628 }
26629 EXPORT_SYMBOL_GPL(cper_next_record_id);
26630
26631 diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
26632 index b258cab..3fb7da7 100644
26633 --- a/drivers/acpi/ec_sys.c
26634 +++ b/drivers/acpi/ec_sys.c
26635 @@ -12,6 +12,7 @@
26636 #include <linux/acpi.h>
26637 #include <linux/debugfs.h>
26638 #include <linux/module.h>
26639 +#include <linux/uaccess.h>
26640 #include "internal.h"
26641
26642 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
26643 @@ -40,7 +41,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
26644 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
26645 */
26646 unsigned int size = EC_SPACE_SIZE;
26647 - u8 *data = (u8 *) buf;
26648 + u8 data;
26649 loff_t init_off = *off;
26650 int err = 0;
26651
26652 @@ -53,9 +54,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
26653 size = count;
26654
26655 while (size) {
26656 - err = ec_read(*off, &data[*off - init_off]);
26657 + err = ec_read(*off, &data);
26658 if (err)
26659 return err;
26660 + if (put_user(data, &buf[*off - init_off]))
26661 + return -EFAULT;
26662 *off += 1;
26663 size--;
26664 }
26665 @@ -71,7 +74,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
26666
26667 unsigned int size = count;
26668 loff_t init_off = *off;
26669 - u8 *data = (u8 *) buf;
26670 int err = 0;
26671
26672 if (*off >= EC_SPACE_SIZE)
26673 @@ -82,7 +84,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
26674 }
26675
26676 while (size) {
26677 - u8 byte_write = data[*off - init_off];
26678 + u8 byte_write;
26679 + if (get_user(byte_write, &buf[*off - init_off]))
26680 + return -EFAULT;
26681 err = ec_write(*off, byte_write);
26682 if (err)
26683 return err;
26684 diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
26685 index 251c7b62..000462d 100644
26686 --- a/drivers/acpi/proc.c
26687 +++ b/drivers/acpi/proc.c
26688 @@ -343,19 +343,13 @@ acpi_system_write_wakeup_device(struct file *file,
26689 size_t count, loff_t * ppos)
26690 {
26691 struct list_head *node, *next;
26692 - char strbuf[5];
26693 - char str[5] = "";
26694 - unsigned int len = count;
26695 + char strbuf[5] = {0};
26696
26697 - if (len > 4)
26698 - len = 4;
26699 - if (len < 0)
26700 + if (count > 4)
26701 + count = 4;
26702 + if (copy_from_user(strbuf, buffer, count))
26703 return -EFAULT;
26704 -
26705 - if (copy_from_user(strbuf, buffer, len))
26706 - return -EFAULT;
26707 - strbuf[len] = '\0';
26708 - sscanf(strbuf, "%s", str);
26709 + strbuf[count] = '\0';
26710
26711 mutex_lock(&acpi_device_lock);
26712 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
26713 @@ -364,7 +358,7 @@ acpi_system_write_wakeup_device(struct file *file,
26714 if (!dev->wakeup.flags.valid)
26715 continue;
26716
26717 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
26718 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
26719 if (device_can_wakeup(&dev->dev)) {
26720 bool enable = !device_may_wakeup(&dev->dev);
26721 device_set_wakeup_enable(&dev->dev, enable);
26722 diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
26723 index 8ae05ce..7dbbed9 100644
26724 --- a/drivers/acpi/processor_driver.c
26725 +++ b/drivers/acpi/processor_driver.c
26726 @@ -555,7 +555,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
26727 return 0;
26728 #endif
26729
26730 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
26731 + BUG_ON(pr->id >= nr_cpu_ids);
26732
26733 /*
26734 * Buggy BIOS check
26735 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
26736 index c06e0ec..a2c06ba 100644
26737 --- a/drivers/ata/libata-core.c
26738 +++ b/drivers/ata/libata-core.c
26739 @@ -4736,7 +4736,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
26740 struct ata_port *ap;
26741 unsigned int tag;
26742
26743 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
26744 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
26745 ap = qc->ap;
26746
26747 qc->flags = 0;
26748 @@ -4752,7 +4752,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
26749 struct ata_port *ap;
26750 struct ata_link *link;
26751
26752 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
26753 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
26754 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
26755 ap = qc->ap;
26756 link = qc->dev->link;
26757 @@ -5816,6 +5816,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
26758 return;
26759
26760 spin_lock(&lock);
26761 + pax_open_kernel();
26762
26763 for (cur = ops->inherits; cur; cur = cur->inherits) {
26764 void **inherit = (void **)cur;
26765 @@ -5829,8 +5830,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
26766 if (IS_ERR(*pp))
26767 *pp = NULL;
26768
26769 - ops->inherits = NULL;
26770 + *(struct ata_port_operations **)&ops->inherits = NULL;
26771
26772 + pax_close_kernel();
26773 spin_unlock(&lock);
26774 }
26775
26776 diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
26777 index 048589f..4002b98 100644
26778 --- a/drivers/ata/pata_arasan_cf.c
26779 +++ b/drivers/ata/pata_arasan_cf.c
26780 @@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(struct platform_device *pdev)
26781 /* Handle platform specific quirks */
26782 if (pdata->quirk) {
26783 if (pdata->quirk & CF_BROKEN_PIO) {
26784 - ap->ops->set_piomode = NULL;
26785 + pax_open_kernel();
26786 + *(void **)&ap->ops->set_piomode = NULL;
26787 + pax_close_kernel();
26788 ap->pio_mask = 0;
26789 }
26790 if (pdata->quirk & CF_BROKEN_MWDMA)
26791 diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
26792 index f9b983a..887b9d8 100644
26793 --- a/drivers/atm/adummy.c
26794 +++ b/drivers/atm/adummy.c
26795 @@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
26796 vcc->pop(vcc, skb);
26797 else
26798 dev_kfree_skb_any(skb);
26799 - atomic_inc(&vcc->stats->tx);
26800 + atomic_inc_unchecked(&vcc->stats->tx);
26801
26802 return 0;
26803 }
26804 diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
26805 index f8f41e0..1f987dd 100644
26806 --- a/drivers/atm/ambassador.c
26807 +++ b/drivers/atm/ambassador.c
26808 @@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
26809 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
26810
26811 // VC layer stats
26812 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
26813 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
26814
26815 // free the descriptor
26816 kfree (tx_descr);
26817 @@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
26818 dump_skb ("<<<", vc, skb);
26819
26820 // VC layer stats
26821 - atomic_inc(&atm_vcc->stats->rx);
26822 + atomic_inc_unchecked(&atm_vcc->stats->rx);
26823 __net_timestamp(skb);
26824 // end of our responsibility
26825 atm_vcc->push (atm_vcc, skb);
26826 @@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
26827 } else {
26828 PRINTK (KERN_INFO, "dropped over-size frame");
26829 // should we count this?
26830 - atomic_inc(&atm_vcc->stats->rx_drop);
26831 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
26832 }
26833
26834 } else {
26835 @@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
26836 }
26837
26838 if (check_area (skb->data, skb->len)) {
26839 - atomic_inc(&atm_vcc->stats->tx_err);
26840 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
26841 return -ENOMEM; // ?
26842 }
26843
26844 diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
26845 index b22d71c..d6e1049 100644
26846 --- a/drivers/atm/atmtcp.c
26847 +++ b/drivers/atm/atmtcp.c
26848 @@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
26849 if (vcc->pop) vcc->pop(vcc,skb);
26850 else dev_kfree_skb(skb);
26851 if (dev_data) return 0;
26852 - atomic_inc(&vcc->stats->tx_err);
26853 + atomic_inc_unchecked(&vcc->stats->tx_err);
26854 return -ENOLINK;
26855 }
26856 size = skb->len+sizeof(struct atmtcp_hdr);
26857 @@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
26858 if (!new_skb) {
26859 if (vcc->pop) vcc->pop(vcc,skb);
26860 else dev_kfree_skb(skb);
26861 - atomic_inc(&vcc->stats->tx_err);
26862 + atomic_inc_unchecked(&vcc->stats->tx_err);
26863 return -ENOBUFS;
26864 }
26865 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
26866 @@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
26867 if (vcc->pop) vcc->pop(vcc,skb);
26868 else dev_kfree_skb(skb);
26869 out_vcc->push(out_vcc,new_skb);
26870 - atomic_inc(&vcc->stats->tx);
26871 - atomic_inc(&out_vcc->stats->rx);
26872 + atomic_inc_unchecked(&vcc->stats->tx);
26873 + atomic_inc_unchecked(&out_vcc->stats->rx);
26874 return 0;
26875 }
26876
26877 @@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
26878 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
26879 read_unlock(&vcc_sklist_lock);
26880 if (!out_vcc) {
26881 - atomic_inc(&vcc->stats->tx_err);
26882 + atomic_inc_unchecked(&vcc->stats->tx_err);
26883 goto done;
26884 }
26885 skb_pull(skb,sizeof(struct atmtcp_hdr));
26886 @@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
26887 __net_timestamp(new_skb);
26888 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
26889 out_vcc->push(out_vcc,new_skb);
26890 - atomic_inc(&vcc->stats->tx);
26891 - atomic_inc(&out_vcc->stats->rx);
26892 + atomic_inc_unchecked(&vcc->stats->tx);
26893 + atomic_inc_unchecked(&out_vcc->stats->rx);
26894 done:
26895 if (vcc->pop) vcc->pop(vcc,skb);
26896 else dev_kfree_skb(skb);
26897 diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
26898 index 956e9ac..133516d 100644
26899 --- a/drivers/atm/eni.c
26900 +++ b/drivers/atm/eni.c
26901 @@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
26902 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
26903 vcc->dev->number);
26904 length = 0;
26905 - atomic_inc(&vcc->stats->rx_err);
26906 + atomic_inc_unchecked(&vcc->stats->rx_err);
26907 }
26908 else {
26909 length = ATM_CELL_SIZE-1; /* no HEC */
26910 @@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
26911 size);
26912 }
26913 eff = length = 0;
26914 - atomic_inc(&vcc->stats->rx_err);
26915 + atomic_inc_unchecked(&vcc->stats->rx_err);
26916 }
26917 else {
26918 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
26919 @@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
26920 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
26921 vcc->dev->number,vcc->vci,length,size << 2,descr);
26922 length = eff = 0;
26923 - atomic_inc(&vcc->stats->rx_err);
26924 + atomic_inc_unchecked(&vcc->stats->rx_err);
26925 }
26926 }
26927 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
26928 @@ -771,7 +771,7 @@ rx_dequeued++;
26929 vcc->push(vcc,skb);
26930 pushed++;
26931 }
26932 - atomic_inc(&vcc->stats->rx);
26933 + atomic_inc_unchecked(&vcc->stats->rx);
26934 }
26935 wake_up(&eni_dev->rx_wait);
26936 }
26937 @@ -1229,7 +1229,7 @@ static void dequeue_tx(struct atm_dev *dev)
26938 PCI_DMA_TODEVICE);
26939 if (vcc->pop) vcc->pop(vcc,skb);
26940 else dev_kfree_skb_irq(skb);
26941 - atomic_inc(&vcc->stats->tx);
26942 + atomic_inc_unchecked(&vcc->stats->tx);
26943 wake_up(&eni_dev->tx_wait);
26944 dma_complete++;
26945 }
26946 @@ -1569,7 +1569,7 @@ tx_complete++;
26947 /*--------------------------------- entries ---------------------------------*/
26948
26949
26950 -static const char *media_name[] __devinitdata = {
26951 +static const char *media_name[] __devinitconst = {
26952 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
26953 "UTP", "05?", "06?", "07?", /* 4- 7 */
26954 "TAXI","09?", "10?", "11?", /* 8-11 */
26955 diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
26956 index 5072f8a..fa52520d 100644
26957 --- a/drivers/atm/firestream.c
26958 +++ b/drivers/atm/firestream.c
26959 @@ -750,7 +750,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
26960 }
26961 }
26962
26963 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
26964 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
26965
26966 fs_dprintk (FS_DEBUG_TXMEM, "i");
26967 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
26968 @@ -817,7 +817,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
26969 #endif
26970 skb_put (skb, qe->p1 & 0xffff);
26971 ATM_SKB(skb)->vcc = atm_vcc;
26972 - atomic_inc(&atm_vcc->stats->rx);
26973 + atomic_inc_unchecked(&atm_vcc->stats->rx);
26974 __net_timestamp(skb);
26975 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
26976 atm_vcc->push (atm_vcc, skb);
26977 @@ -838,12 +838,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
26978 kfree (pe);
26979 }
26980 if (atm_vcc)
26981 - atomic_inc(&atm_vcc->stats->rx_drop);
26982 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
26983 break;
26984 case 0x1f: /* Reassembly abort: no buffers. */
26985 /* Silently increment error counter. */
26986 if (atm_vcc)
26987 - atomic_inc(&atm_vcc->stats->rx_drop);
26988 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
26989 break;
26990 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
26991 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
26992 diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
26993 index 361f5ae..7fc552d 100644
26994 --- a/drivers/atm/fore200e.c
26995 +++ b/drivers/atm/fore200e.c
26996 @@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
26997 #endif
26998 /* check error condition */
26999 if (*entry->status & STATUS_ERROR)
27000 - atomic_inc(&vcc->stats->tx_err);
27001 + atomic_inc_unchecked(&vcc->stats->tx_err);
27002 else
27003 - atomic_inc(&vcc->stats->tx);
27004 + atomic_inc_unchecked(&vcc->stats->tx);
27005 }
27006 }
27007
27008 @@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
27009 if (skb == NULL) {
27010 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
27011
27012 - atomic_inc(&vcc->stats->rx_drop);
27013 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27014 return -ENOMEM;
27015 }
27016
27017 @@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
27018
27019 dev_kfree_skb_any(skb);
27020
27021 - atomic_inc(&vcc->stats->rx_drop);
27022 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27023 return -ENOMEM;
27024 }
27025
27026 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
27027
27028 vcc->push(vcc, skb);
27029 - atomic_inc(&vcc->stats->rx);
27030 + atomic_inc_unchecked(&vcc->stats->rx);
27031
27032 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
27033
27034 @@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
27035 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
27036 fore200e->atm_dev->number,
27037 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
27038 - atomic_inc(&vcc->stats->rx_err);
27039 + atomic_inc_unchecked(&vcc->stats->rx_err);
27040 }
27041 }
27042
27043 @@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
27044 goto retry_here;
27045 }
27046
27047 - atomic_inc(&vcc->stats->tx_err);
27048 + atomic_inc_unchecked(&vcc->stats->tx_err);
27049
27050 fore200e->tx_sat++;
27051 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
27052 diff --git a/drivers/atm/he.c b/drivers/atm/he.c
27053 index b182c2f..1c6fa8a 100644
27054 --- a/drivers/atm/he.c
27055 +++ b/drivers/atm/he.c
27056 @@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
27057
27058 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
27059 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
27060 - atomic_inc(&vcc->stats->rx_drop);
27061 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27062 goto return_host_buffers;
27063 }
27064
27065 @@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
27066 RBRQ_LEN_ERR(he_dev->rbrq_head)
27067 ? "LEN_ERR" : "",
27068 vcc->vpi, vcc->vci);
27069 - atomic_inc(&vcc->stats->rx_err);
27070 + atomic_inc_unchecked(&vcc->stats->rx_err);
27071 goto return_host_buffers;
27072 }
27073
27074 @@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
27075 vcc->push(vcc, skb);
27076 spin_lock(&he_dev->global_lock);
27077
27078 - atomic_inc(&vcc->stats->rx);
27079 + atomic_inc_unchecked(&vcc->stats->rx);
27080
27081 return_host_buffers:
27082 ++pdus_assembled;
27083 @@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
27084 tpd->vcc->pop(tpd->vcc, tpd->skb);
27085 else
27086 dev_kfree_skb_any(tpd->skb);
27087 - atomic_inc(&tpd->vcc->stats->tx_err);
27088 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
27089 }
27090 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
27091 return;
27092 @@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
27093 vcc->pop(vcc, skb);
27094 else
27095 dev_kfree_skb_any(skb);
27096 - atomic_inc(&vcc->stats->tx_err);
27097 + atomic_inc_unchecked(&vcc->stats->tx_err);
27098 return -EINVAL;
27099 }
27100
27101 @@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
27102 vcc->pop(vcc, skb);
27103 else
27104 dev_kfree_skb_any(skb);
27105 - atomic_inc(&vcc->stats->tx_err);
27106 + atomic_inc_unchecked(&vcc->stats->tx_err);
27107 return -EINVAL;
27108 }
27109 #endif
27110 @@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
27111 vcc->pop(vcc, skb);
27112 else
27113 dev_kfree_skb_any(skb);
27114 - atomic_inc(&vcc->stats->tx_err);
27115 + atomic_inc_unchecked(&vcc->stats->tx_err);
27116 spin_unlock_irqrestore(&he_dev->global_lock, flags);
27117 return -ENOMEM;
27118 }
27119 @@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
27120 vcc->pop(vcc, skb);
27121 else
27122 dev_kfree_skb_any(skb);
27123 - atomic_inc(&vcc->stats->tx_err);
27124 + atomic_inc_unchecked(&vcc->stats->tx_err);
27125 spin_unlock_irqrestore(&he_dev->global_lock, flags);
27126 return -ENOMEM;
27127 }
27128 @@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
27129 __enqueue_tpd(he_dev, tpd, cid);
27130 spin_unlock_irqrestore(&he_dev->global_lock, flags);
27131
27132 - atomic_inc(&vcc->stats->tx);
27133 + atomic_inc_unchecked(&vcc->stats->tx);
27134
27135 return 0;
27136 }
27137 diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
27138 index b812103..e391a49 100644
27139 --- a/drivers/atm/horizon.c
27140 +++ b/drivers/atm/horizon.c
27141 @@ -1035,7 +1035,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
27142 {
27143 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
27144 // VC layer stats
27145 - atomic_inc(&vcc->stats->rx);
27146 + atomic_inc_unchecked(&vcc->stats->rx);
27147 __net_timestamp(skb);
27148 // end of our responsibility
27149 vcc->push (vcc, skb);
27150 @@ -1187,7 +1187,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
27151 dev->tx_iovec = NULL;
27152
27153 // VC layer stats
27154 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
27155 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
27156
27157 // free the skb
27158 hrz_kfree_skb (skb);
27159 diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
27160 index 1c05212..c28e200 100644
27161 --- a/drivers/atm/idt77252.c
27162 +++ b/drivers/atm/idt77252.c
27163 @@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
27164 else
27165 dev_kfree_skb(skb);
27166
27167 - atomic_inc(&vcc->stats->tx);
27168 + atomic_inc_unchecked(&vcc->stats->tx);
27169 }
27170
27171 atomic_dec(&scq->used);
27172 @@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
27173 if ((sb = dev_alloc_skb(64)) == NULL) {
27174 printk("%s: Can't allocate buffers for aal0.\n",
27175 card->name);
27176 - atomic_add(i, &vcc->stats->rx_drop);
27177 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
27178 break;
27179 }
27180 if (!atm_charge(vcc, sb->truesize)) {
27181 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
27182 card->name);
27183 - atomic_add(i - 1, &vcc->stats->rx_drop);
27184 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
27185 dev_kfree_skb(sb);
27186 break;
27187 }
27188 @@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
27189 ATM_SKB(sb)->vcc = vcc;
27190 __net_timestamp(sb);
27191 vcc->push(vcc, sb);
27192 - atomic_inc(&vcc->stats->rx);
27193 + atomic_inc_unchecked(&vcc->stats->rx);
27194
27195 cell += ATM_CELL_PAYLOAD;
27196 }
27197 @@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
27198 "(CDC: %08x)\n",
27199 card->name, len, rpp->len, readl(SAR_REG_CDC));
27200 recycle_rx_pool_skb(card, rpp);
27201 - atomic_inc(&vcc->stats->rx_err);
27202 + atomic_inc_unchecked(&vcc->stats->rx_err);
27203 return;
27204 }
27205 if (stat & SAR_RSQE_CRC) {
27206 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
27207 recycle_rx_pool_skb(card, rpp);
27208 - atomic_inc(&vcc->stats->rx_err);
27209 + atomic_inc_unchecked(&vcc->stats->rx_err);
27210 return;
27211 }
27212 if (skb_queue_len(&rpp->queue) > 1) {
27213 @@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
27214 RXPRINTK("%s: Can't alloc RX skb.\n",
27215 card->name);
27216 recycle_rx_pool_skb(card, rpp);
27217 - atomic_inc(&vcc->stats->rx_err);
27218 + atomic_inc_unchecked(&vcc->stats->rx_err);
27219 return;
27220 }
27221 if (!atm_charge(vcc, skb->truesize)) {
27222 @@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
27223 __net_timestamp(skb);
27224
27225 vcc->push(vcc, skb);
27226 - atomic_inc(&vcc->stats->rx);
27227 + atomic_inc_unchecked(&vcc->stats->rx);
27228
27229 return;
27230 }
27231 @@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
27232 __net_timestamp(skb);
27233
27234 vcc->push(vcc, skb);
27235 - atomic_inc(&vcc->stats->rx);
27236 + atomic_inc_unchecked(&vcc->stats->rx);
27237
27238 if (skb->truesize > SAR_FB_SIZE_3)
27239 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
27240 @@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
27241 if (vcc->qos.aal != ATM_AAL0) {
27242 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
27243 card->name, vpi, vci);
27244 - atomic_inc(&vcc->stats->rx_drop);
27245 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27246 goto drop;
27247 }
27248
27249 if ((sb = dev_alloc_skb(64)) == NULL) {
27250 printk("%s: Can't allocate buffers for AAL0.\n",
27251 card->name);
27252 - atomic_inc(&vcc->stats->rx_err);
27253 + atomic_inc_unchecked(&vcc->stats->rx_err);
27254 goto drop;
27255 }
27256
27257 @@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
27258 ATM_SKB(sb)->vcc = vcc;
27259 __net_timestamp(sb);
27260 vcc->push(vcc, sb);
27261 - atomic_inc(&vcc->stats->rx);
27262 + atomic_inc_unchecked(&vcc->stats->rx);
27263
27264 drop:
27265 skb_pull(queue, 64);
27266 @@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
27267
27268 if (vc == NULL) {
27269 printk("%s: NULL connection in send().\n", card->name);
27270 - atomic_inc(&vcc->stats->tx_err);
27271 + atomic_inc_unchecked(&vcc->stats->tx_err);
27272 dev_kfree_skb(skb);
27273 return -EINVAL;
27274 }
27275 if (!test_bit(VCF_TX, &vc->flags)) {
27276 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
27277 - atomic_inc(&vcc->stats->tx_err);
27278 + atomic_inc_unchecked(&vcc->stats->tx_err);
27279 dev_kfree_skb(skb);
27280 return -EINVAL;
27281 }
27282 @@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
27283 break;
27284 default:
27285 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
27286 - atomic_inc(&vcc->stats->tx_err);
27287 + atomic_inc_unchecked(&vcc->stats->tx_err);
27288 dev_kfree_skb(skb);
27289 return -EINVAL;
27290 }
27291
27292 if (skb_shinfo(skb)->nr_frags != 0) {
27293 printk("%s: No scatter-gather yet.\n", card->name);
27294 - atomic_inc(&vcc->stats->tx_err);
27295 + atomic_inc_unchecked(&vcc->stats->tx_err);
27296 dev_kfree_skb(skb);
27297 return -EINVAL;
27298 }
27299 @@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
27300
27301 err = queue_skb(card, vc, skb, oam);
27302 if (err) {
27303 - atomic_inc(&vcc->stats->tx_err);
27304 + atomic_inc_unchecked(&vcc->stats->tx_err);
27305 dev_kfree_skb(skb);
27306 return err;
27307 }
27308 @@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
27309 skb = dev_alloc_skb(64);
27310 if (!skb) {
27311 printk("%s: Out of memory in send_oam().\n", card->name);
27312 - atomic_inc(&vcc->stats->tx_err);
27313 + atomic_inc_unchecked(&vcc->stats->tx_err);
27314 return -ENOMEM;
27315 }
27316 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
27317 diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
27318 index 9e373ba..cf93727 100644
27319 --- a/drivers/atm/iphase.c
27320 +++ b/drivers/atm/iphase.c
27321 @@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
27322 status = (u_short) (buf_desc_ptr->desc_mode);
27323 if (status & (RX_CER | RX_PTE | RX_OFL))
27324 {
27325 - atomic_inc(&vcc->stats->rx_err);
27326 + atomic_inc_unchecked(&vcc->stats->rx_err);
27327 IF_ERR(printk("IA: bad packet, dropping it");)
27328 if (status & RX_CER) {
27329 IF_ERR(printk(" cause: packet CRC error\n");)
27330 @@ -1169,7 +1169,7 @@ static int rx_pkt(struct atm_dev *dev)
27331 len = dma_addr - buf_addr;
27332 if (len > iadev->rx_buf_sz) {
27333 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
27334 - atomic_inc(&vcc->stats->rx_err);
27335 + atomic_inc_unchecked(&vcc->stats->rx_err);
27336 goto out_free_desc;
27337 }
27338
27339 @@ -1319,7 +1319,7 @@ static void rx_dle_intr(struct atm_dev *dev)
27340 ia_vcc = INPH_IA_VCC(vcc);
27341 if (ia_vcc == NULL)
27342 {
27343 - atomic_inc(&vcc->stats->rx_err);
27344 + atomic_inc_unchecked(&vcc->stats->rx_err);
27345 atm_return(vcc, skb->truesize);
27346 dev_kfree_skb_any(skb);
27347 goto INCR_DLE;
27348 @@ -1331,7 +1331,7 @@ static void rx_dle_intr(struct atm_dev *dev)
27349 if ((length > iadev->rx_buf_sz) || (length >
27350 (skb->len - sizeof(struct cpcs_trailer))))
27351 {
27352 - atomic_inc(&vcc->stats->rx_err);
27353 + atomic_inc_unchecked(&vcc->stats->rx_err);
27354 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
27355 length, skb->len);)
27356 atm_return(vcc, skb->truesize);
27357 @@ -1347,7 +1347,7 @@ static void rx_dle_intr(struct atm_dev *dev)
27358
27359 IF_RX(printk("rx_dle_intr: skb push");)
27360 vcc->push(vcc,skb);
27361 - atomic_inc(&vcc->stats->rx);
27362 + atomic_inc_unchecked(&vcc->stats->rx);
27363 iadev->rx_pkt_cnt++;
27364 }
27365 INCR_DLE:
27366 @@ -2827,15 +2827,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
27367 {
27368 struct k_sonet_stats *stats;
27369 stats = &PRIV(_ia_dev[board])->sonet_stats;
27370 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
27371 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
27372 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
27373 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
27374 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
27375 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
27376 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
27377 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
27378 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
27379 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
27380 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
27381 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
27382 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
27383 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
27384 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
27385 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
27386 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
27387 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
27388 }
27389 ia_cmds.status = 0;
27390 break;
27391 @@ -2940,7 +2940,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
27392 if ((desc == 0) || (desc > iadev->num_tx_desc))
27393 {
27394 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
27395 - atomic_inc(&vcc->stats->tx);
27396 + atomic_inc_unchecked(&vcc->stats->tx);
27397 if (vcc->pop)
27398 vcc->pop(vcc, skb);
27399 else
27400 @@ -3045,14 +3045,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
27401 ATM_DESC(skb) = vcc->vci;
27402 skb_queue_tail(&iadev->tx_dma_q, skb);
27403
27404 - atomic_inc(&vcc->stats->tx);
27405 + atomic_inc_unchecked(&vcc->stats->tx);
27406 iadev->tx_pkt_cnt++;
27407 /* Increment transaction counter */
27408 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
27409
27410 #if 0
27411 /* add flow control logic */
27412 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
27413 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
27414 if (iavcc->vc_desc_cnt > 10) {
27415 vcc->tx_quota = vcc->tx_quota * 3 / 4;
27416 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
27417 diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
27418 index f556969..0da15eb 100644
27419 --- a/drivers/atm/lanai.c
27420 +++ b/drivers/atm/lanai.c
27421 @@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
27422 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
27423 lanai_endtx(lanai, lvcc);
27424 lanai_free_skb(lvcc->tx.atmvcc, skb);
27425 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
27426 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
27427 }
27428
27429 /* Try to fill the buffer - don't call unless there is backlog */
27430 @@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
27431 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
27432 __net_timestamp(skb);
27433 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
27434 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
27435 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
27436 out:
27437 lvcc->rx.buf.ptr = end;
27438 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
27439 @@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
27440 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
27441 "vcc %d\n", lanai->number, (unsigned int) s, vci);
27442 lanai->stats.service_rxnotaal5++;
27443 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
27444 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
27445 return 0;
27446 }
27447 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
27448 @@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
27449 int bytes;
27450 read_unlock(&vcc_sklist_lock);
27451 DPRINTK("got trashed rx pdu on vci %d\n", vci);
27452 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
27453 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
27454 lvcc->stats.x.aal5.service_trash++;
27455 bytes = (SERVICE_GET_END(s) * 16) -
27456 (((unsigned long) lvcc->rx.buf.ptr) -
27457 @@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
27458 }
27459 if (s & SERVICE_STREAM) {
27460 read_unlock(&vcc_sklist_lock);
27461 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
27462 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
27463 lvcc->stats.x.aal5.service_stream++;
27464 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
27465 "PDU on VCI %d!\n", lanai->number, vci);
27466 @@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
27467 return 0;
27468 }
27469 DPRINTK("got rx crc error on vci %d\n", vci);
27470 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
27471 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
27472 lvcc->stats.x.aal5.service_rxcrc++;
27473 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
27474 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
27475 diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
27476 index 1c70c45..300718d 100644
27477 --- a/drivers/atm/nicstar.c
27478 +++ b/drivers/atm/nicstar.c
27479 @@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
27480 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
27481 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
27482 card->index);
27483 - atomic_inc(&vcc->stats->tx_err);
27484 + atomic_inc_unchecked(&vcc->stats->tx_err);
27485 dev_kfree_skb_any(skb);
27486 return -EINVAL;
27487 }
27488 @@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
27489 if (!vc->tx) {
27490 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
27491 card->index);
27492 - atomic_inc(&vcc->stats->tx_err);
27493 + atomic_inc_unchecked(&vcc->stats->tx_err);
27494 dev_kfree_skb_any(skb);
27495 return -EINVAL;
27496 }
27497 @@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
27498 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
27499 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
27500 card->index);
27501 - atomic_inc(&vcc->stats->tx_err);
27502 + atomic_inc_unchecked(&vcc->stats->tx_err);
27503 dev_kfree_skb_any(skb);
27504 return -EINVAL;
27505 }
27506
27507 if (skb_shinfo(skb)->nr_frags != 0) {
27508 printk("nicstar%d: No scatter-gather yet.\n", card->index);
27509 - atomic_inc(&vcc->stats->tx_err);
27510 + atomic_inc_unchecked(&vcc->stats->tx_err);
27511 dev_kfree_skb_any(skb);
27512 return -EINVAL;
27513 }
27514 @@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
27515 }
27516
27517 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
27518 - atomic_inc(&vcc->stats->tx_err);
27519 + atomic_inc_unchecked(&vcc->stats->tx_err);
27520 dev_kfree_skb_any(skb);
27521 return -EIO;
27522 }
27523 - atomic_inc(&vcc->stats->tx);
27524 + atomic_inc_unchecked(&vcc->stats->tx);
27525
27526 return 0;
27527 }
27528 @@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27529 printk
27530 ("nicstar%d: Can't allocate buffers for aal0.\n",
27531 card->index);
27532 - atomic_add(i, &vcc->stats->rx_drop);
27533 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
27534 break;
27535 }
27536 if (!atm_charge(vcc, sb->truesize)) {
27537 RXPRINTK
27538 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
27539 card->index);
27540 - atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
27541 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
27542 dev_kfree_skb_any(sb);
27543 break;
27544 }
27545 @@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27546 ATM_SKB(sb)->vcc = vcc;
27547 __net_timestamp(sb);
27548 vcc->push(vcc, sb);
27549 - atomic_inc(&vcc->stats->rx);
27550 + atomic_inc_unchecked(&vcc->stats->rx);
27551 cell += ATM_CELL_PAYLOAD;
27552 }
27553
27554 @@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27555 if (iovb == NULL) {
27556 printk("nicstar%d: Out of iovec buffers.\n",
27557 card->index);
27558 - atomic_inc(&vcc->stats->rx_drop);
27559 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27560 recycle_rx_buf(card, skb);
27561 return;
27562 }
27563 @@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27564 small or large buffer itself. */
27565 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
27566 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
27567 - atomic_inc(&vcc->stats->rx_err);
27568 + atomic_inc_unchecked(&vcc->stats->rx_err);
27569 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
27570 NS_MAX_IOVECS);
27571 NS_PRV_IOVCNT(iovb) = 0;
27572 @@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27573 ("nicstar%d: Expected a small buffer, and this is not one.\n",
27574 card->index);
27575 which_list(card, skb);
27576 - atomic_inc(&vcc->stats->rx_err);
27577 + atomic_inc_unchecked(&vcc->stats->rx_err);
27578 recycle_rx_buf(card, skb);
27579 vc->rx_iov = NULL;
27580 recycle_iov_buf(card, iovb);
27581 @@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27582 ("nicstar%d: Expected a large buffer, and this is not one.\n",
27583 card->index);
27584 which_list(card, skb);
27585 - atomic_inc(&vcc->stats->rx_err);
27586 + atomic_inc_unchecked(&vcc->stats->rx_err);
27587 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
27588 NS_PRV_IOVCNT(iovb));
27589 vc->rx_iov = NULL;
27590 @@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27591 printk(" - PDU size mismatch.\n");
27592 else
27593 printk(".\n");
27594 - atomic_inc(&vcc->stats->rx_err);
27595 + atomic_inc_unchecked(&vcc->stats->rx_err);
27596 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
27597 NS_PRV_IOVCNT(iovb));
27598 vc->rx_iov = NULL;
27599 @@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27600 /* skb points to a small buffer */
27601 if (!atm_charge(vcc, skb->truesize)) {
27602 push_rxbufs(card, skb);
27603 - atomic_inc(&vcc->stats->rx_drop);
27604 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27605 } else {
27606 skb_put(skb, len);
27607 dequeue_sm_buf(card, skb);
27608 @@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27609 ATM_SKB(skb)->vcc = vcc;
27610 __net_timestamp(skb);
27611 vcc->push(vcc, skb);
27612 - atomic_inc(&vcc->stats->rx);
27613 + atomic_inc_unchecked(&vcc->stats->rx);
27614 }
27615 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
27616 struct sk_buff *sb;
27617 @@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27618 if (len <= NS_SMBUFSIZE) {
27619 if (!atm_charge(vcc, sb->truesize)) {
27620 push_rxbufs(card, sb);
27621 - atomic_inc(&vcc->stats->rx_drop);
27622 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27623 } else {
27624 skb_put(sb, len);
27625 dequeue_sm_buf(card, sb);
27626 @@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27627 ATM_SKB(sb)->vcc = vcc;
27628 __net_timestamp(sb);
27629 vcc->push(vcc, sb);
27630 - atomic_inc(&vcc->stats->rx);
27631 + atomic_inc_unchecked(&vcc->stats->rx);
27632 }
27633
27634 push_rxbufs(card, skb);
27635 @@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27636
27637 if (!atm_charge(vcc, skb->truesize)) {
27638 push_rxbufs(card, skb);
27639 - atomic_inc(&vcc->stats->rx_drop);
27640 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27641 } else {
27642 dequeue_lg_buf(card, skb);
27643 #ifdef NS_USE_DESTRUCTORS
27644 @@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27645 ATM_SKB(skb)->vcc = vcc;
27646 __net_timestamp(skb);
27647 vcc->push(vcc, skb);
27648 - atomic_inc(&vcc->stats->rx);
27649 + atomic_inc_unchecked(&vcc->stats->rx);
27650 }
27651
27652 push_rxbufs(card, sb);
27653 @@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27654 printk
27655 ("nicstar%d: Out of huge buffers.\n",
27656 card->index);
27657 - atomic_inc(&vcc->stats->rx_drop);
27658 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27659 recycle_iovec_rx_bufs(card,
27660 (struct iovec *)
27661 iovb->data,
27662 @@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27663 card->hbpool.count++;
27664 } else
27665 dev_kfree_skb_any(hb);
27666 - atomic_inc(&vcc->stats->rx_drop);
27667 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27668 } else {
27669 /* Copy the small buffer to the huge buffer */
27670 sb = (struct sk_buff *)iov->iov_base;
27671 @@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27672 #endif /* NS_USE_DESTRUCTORS */
27673 __net_timestamp(hb);
27674 vcc->push(vcc, hb);
27675 - atomic_inc(&vcc->stats->rx);
27676 + atomic_inc_unchecked(&vcc->stats->rx);
27677 }
27678 }
27679
27680 diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
27681 index e8cd652..bbbd1fc 100644
27682 --- a/drivers/atm/solos-pci.c
27683 +++ b/drivers/atm/solos-pci.c
27684 @@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
27685 }
27686 atm_charge(vcc, skb->truesize);
27687 vcc->push(vcc, skb);
27688 - atomic_inc(&vcc->stats->rx);
27689 + atomic_inc_unchecked(&vcc->stats->rx);
27690 break;
27691
27692 case PKT_STATUS:
27693 @@ -1008,7 +1008,7 @@ static uint32_t fpga_tx(struct solos_card *card)
27694 vcc = SKB_CB(oldskb)->vcc;
27695
27696 if (vcc) {
27697 - atomic_inc(&vcc->stats->tx);
27698 + atomic_inc_unchecked(&vcc->stats->tx);
27699 solos_pop(vcc, oldskb);
27700 } else
27701 dev_kfree_skb_irq(oldskb);
27702 diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
27703 index 90f1ccc..04c4a1e 100644
27704 --- a/drivers/atm/suni.c
27705 +++ b/drivers/atm/suni.c
27706 @@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
27707
27708
27709 #define ADD_LIMITED(s,v) \
27710 - atomic_add((v),&stats->s); \
27711 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
27712 + atomic_add_unchecked((v),&stats->s); \
27713 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
27714
27715
27716 static void suni_hz(unsigned long from_timer)
27717 diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
27718 index 5120a96..e2572bd 100644
27719 --- a/drivers/atm/uPD98402.c
27720 +++ b/drivers/atm/uPD98402.c
27721 @@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
27722 struct sonet_stats tmp;
27723 int error = 0;
27724
27725 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
27726 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
27727 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
27728 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
27729 if (zero && !error) {
27730 @@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
27731
27732
27733 #define ADD_LIMITED(s,v) \
27734 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
27735 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
27736 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
27737 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
27738 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
27739 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
27740
27741
27742 static void stat_event(struct atm_dev *dev)
27743 @@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
27744 if (reason & uPD98402_INT_PFM) stat_event(dev);
27745 if (reason & uPD98402_INT_PCO) {
27746 (void) GET(PCOCR); /* clear interrupt cause */
27747 - atomic_add(GET(HECCT),
27748 + atomic_add_unchecked(GET(HECCT),
27749 &PRIV(dev)->sonet_stats.uncorr_hcs);
27750 }
27751 if ((reason & uPD98402_INT_RFO) &&
27752 @@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
27753 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
27754 uPD98402_INT_LOS),PIMR); /* enable them */
27755 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
27756 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
27757 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
27758 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
27759 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
27760 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
27761 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
27762 return 0;
27763 }
27764
27765 diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
27766 index d889f56..17eb71e 100644
27767 --- a/drivers/atm/zatm.c
27768 +++ b/drivers/atm/zatm.c
27769 @@ -460,7 +460,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
27770 }
27771 if (!size) {
27772 dev_kfree_skb_irq(skb);
27773 - if (vcc) atomic_inc(&vcc->stats->rx_err);
27774 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
27775 continue;
27776 }
27777 if (!atm_charge(vcc,skb->truesize)) {
27778 @@ -470,7 +470,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
27779 skb->len = size;
27780 ATM_SKB(skb)->vcc = vcc;
27781 vcc->push(vcc,skb);
27782 - atomic_inc(&vcc->stats->rx);
27783 + atomic_inc_unchecked(&vcc->stats->rx);
27784 }
27785 zout(pos & 0xffff,MTA(mbx));
27786 #if 0 /* probably a stupid idea */
27787 @@ -734,7 +734,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
27788 skb_queue_head(&zatm_vcc->backlog,skb);
27789 break;
27790 }
27791 - atomic_inc(&vcc->stats->tx);
27792 + atomic_inc_unchecked(&vcc->stats->tx);
27793 wake_up(&zatm_vcc->tx_wait);
27794 }
27795
27796 diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
27797 index 8493536..31adee0 100644
27798 --- a/drivers/base/devtmpfs.c
27799 +++ b/drivers/base/devtmpfs.c
27800 @@ -368,7 +368,7 @@ int devtmpfs_mount(const char *mntdir)
27801 if (!thread)
27802 return 0;
27803
27804 - err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
27805 + err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
27806 if (err)
27807 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
27808 else
27809 diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
27810 index caf995f..6f76697 100644
27811 --- a/drivers/base/power/wakeup.c
27812 +++ b/drivers/base/power/wakeup.c
27813 @@ -30,14 +30,14 @@ bool events_check_enabled;
27814 * They need to be modified together atomically, so it's better to use one
27815 * atomic variable to hold them both.
27816 */
27817 -static atomic_t combined_event_count = ATOMIC_INIT(0);
27818 +static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
27819
27820 #define IN_PROGRESS_BITS (sizeof(int) * 4)
27821 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
27822
27823 static void split_counters(unsigned int *cnt, unsigned int *inpr)
27824 {
27825 - unsigned int comb = atomic_read(&combined_event_count);
27826 + unsigned int comb = atomic_read_unchecked(&combined_event_count);
27827
27828 *cnt = (comb >> IN_PROGRESS_BITS);
27829 *inpr = comb & MAX_IN_PROGRESS;
27830 @@ -353,7 +353,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
27831 ws->last_time = ktime_get();
27832
27833 /* Increment the counter of events in progress. */
27834 - atomic_inc(&combined_event_count);
27835 + atomic_inc_unchecked(&combined_event_count);
27836 }
27837
27838 /**
27839 @@ -443,7 +443,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
27840 * Increment the counter of registered wakeup events and decrement the
27841 * couter of wakeup events in progress simultaneously.
27842 */
27843 - atomic_add(MAX_IN_PROGRESS, &combined_event_count);
27844 + atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
27845 }
27846
27847 /**
27848 diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
27849 index b0f553b..77b928b 100644
27850 --- a/drivers/block/cciss.c
27851 +++ b/drivers/block/cciss.c
27852 @@ -1198,6 +1198,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
27853 int err;
27854 u32 cp;
27855
27856 + memset(&arg64, 0, sizeof(arg64));
27857 +
27858 err = 0;
27859 err |=
27860 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
27861 @@ -3007,7 +3009,7 @@ static void start_io(ctlr_info_t *h)
27862 while (!list_empty(&h->reqQ)) {
27863 c = list_entry(h->reqQ.next, CommandList_struct, list);
27864 /* can't do anything if fifo is full */
27865 - if ((h->access.fifo_full(h))) {
27866 + if ((h->access->fifo_full(h))) {
27867 dev_warn(&h->pdev->dev, "fifo full\n");
27868 break;
27869 }
27870 @@ -3017,7 +3019,7 @@ static void start_io(ctlr_info_t *h)
27871 h->Qdepth--;
27872
27873 /* Tell the controller execute command */
27874 - h->access.submit_command(h, c);
27875 + h->access->submit_command(h, c);
27876
27877 /* Put job onto the completed Q */
27878 addQ(&h->cmpQ, c);
27879 @@ -3443,17 +3445,17 @@ startio:
27880
27881 static inline unsigned long get_next_completion(ctlr_info_t *h)
27882 {
27883 - return h->access.command_completed(h);
27884 + return h->access->command_completed(h);
27885 }
27886
27887 static inline int interrupt_pending(ctlr_info_t *h)
27888 {
27889 - return h->access.intr_pending(h);
27890 + return h->access->intr_pending(h);
27891 }
27892
27893 static inline long interrupt_not_for_us(ctlr_info_t *h)
27894 {
27895 - return ((h->access.intr_pending(h) == 0) ||
27896 + return ((h->access->intr_pending(h) == 0) ||
27897 (h->interrupts_enabled == 0));
27898 }
27899
27900 @@ -3486,7 +3488,7 @@ static inline u32 next_command(ctlr_info_t *h)
27901 u32 a;
27902
27903 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
27904 - return h->access.command_completed(h);
27905 + return h->access->command_completed(h);
27906
27907 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
27908 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
27909 @@ -4044,7 +4046,7 @@ static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
27910 trans_support & CFGTBL_Trans_use_short_tags);
27911
27912 /* Change the access methods to the performant access methods */
27913 - h->access = SA5_performant_access;
27914 + h->access = &SA5_performant_access;
27915 h->transMethod = CFGTBL_Trans_Performant;
27916
27917 return;
27918 @@ -4316,7 +4318,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
27919 if (prod_index < 0)
27920 return -ENODEV;
27921 h->product_name = products[prod_index].product_name;
27922 - h->access = *(products[prod_index].access);
27923 + h->access = products[prod_index].access;
27924
27925 if (cciss_board_disabled(h)) {
27926 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
27927 @@ -5041,7 +5043,7 @@ reinit_after_soft_reset:
27928 }
27929
27930 /* make sure the board interrupts are off */
27931 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
27932 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
27933 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
27934 if (rc)
27935 goto clean2;
27936 @@ -5093,7 +5095,7 @@ reinit_after_soft_reset:
27937 * fake ones to scoop up any residual completions.
27938 */
27939 spin_lock_irqsave(&h->lock, flags);
27940 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
27941 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
27942 spin_unlock_irqrestore(&h->lock, flags);
27943 free_irq(h->intr[h->intr_mode], h);
27944 rc = cciss_request_irq(h, cciss_msix_discard_completions,
27945 @@ -5113,9 +5115,9 @@ reinit_after_soft_reset:
27946 dev_info(&h->pdev->dev, "Board READY.\n");
27947 dev_info(&h->pdev->dev,
27948 "Waiting for stale completions to drain.\n");
27949 - h->access.set_intr_mask(h, CCISS_INTR_ON);
27950 + h->access->set_intr_mask(h, CCISS_INTR_ON);
27951 msleep(10000);
27952 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
27953 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
27954
27955 rc = controller_reset_failed(h->cfgtable);
27956 if (rc)
27957 @@ -5138,7 +5140,7 @@ reinit_after_soft_reset:
27958 cciss_scsi_setup(h);
27959
27960 /* Turn the interrupts on so we can service requests */
27961 - h->access.set_intr_mask(h, CCISS_INTR_ON);
27962 + h->access->set_intr_mask(h, CCISS_INTR_ON);
27963
27964 /* Get the firmware version */
27965 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
27966 @@ -5211,7 +5213,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
27967 kfree(flush_buf);
27968 if (return_code != IO_OK)
27969 dev_warn(&h->pdev->dev, "Error flushing cache\n");
27970 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
27971 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
27972 free_irq(h->intr[h->intr_mode], h);
27973 }
27974
27975 diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
27976 index 7fda30e..eb5dfe0 100644
27977 --- a/drivers/block/cciss.h
27978 +++ b/drivers/block/cciss.h
27979 @@ -101,7 +101,7 @@ struct ctlr_info
27980 /* information about each logical volume */
27981 drive_info_struct *drv[CISS_MAX_LUN];
27982
27983 - struct access_method access;
27984 + struct access_method *access;
27985
27986 /* queue and queue Info */
27987 struct list_head reqQ;
27988 diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
27989 index 9125bbe..eede5c8 100644
27990 --- a/drivers/block/cpqarray.c
27991 +++ b/drivers/block/cpqarray.c
27992 @@ -404,7 +404,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
27993 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
27994 goto Enomem4;
27995 }
27996 - hba[i]->access.set_intr_mask(hba[i], 0);
27997 + hba[i]->access->set_intr_mask(hba[i], 0);
27998 if (request_irq(hba[i]->intr, do_ida_intr,
27999 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
28000 {
28001 @@ -459,7 +459,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
28002 add_timer(&hba[i]->timer);
28003
28004 /* Enable IRQ now that spinlock and rate limit timer are set up */
28005 - hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
28006 + hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
28007
28008 for(j=0; j<NWD; j++) {
28009 struct gendisk *disk = ida_gendisk[i][j];
28010 @@ -694,7 +694,7 @@ DBGINFO(
28011 for(i=0; i<NR_PRODUCTS; i++) {
28012 if (board_id == products[i].board_id) {
28013 c->product_name = products[i].product_name;
28014 - c->access = *(products[i].access);
28015 + c->access = products[i].access;
28016 break;
28017 }
28018 }
28019 @@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detect(void)
28020 hba[ctlr]->intr = intr;
28021 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
28022 hba[ctlr]->product_name = products[j].product_name;
28023 - hba[ctlr]->access = *(products[j].access);
28024 + hba[ctlr]->access = products[j].access;
28025 hba[ctlr]->ctlr = ctlr;
28026 hba[ctlr]->board_id = board_id;
28027 hba[ctlr]->pci_dev = NULL; /* not PCI */
28028 @@ -980,7 +980,7 @@ static void start_io(ctlr_info_t *h)
28029
28030 while((c = h->reqQ) != NULL) {
28031 /* Can't do anything if we're busy */
28032 - if (h->access.fifo_full(h) == 0)
28033 + if (h->access->fifo_full(h) == 0)
28034 return;
28035
28036 /* Get the first entry from the request Q */
28037 @@ -988,7 +988,7 @@ static void start_io(ctlr_info_t *h)
28038 h->Qdepth--;
28039
28040 /* Tell the controller to do our bidding */
28041 - h->access.submit_command(h, c);
28042 + h->access->submit_command(h, c);
28043
28044 /* Get onto the completion Q */
28045 addQ(&h->cmpQ, c);
28046 @@ -1050,7 +1050,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
28047 unsigned long flags;
28048 __u32 a,a1;
28049
28050 - istat = h->access.intr_pending(h);
28051 + istat = h->access->intr_pending(h);
28052 /* Is this interrupt for us? */
28053 if (istat == 0)
28054 return IRQ_NONE;
28055 @@ -1061,7 +1061,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
28056 */
28057 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
28058 if (istat & FIFO_NOT_EMPTY) {
28059 - while((a = h->access.command_completed(h))) {
28060 + while((a = h->access->command_completed(h))) {
28061 a1 = a; a &= ~3;
28062 if ((c = h->cmpQ) == NULL)
28063 {
28064 @@ -1449,11 +1449,11 @@ static int sendcmd(
28065 /*
28066 * Disable interrupt
28067 */
28068 - info_p->access.set_intr_mask(info_p, 0);
28069 + info_p->access->set_intr_mask(info_p, 0);
28070 /* Make sure there is room in the command FIFO */
28071 /* Actually it should be completely empty at this time. */
28072 for (i = 200000; i > 0; i--) {
28073 - temp = info_p->access.fifo_full(info_p);
28074 + temp = info_p->access->fifo_full(info_p);
28075 if (temp != 0) {
28076 break;
28077 }
28078 @@ -1466,7 +1466,7 @@ DBG(
28079 /*
28080 * Send the cmd
28081 */
28082 - info_p->access.submit_command(info_p, c);
28083 + info_p->access->submit_command(info_p, c);
28084 complete = pollcomplete(ctlr);
28085
28086 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
28087 @@ -1549,9 +1549,9 @@ static int revalidate_allvol(ctlr_info_t *host)
28088 * we check the new geometry. Then turn interrupts back on when
28089 * we're done.
28090 */
28091 - host->access.set_intr_mask(host, 0);
28092 + host->access->set_intr_mask(host, 0);
28093 getgeometry(ctlr);
28094 - host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
28095 + host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
28096
28097 for(i=0; i<NWD; i++) {
28098 struct gendisk *disk = ida_gendisk[ctlr][i];
28099 @@ -1591,7 +1591,7 @@ static int pollcomplete(int ctlr)
28100 /* Wait (up to 2 seconds) for a command to complete */
28101
28102 for (i = 200000; i > 0; i--) {
28103 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
28104 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
28105 if (done == 0) {
28106 udelay(10); /* a short fixed delay */
28107 } else
28108 diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
28109 index be73e9d..7fbf140 100644
28110 --- a/drivers/block/cpqarray.h
28111 +++ b/drivers/block/cpqarray.h
28112 @@ -99,7 +99,7 @@ struct ctlr_info {
28113 drv_info_t drv[NWD];
28114 struct proc_dir_entry *proc;
28115
28116 - struct access_method access;
28117 + struct access_method *access;
28118
28119 cmdlist_t *reqQ;
28120 cmdlist_t *cmpQ;
28121 diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
28122 index 8d68056..e67050f 100644
28123 --- a/drivers/block/drbd/drbd_int.h
28124 +++ b/drivers/block/drbd/drbd_int.h
28125 @@ -736,7 +736,7 @@ struct drbd_request;
28126 struct drbd_epoch {
28127 struct list_head list;
28128 unsigned int barrier_nr;
28129 - atomic_t epoch_size; /* increased on every request added. */
28130 + atomic_unchecked_t epoch_size; /* increased on every request added. */
28131 atomic_t active; /* increased on every req. added, and dec on every finished. */
28132 unsigned long flags;
28133 };
28134 @@ -1108,7 +1108,7 @@ struct drbd_conf {
28135 void *int_dig_in;
28136 void *int_dig_vv;
28137 wait_queue_head_t seq_wait;
28138 - atomic_t packet_seq;
28139 + atomic_unchecked_t packet_seq;
28140 unsigned int peer_seq;
28141 spinlock_t peer_seq_lock;
28142 unsigned int minor;
28143 @@ -1617,30 +1617,30 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
28144
28145 static inline void drbd_tcp_cork(struct socket *sock)
28146 {
28147 - int __user val = 1;
28148 + int val = 1;
28149 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
28150 - (char __user *)&val, sizeof(val));
28151 + (char __force_user *)&val, sizeof(val));
28152 }
28153
28154 static inline void drbd_tcp_uncork(struct socket *sock)
28155 {
28156 - int __user val = 0;
28157 + int val = 0;
28158 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
28159 - (char __user *)&val, sizeof(val));
28160 + (char __force_user *)&val, sizeof(val));
28161 }
28162
28163 static inline void drbd_tcp_nodelay(struct socket *sock)
28164 {
28165 - int __user val = 1;
28166 + int val = 1;
28167 (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
28168 - (char __user *)&val, sizeof(val));
28169 + (char __force_user *)&val, sizeof(val));
28170 }
28171
28172 static inline void drbd_tcp_quickack(struct socket *sock)
28173 {
28174 - int __user val = 2;
28175 + int val = 2;
28176 (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
28177 - (char __user *)&val, sizeof(val));
28178 + (char __force_user *)&val, sizeof(val));
28179 }
28180
28181 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
28182 diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
28183 index 211fc44..c5116f1 100644
28184 --- a/drivers/block/drbd/drbd_main.c
28185 +++ b/drivers/block/drbd/drbd_main.c
28186 @@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
28187 p.sector = sector;
28188 p.block_id = block_id;
28189 p.blksize = blksize;
28190 - p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
28191 + p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
28192
28193 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
28194 return false;
28195 @@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
28196 p.sector = cpu_to_be64(req->sector);
28197 p.block_id = (unsigned long)req;
28198 p.seq_num = cpu_to_be32(req->seq_num =
28199 - atomic_add_return(1, &mdev->packet_seq));
28200 + atomic_add_return_unchecked(1, &mdev->packet_seq));
28201
28202 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
28203
28204 @@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
28205 atomic_set(&mdev->unacked_cnt, 0);
28206 atomic_set(&mdev->local_cnt, 0);
28207 atomic_set(&mdev->net_cnt, 0);
28208 - atomic_set(&mdev->packet_seq, 0);
28209 + atomic_set_unchecked(&mdev->packet_seq, 0);
28210 atomic_set(&mdev->pp_in_use, 0);
28211 atomic_set(&mdev->pp_in_use_by_net, 0);
28212 atomic_set(&mdev->rs_sect_in, 0);
28213 @@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
28214 mdev->receiver.t_state);
28215
28216 /* no need to lock it, I'm the only thread alive */
28217 - if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
28218 - dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
28219 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
28220 + dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
28221 mdev->al_writ_cnt =
28222 mdev->bm_writ_cnt =
28223 mdev->read_cnt =
28224 diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
28225 index af2a250..219c74b 100644
28226 --- a/drivers/block/drbd/drbd_nl.c
28227 +++ b/drivers/block/drbd/drbd_nl.c
28228 @@ -2359,7 +2359,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
28229 module_put(THIS_MODULE);
28230 }
28231
28232 -static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
28233 +static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
28234
28235 static unsigned short *
28236 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
28237 @@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
28238 cn_reply->id.idx = CN_IDX_DRBD;
28239 cn_reply->id.val = CN_VAL_DRBD;
28240
28241 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
28242 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
28243 cn_reply->ack = 0; /* not used here. */
28244 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
28245 (int)((char *)tl - (char *)reply->tag_list);
28246 @@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
28247 cn_reply->id.idx = CN_IDX_DRBD;
28248 cn_reply->id.val = CN_VAL_DRBD;
28249
28250 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
28251 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
28252 cn_reply->ack = 0; /* not used here. */
28253 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
28254 (int)((char *)tl - (char *)reply->tag_list);
28255 @@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
28256 cn_reply->id.idx = CN_IDX_DRBD;
28257 cn_reply->id.val = CN_VAL_DRBD;
28258
28259 - cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
28260 + cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
28261 cn_reply->ack = 0; // not used here.
28262 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
28263 (int)((char*)tl - (char*)reply->tag_list);
28264 @@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drbd_conf *mdev)
28265 cn_reply->id.idx = CN_IDX_DRBD;
28266 cn_reply->id.val = CN_VAL_DRBD;
28267
28268 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
28269 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
28270 cn_reply->ack = 0; /* not used here. */
28271 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
28272 (int)((char *)tl - (char *)reply->tag_list);
28273 diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
28274 index 43beaca..4a5b1dd 100644
28275 --- a/drivers/block/drbd/drbd_receiver.c
28276 +++ b/drivers/block/drbd/drbd_receiver.c
28277 @@ -894,7 +894,7 @@ retry:
28278 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
28279 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
28280
28281 - atomic_set(&mdev->packet_seq, 0);
28282 + atomic_set_unchecked(&mdev->packet_seq, 0);
28283 mdev->peer_seq = 0;
28284
28285 drbd_thread_start(&mdev->asender);
28286 @@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
28287 do {
28288 next_epoch = NULL;
28289
28290 - epoch_size = atomic_read(&epoch->epoch_size);
28291 + epoch_size = atomic_read_unchecked(&epoch->epoch_size);
28292
28293 switch (ev & ~EV_CLEANUP) {
28294 case EV_PUT:
28295 @@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
28296 rv = FE_DESTROYED;
28297 } else {
28298 epoch->flags = 0;
28299 - atomic_set(&epoch->epoch_size, 0);
28300 + atomic_set_unchecked(&epoch->epoch_size, 0);
28301 /* atomic_set(&epoch->active, 0); is already zero */
28302 if (rv == FE_STILL_LIVE)
28303 rv = FE_RECYCLED;
28304 @@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
28305 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
28306 drbd_flush(mdev);
28307
28308 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
28309 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
28310 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
28311 if (epoch)
28312 break;
28313 }
28314
28315 epoch = mdev->current_epoch;
28316 - wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
28317 + wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
28318
28319 D_ASSERT(atomic_read(&epoch->active) == 0);
28320 D_ASSERT(epoch->flags == 0);
28321 @@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
28322 }
28323
28324 epoch->flags = 0;
28325 - atomic_set(&epoch->epoch_size, 0);
28326 + atomic_set_unchecked(&epoch->epoch_size, 0);
28327 atomic_set(&epoch->active, 0);
28328
28329 spin_lock(&mdev->epoch_lock);
28330 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
28331 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
28332 list_add(&epoch->list, &mdev->current_epoch->list);
28333 mdev->current_epoch = epoch;
28334 mdev->epochs++;
28335 @@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
28336 spin_unlock(&mdev->peer_seq_lock);
28337
28338 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
28339 - atomic_inc(&mdev->current_epoch->epoch_size);
28340 + atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
28341 return drbd_drain_block(mdev, data_size);
28342 }
28343
28344 @@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
28345
28346 spin_lock(&mdev->epoch_lock);
28347 e->epoch = mdev->current_epoch;
28348 - atomic_inc(&e->epoch->epoch_size);
28349 + atomic_inc_unchecked(&e->epoch->epoch_size);
28350 atomic_inc(&e->epoch->active);
28351 spin_unlock(&mdev->epoch_lock);
28352
28353 @@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
28354 D_ASSERT(list_empty(&mdev->done_ee));
28355
28356 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
28357 - atomic_set(&mdev->current_epoch->epoch_size, 0);
28358 + atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
28359 D_ASSERT(list_empty(&mdev->current_epoch->list));
28360 }
28361
28362 diff --git a/drivers/block/loop.c b/drivers/block/loop.c
28363 index cd50435..ba1ffb5 100644
28364 --- a/drivers/block/loop.c
28365 +++ b/drivers/block/loop.c
28366 @@ -226,7 +226,7 @@ static int __do_lo_send_write(struct file *file,
28367 mm_segment_t old_fs = get_fs();
28368
28369 set_fs(get_ds());
28370 - bw = file->f_op->write(file, buf, len, &pos);
28371 + bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
28372 set_fs(old_fs);
28373 if (likely(bw == len))
28374 return 0;
28375 diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
28376 index 4364303..9adf4ee 100644
28377 --- a/drivers/char/Kconfig
28378 +++ b/drivers/char/Kconfig
28379 @@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
28380
28381 config DEVKMEM
28382 bool "/dev/kmem virtual device support"
28383 - default y
28384 + default n
28385 + depends on !GRKERNSEC_KMEM
28386 help
28387 Say Y here if you want to support the /dev/kmem device. The
28388 /dev/kmem device is rarely used, but can be used for certain
28389 @@ -596,6 +597,7 @@ config DEVPORT
28390 bool
28391 depends on !M68K
28392 depends on ISA || PCI
28393 + depends on !GRKERNSEC_KMEM
28394 default y
28395
28396 source "drivers/s390/char/Kconfig"
28397 diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
28398 index 2e04433..22afc64 100644
28399 --- a/drivers/char/agp/frontend.c
28400 +++ b/drivers/char/agp/frontend.c
28401 @@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
28402 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
28403 return -EFAULT;
28404
28405 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
28406 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
28407 return -EFAULT;
28408
28409 client = agp_find_client_by_pid(reserve.pid);
28410 diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
28411 index 095ab90..afad0a4 100644
28412 --- a/drivers/char/briq_panel.c
28413 +++ b/drivers/char/briq_panel.c
28414 @@ -9,6 +9,7 @@
28415 #include <linux/types.h>
28416 #include <linux/errno.h>
28417 #include <linux/tty.h>
28418 +#include <linux/mutex.h>
28419 #include <linux/timer.h>
28420 #include <linux/kernel.h>
28421 #include <linux/wait.h>
28422 @@ -34,6 +35,7 @@ static int vfd_is_open;
28423 static unsigned char vfd[40];
28424 static int vfd_cursor;
28425 static unsigned char ledpb, led;
28426 +static DEFINE_MUTEX(vfd_mutex);
28427
28428 static void update_vfd(void)
28429 {
28430 @@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
28431 if (!vfd_is_open)
28432 return -EBUSY;
28433
28434 + mutex_lock(&vfd_mutex);
28435 for (;;) {
28436 char c;
28437 if (!indx)
28438 break;
28439 - if (get_user(c, buf))
28440 + if (get_user(c, buf)) {
28441 + mutex_unlock(&vfd_mutex);
28442 return -EFAULT;
28443 + }
28444 if (esc) {
28445 set_led(c);
28446 esc = 0;
28447 @@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
28448 buf++;
28449 }
28450 update_vfd();
28451 + mutex_unlock(&vfd_mutex);
28452
28453 return len;
28454 }
28455 diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
28456 index f773a9d..65cd683 100644
28457 --- a/drivers/char/genrtc.c
28458 +++ b/drivers/char/genrtc.c
28459 @@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
28460 switch (cmd) {
28461
28462 case RTC_PLL_GET:
28463 + memset(&pll, 0, sizeof(pll));
28464 if (get_rtc_pll(&pll))
28465 return -EINVAL;
28466 else
28467 diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
28468 index 0833896..cccce52 100644
28469 --- a/drivers/char/hpet.c
28470 +++ b/drivers/char/hpet.c
28471 @@ -572,7 +572,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
28472 }
28473
28474 static int
28475 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
28476 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
28477 struct hpet_info *info)
28478 {
28479 struct hpet_timer __iomem *timer;
28480 diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
28481 index 58c0e63..46c16bf 100644
28482 --- a/drivers/char/ipmi/ipmi_msghandler.c
28483 +++ b/drivers/char/ipmi/ipmi_msghandler.c
28484 @@ -415,7 +415,7 @@ struct ipmi_smi {
28485 struct proc_dir_entry *proc_dir;
28486 char proc_dir_name[10];
28487
28488 - atomic_t stats[IPMI_NUM_STATS];
28489 + atomic_unchecked_t stats[IPMI_NUM_STATS];
28490
28491 /*
28492 * run_to_completion duplicate of smb_info, smi_info
28493 @@ -448,9 +448,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
28494
28495
28496 #define ipmi_inc_stat(intf, stat) \
28497 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
28498 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
28499 #define ipmi_get_stat(intf, stat) \
28500 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
28501 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
28502
28503 static int is_lan_addr(struct ipmi_addr *addr)
28504 {
28505 @@ -2868,7 +2868,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
28506 INIT_LIST_HEAD(&intf->cmd_rcvrs);
28507 init_waitqueue_head(&intf->waitq);
28508 for (i = 0; i < IPMI_NUM_STATS; i++)
28509 - atomic_set(&intf->stats[i], 0);
28510 + atomic_set_unchecked(&intf->stats[i], 0);
28511
28512 intf->proc_dir = NULL;
28513
28514 diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
28515 index 50fcf9c..91b5528 100644
28516 --- a/drivers/char/ipmi/ipmi_si_intf.c
28517 +++ b/drivers/char/ipmi/ipmi_si_intf.c
28518 @@ -277,7 +277,7 @@ struct smi_info {
28519 unsigned char slave_addr;
28520
28521 /* Counters and things for the proc filesystem. */
28522 - atomic_t stats[SI_NUM_STATS];
28523 + atomic_unchecked_t stats[SI_NUM_STATS];
28524
28525 struct task_struct *thread;
28526
28527 @@ -286,9 +286,9 @@ struct smi_info {
28528 };
28529
28530 #define smi_inc_stat(smi, stat) \
28531 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
28532 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
28533 #define smi_get_stat(smi, stat) \
28534 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
28535 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
28536
28537 #define SI_MAX_PARMS 4
28538
28539 @@ -3230,7 +3230,7 @@ static int try_smi_init(struct smi_info *new_smi)
28540 atomic_set(&new_smi->req_events, 0);
28541 new_smi->run_to_completion = 0;
28542 for (i = 0; i < SI_NUM_STATS; i++)
28543 - atomic_set(&new_smi->stats[i], 0);
28544 + atomic_set_unchecked(&new_smi->stats[i], 0);
28545
28546 new_smi->interrupt_disabled = 1;
28547 atomic_set(&new_smi->stop_operation, 0);
28548 diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
28549 index 1aeaaba..e018570 100644
28550 --- a/drivers/char/mbcs.c
28551 +++ b/drivers/char/mbcs.c
28552 @@ -800,7 +800,7 @@ static int mbcs_remove(struct cx_dev *dev)
28553 return 0;
28554 }
28555
28556 -static const struct cx_device_id __devinitdata mbcs_id_table[] = {
28557 +static const struct cx_device_id __devinitconst mbcs_id_table[] = {
28558 {
28559 .part_num = MBCS_PART_NUM,
28560 .mfg_num = MBCS_MFG_NUM,
28561 diff --git a/drivers/char/mem.c b/drivers/char/mem.c
28562 index d6e9d08..4493e89 100644
28563 --- a/drivers/char/mem.c
28564 +++ b/drivers/char/mem.c
28565 @@ -18,6 +18,7 @@
28566 #include <linux/raw.h>
28567 #include <linux/tty.h>
28568 #include <linux/capability.h>
28569 +#include <linux/security.h>
28570 #include <linux/ptrace.h>
28571 #include <linux/device.h>
28572 #include <linux/highmem.h>
28573 @@ -35,6 +36,10 @@
28574 # include <linux/efi.h>
28575 #endif
28576
28577 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
28578 +extern const struct file_operations grsec_fops;
28579 +#endif
28580 +
28581 static inline unsigned long size_inside_page(unsigned long start,
28582 unsigned long size)
28583 {
28584 @@ -66,9 +71,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
28585
28586 while (cursor < to) {
28587 if (!devmem_is_allowed(pfn)) {
28588 +#ifdef CONFIG_GRKERNSEC_KMEM
28589 + gr_handle_mem_readwrite(from, to);
28590 +#else
28591 printk(KERN_INFO
28592 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
28593 current->comm, from, to);
28594 +#endif
28595 return 0;
28596 }
28597 cursor += PAGE_SIZE;
28598 @@ -76,6 +85,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
28599 }
28600 return 1;
28601 }
28602 +#elif defined(CONFIG_GRKERNSEC_KMEM)
28603 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
28604 +{
28605 + return 0;
28606 +}
28607 #else
28608 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
28609 {
28610 @@ -118,6 +132,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
28611
28612 while (count > 0) {
28613 unsigned long remaining;
28614 + char *temp;
28615
28616 sz = size_inside_page(p, count);
28617
28618 @@ -133,7 +148,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
28619 if (!ptr)
28620 return -EFAULT;
28621
28622 - remaining = copy_to_user(buf, ptr, sz);
28623 +#ifdef CONFIG_PAX_USERCOPY
28624 + temp = kmalloc(sz, GFP_KERNEL);
28625 + if (!temp) {
28626 + unxlate_dev_mem_ptr(p, ptr);
28627 + return -ENOMEM;
28628 + }
28629 + memcpy(temp, ptr, sz);
28630 +#else
28631 + temp = ptr;
28632 +#endif
28633 +
28634 + remaining = copy_to_user(buf, temp, sz);
28635 +
28636 +#ifdef CONFIG_PAX_USERCOPY
28637 + kfree(temp);
28638 +#endif
28639 +
28640 unxlate_dev_mem_ptr(p, ptr);
28641 if (remaining)
28642 return -EFAULT;
28643 @@ -396,9 +427,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
28644 size_t count, loff_t *ppos)
28645 {
28646 unsigned long p = *ppos;
28647 - ssize_t low_count, read, sz;
28648 + ssize_t low_count, read, sz, err = 0;
28649 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
28650 - int err = 0;
28651
28652 read = 0;
28653 if (p < (unsigned long) high_memory) {
28654 @@ -420,6 +450,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
28655 }
28656 #endif
28657 while (low_count > 0) {
28658 + char *temp;
28659 +
28660 sz = size_inside_page(p, low_count);
28661
28662 /*
28663 @@ -429,7 +461,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
28664 */
28665 kbuf = xlate_dev_kmem_ptr((char *)p);
28666
28667 - if (copy_to_user(buf, kbuf, sz))
28668 +#ifdef CONFIG_PAX_USERCOPY
28669 + temp = kmalloc(sz, GFP_KERNEL);
28670 + if (!temp)
28671 + return -ENOMEM;
28672 + memcpy(temp, kbuf, sz);
28673 +#else
28674 + temp = kbuf;
28675 +#endif
28676 +
28677 + err = copy_to_user(buf, temp, sz);
28678 +
28679 +#ifdef CONFIG_PAX_USERCOPY
28680 + kfree(temp);
28681 +#endif
28682 +
28683 + if (err)
28684 return -EFAULT;
28685 buf += sz;
28686 p += sz;
28687 @@ -867,6 +914,9 @@ static const struct memdev {
28688 #ifdef CONFIG_CRASH_DUMP
28689 [12] = { "oldmem", 0, &oldmem_fops, NULL },
28690 #endif
28691 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
28692 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
28693 +#endif
28694 };
28695
28696 static int memory_open(struct inode *inode, struct file *filp)
28697 diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
28698 index da3cfee..a5a6606 100644
28699 --- a/drivers/char/nvram.c
28700 +++ b/drivers/char/nvram.c
28701 @@ -248,7 +248,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
28702
28703 spin_unlock_irq(&rtc_lock);
28704
28705 - if (copy_to_user(buf, contents, tmp - contents))
28706 + if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
28707 return -EFAULT;
28708
28709 *ppos = i;
28710 diff --git a/drivers/char/random.c b/drivers/char/random.c
28711 index 54ca8b2..d58cb51 100644
28712 --- a/drivers/char/random.c
28713 +++ b/drivers/char/random.c
28714 @@ -261,8 +261,13 @@
28715 /*
28716 * Configuration information
28717 */
28718 +#ifdef CONFIG_GRKERNSEC_RANDNET
28719 +#define INPUT_POOL_WORDS 512
28720 +#define OUTPUT_POOL_WORDS 128
28721 +#else
28722 #define INPUT_POOL_WORDS 128
28723 #define OUTPUT_POOL_WORDS 32
28724 +#endif
28725 #define SEC_XFER_SIZE 512
28726 #define EXTRACT_SIZE 10
28727
28728 @@ -300,10 +305,17 @@ static struct poolinfo {
28729 int poolwords;
28730 int tap1, tap2, tap3, tap4, tap5;
28731 } poolinfo_table[] = {
28732 +#ifdef CONFIG_GRKERNSEC_RANDNET
28733 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
28734 + { 512, 411, 308, 208, 104, 1 },
28735 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
28736 + { 128, 103, 76, 51, 25, 1 },
28737 +#else
28738 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
28739 { 128, 103, 76, 51, 25, 1 },
28740 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
28741 { 32, 26, 20, 14, 7, 1 },
28742 +#endif
28743 #if 0
28744 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
28745 { 2048, 1638, 1231, 819, 411, 1 },
28746 @@ -913,7 +925,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
28747
28748 extract_buf(r, tmp);
28749 i = min_t(int, nbytes, EXTRACT_SIZE);
28750 - if (copy_to_user(buf, tmp, i)) {
28751 + if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
28752 ret = -EFAULT;
28753 break;
28754 }
28755 @@ -1238,7 +1250,7 @@ EXPORT_SYMBOL(generate_random_uuid);
28756 #include <linux/sysctl.h>
28757
28758 static int min_read_thresh = 8, min_write_thresh;
28759 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
28760 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
28761 static int max_write_thresh = INPUT_POOL_WORDS * 32;
28762 static char sysctl_bootid[16];
28763
28764 diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
28765 index 1ee8ce7..b778bef 100644
28766 --- a/drivers/char/sonypi.c
28767 +++ b/drivers/char/sonypi.c
28768 @@ -55,6 +55,7 @@
28769 #include <asm/uaccess.h>
28770 #include <asm/io.h>
28771 #include <asm/system.h>
28772 +#include <asm/local.h>
28773
28774 #include <linux/sonypi.h>
28775
28776 @@ -491,7 +492,7 @@ static struct sonypi_device {
28777 spinlock_t fifo_lock;
28778 wait_queue_head_t fifo_proc_list;
28779 struct fasync_struct *fifo_async;
28780 - int open_count;
28781 + local_t open_count;
28782 int model;
28783 struct input_dev *input_jog_dev;
28784 struct input_dev *input_key_dev;
28785 @@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
28786 static int sonypi_misc_release(struct inode *inode, struct file *file)
28787 {
28788 mutex_lock(&sonypi_device.lock);
28789 - sonypi_device.open_count--;
28790 + local_dec(&sonypi_device.open_count);
28791 mutex_unlock(&sonypi_device.lock);
28792 return 0;
28793 }
28794 @@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
28795 {
28796 mutex_lock(&sonypi_device.lock);
28797 /* Flush input queue on first open */
28798 - if (!sonypi_device.open_count)
28799 + if (!local_read(&sonypi_device.open_count))
28800 kfifo_reset(&sonypi_device.fifo);
28801 - sonypi_device.open_count++;
28802 + local_inc(&sonypi_device.open_count);
28803 mutex_unlock(&sonypi_device.lock);
28804
28805 return 0;
28806 diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
28807 index 32362cf..32a96e9 100644
28808 --- a/drivers/char/tpm/tpm.c
28809 +++ b/drivers/char/tpm/tpm.c
28810 @@ -415,7 +415,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
28811 chip->vendor.req_complete_val)
28812 goto out_recv;
28813
28814 - if ((status == chip->vendor.req_canceled)) {
28815 + if (status == chip->vendor.req_canceled) {
28816 dev_err(chip->dev, "Operation Canceled\n");
28817 rc = -ECANCELED;
28818 goto out;
28819 diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
28820 index 0636520..169c1d0 100644
28821 --- a/drivers/char/tpm/tpm_bios.c
28822 +++ b/drivers/char/tpm/tpm_bios.c
28823 @@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
28824 event = addr;
28825
28826 if ((event->event_type == 0 && event->event_size == 0) ||
28827 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
28828 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
28829 return NULL;
28830
28831 return addr;
28832 @@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
28833 return NULL;
28834
28835 if ((event->event_type == 0 && event->event_size == 0) ||
28836 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
28837 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
28838 return NULL;
28839
28840 (*pos)++;
28841 @@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
28842 int i;
28843
28844 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
28845 - seq_putc(m, data[i]);
28846 + if (!seq_putc(m, data[i]))
28847 + return -EFAULT;
28848
28849 return 0;
28850 }
28851 @@ -410,8 +411,13 @@ static int read_log(struct tpm_bios_log *log)
28852 log->bios_event_log_end = log->bios_event_log + len;
28853
28854 virt = acpi_os_map_memory(start, len);
28855 + if (!virt) {
28856 + kfree(log->bios_event_log);
28857 + log->bios_event_log = NULL;
28858 + return -EFAULT;
28859 + }
28860
28861 - memcpy(log->bios_event_log, virt, len);
28862 + memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
28863
28864 acpi_os_unmap_memory(virt, len);
28865 return 0;
28866 diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
28867 index b58b561..c9088c8 100644
28868 --- a/drivers/char/virtio_console.c
28869 +++ b/drivers/char/virtio_console.c
28870 @@ -563,7 +563,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
28871 if (to_user) {
28872 ssize_t ret;
28873
28874 - ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
28875 + ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
28876 if (ret)
28877 return -EFAULT;
28878 } else {
28879 @@ -662,7 +662,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
28880 if (!port_has_data(port) && !port->host_connected)
28881 return 0;
28882
28883 - return fill_readbuf(port, ubuf, count, true);
28884 + return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
28885 }
28886
28887 static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
28888 diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
28889 index c9eee6d..f9d5280 100644
28890 --- a/drivers/edac/amd64_edac.c
28891 +++ b/drivers/edac/amd64_edac.c
28892 @@ -2685,7 +2685,7 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
28893 * PCI core identifies what devices are on a system during boot, and then
28894 * inquiry this table to see if this driver is for a given device found.
28895 */
28896 -static const struct pci_device_id amd64_pci_table[] __devinitdata = {
28897 +static const struct pci_device_id amd64_pci_table[] __devinitconst = {
28898 {
28899 .vendor = PCI_VENDOR_ID_AMD,
28900 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
28901 diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
28902 index e47e73b..348e0bd 100644
28903 --- a/drivers/edac/amd76x_edac.c
28904 +++ b/drivers/edac/amd76x_edac.c
28905 @@ -321,7 +321,7 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev)
28906 edac_mc_free(mci);
28907 }
28908
28909 -static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
28910 +static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
28911 {
28912 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
28913 AMD762},
28914 diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
28915 index 1af531a..3a8ff27 100644
28916 --- a/drivers/edac/e752x_edac.c
28917 +++ b/drivers/edac/e752x_edac.c
28918 @@ -1380,7 +1380,7 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
28919 edac_mc_free(mci);
28920 }
28921
28922 -static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
28923 +static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
28924 {
28925 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
28926 E7520},
28927 diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
28928 index 6ffb6d2..383d8d7 100644
28929 --- a/drivers/edac/e7xxx_edac.c
28930 +++ b/drivers/edac/e7xxx_edac.c
28931 @@ -525,7 +525,7 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
28932 edac_mc_free(mci);
28933 }
28934
28935 -static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
28936 +static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
28937 {
28938 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
28939 E7205},
28940 diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
28941 index 97f5064..202b6e6 100644
28942 --- a/drivers/edac/edac_pci_sysfs.c
28943 +++ b/drivers/edac/edac_pci_sysfs.c
28944 @@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
28945 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
28946 static int edac_pci_poll_msec = 1000; /* one second workq period */
28947
28948 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
28949 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
28950 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
28951 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
28952
28953 static struct kobject *edac_pci_top_main_kobj;
28954 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
28955 @@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
28956 edac_printk(KERN_CRIT, EDAC_PCI,
28957 "Signaled System Error on %s\n",
28958 pci_name(dev));
28959 - atomic_inc(&pci_nonparity_count);
28960 + atomic_inc_unchecked(&pci_nonparity_count);
28961 }
28962
28963 if (status & (PCI_STATUS_PARITY)) {
28964 @@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
28965 "Master Data Parity Error on %s\n",
28966 pci_name(dev));
28967
28968 - atomic_inc(&pci_parity_count);
28969 + atomic_inc_unchecked(&pci_parity_count);
28970 }
28971
28972 if (status & (PCI_STATUS_DETECTED_PARITY)) {
28973 @@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
28974 "Detected Parity Error on %s\n",
28975 pci_name(dev));
28976
28977 - atomic_inc(&pci_parity_count);
28978 + atomic_inc_unchecked(&pci_parity_count);
28979 }
28980 }
28981
28982 @@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
28983 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
28984 "Signaled System Error on %s\n",
28985 pci_name(dev));
28986 - atomic_inc(&pci_nonparity_count);
28987 + atomic_inc_unchecked(&pci_nonparity_count);
28988 }
28989
28990 if (status & (PCI_STATUS_PARITY)) {
28991 @@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
28992 "Master Data Parity Error on "
28993 "%s\n", pci_name(dev));
28994
28995 - atomic_inc(&pci_parity_count);
28996 + atomic_inc_unchecked(&pci_parity_count);
28997 }
28998
28999 if (status & (PCI_STATUS_DETECTED_PARITY)) {
29000 @@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29001 "Detected Parity Error on %s\n",
29002 pci_name(dev));
29003
29004 - atomic_inc(&pci_parity_count);
29005 + atomic_inc_unchecked(&pci_parity_count);
29006 }
29007 }
29008 }
29009 @@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
29010 if (!check_pci_errors)
29011 return;
29012
29013 - before_count = atomic_read(&pci_parity_count);
29014 + before_count = atomic_read_unchecked(&pci_parity_count);
29015
29016 /* scan all PCI devices looking for a Parity Error on devices and
29017 * bridges.
29018 @@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
29019 /* Only if operator has selected panic on PCI Error */
29020 if (edac_pci_get_panic_on_pe()) {
29021 /* If the count is different 'after' from 'before' */
29022 - if (before_count != atomic_read(&pci_parity_count))
29023 + if (before_count != atomic_read_unchecked(&pci_parity_count))
29024 panic("EDAC: PCI Parity Error");
29025 }
29026 }
29027 diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
29028 index c0510b3..6e2a954 100644
29029 --- a/drivers/edac/i3000_edac.c
29030 +++ b/drivers/edac/i3000_edac.c
29031 @@ -470,7 +470,7 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev)
29032 edac_mc_free(mci);
29033 }
29034
29035 -static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
29036 +static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
29037 {
29038 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
29039 I3000},
29040 diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
29041 index 73f55e200..5faaf59 100644
29042 --- a/drivers/edac/i3200_edac.c
29043 +++ b/drivers/edac/i3200_edac.c
29044 @@ -445,7 +445,7 @@ static void __devexit i3200_remove_one(struct pci_dev *pdev)
29045 edac_mc_free(mci);
29046 }
29047
29048 -static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
29049 +static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
29050 {
29051 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
29052 I3200},
29053 diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
29054 index 4dc3ac2..67d05a6 100644
29055 --- a/drivers/edac/i5000_edac.c
29056 +++ b/drivers/edac/i5000_edac.c
29057 @@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
29058 *
29059 * The "E500P" device is the first device supported.
29060 */
29061 -static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
29062 +static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
29063 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
29064 .driver_data = I5000P},
29065
29066 diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
29067 index bcbdeec..9886d16 100644
29068 --- a/drivers/edac/i5100_edac.c
29069 +++ b/drivers/edac/i5100_edac.c
29070 @@ -1051,7 +1051,7 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev)
29071 edac_mc_free(mci);
29072 }
29073
29074 -static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
29075 +static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
29076 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
29077 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
29078 { 0, }
29079 diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
29080 index 74d6ec34..baff517 100644
29081 --- a/drivers/edac/i5400_edac.c
29082 +++ b/drivers/edac/i5400_edac.c
29083 @@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
29084 *
29085 * The "E500P" device is the first device supported.
29086 */
29087 -static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
29088 +static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
29089 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
29090 {0,} /* 0 terminated list. */
29091 };
29092 diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
29093 index 6104dba..e7ea8e1 100644
29094 --- a/drivers/edac/i7300_edac.c
29095 +++ b/drivers/edac/i7300_edac.c
29096 @@ -1192,7 +1192,7 @@ static void __devexit i7300_remove_one(struct pci_dev *pdev)
29097 *
29098 * Has only 8086:360c PCI ID
29099 */
29100 -static const struct pci_device_id i7300_pci_tbl[] __devinitdata = {
29101 +static const struct pci_device_id i7300_pci_tbl[] __devinitconst = {
29102 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)},
29103 {0,} /* 0 terminated list. */
29104 };
29105 diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
29106 index 8568d9b..42b2fa8 100644
29107 --- a/drivers/edac/i7core_edac.c
29108 +++ b/drivers/edac/i7core_edac.c
29109 @@ -391,7 +391,7 @@ static const struct pci_id_table pci_dev_table[] = {
29110 /*
29111 * pci_device_id table for which devices we are looking for
29112 */
29113 -static const struct pci_device_id i7core_pci_tbl[] __devinitdata = {
29114 +static const struct pci_device_id i7core_pci_tbl[] __devinitconst = {
29115 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
29116 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
29117 {0,} /* 0 terminated list. */
29118 diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
29119 index 4329d39..f3022ef 100644
29120 --- a/drivers/edac/i82443bxgx_edac.c
29121 +++ b/drivers/edac/i82443bxgx_edac.c
29122 @@ -380,7 +380,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
29123
29124 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
29125
29126 -static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
29127 +static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
29128 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
29129 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
29130 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
29131 diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
29132 index 931a057..fd28340 100644
29133 --- a/drivers/edac/i82860_edac.c
29134 +++ b/drivers/edac/i82860_edac.c
29135 @@ -270,7 +270,7 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
29136 edac_mc_free(mci);
29137 }
29138
29139 -static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
29140 +static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
29141 {
29142 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
29143 I82860},
29144 diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
29145 index 33864c6..01edc61 100644
29146 --- a/drivers/edac/i82875p_edac.c
29147 +++ b/drivers/edac/i82875p_edac.c
29148 @@ -511,7 +511,7 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
29149 edac_mc_free(mci);
29150 }
29151
29152 -static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
29153 +static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
29154 {
29155 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
29156 I82875P},
29157 diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
29158 index 4184e01..dcb2cd3 100644
29159 --- a/drivers/edac/i82975x_edac.c
29160 +++ b/drivers/edac/i82975x_edac.c
29161 @@ -612,7 +612,7 @@ static void __devexit i82975x_remove_one(struct pci_dev *pdev)
29162 edac_mc_free(mci);
29163 }
29164
29165 -static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
29166 +static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
29167 {
29168 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
29169 I82975X
29170 diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
29171 index 0106747..0b40417 100644
29172 --- a/drivers/edac/mce_amd.h
29173 +++ b/drivers/edac/mce_amd.h
29174 @@ -83,7 +83,7 @@ struct amd_decoder_ops {
29175 bool (*dc_mce)(u16, u8);
29176 bool (*ic_mce)(u16, u8);
29177 bool (*nb_mce)(u16, u8);
29178 -};
29179 +} __no_const;
29180
29181 void amd_report_gart_errors(bool);
29182 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
29183 diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
29184 index e294e1b..a41b05b 100644
29185 --- a/drivers/edac/r82600_edac.c
29186 +++ b/drivers/edac/r82600_edac.c
29187 @@ -373,7 +373,7 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev)
29188 edac_mc_free(mci);
29189 }
29190
29191 -static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
29192 +static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
29193 {
29194 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
29195 },
29196 diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
29197 index 1dc118d..8c68af9 100644
29198 --- a/drivers/edac/sb_edac.c
29199 +++ b/drivers/edac/sb_edac.c
29200 @@ -367,7 +367,7 @@ static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
29201 /*
29202 * pci_device_id table for which devices we are looking for
29203 */
29204 -static const struct pci_device_id sbridge_pci_tbl[] __devinitdata = {
29205 +static const struct pci_device_id sbridge_pci_tbl[] __devinitconst = {
29206 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA)},
29207 {0,} /* 0 terminated list. */
29208 };
29209 diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
29210 index b6f47de..c5acf3a 100644
29211 --- a/drivers/edac/x38_edac.c
29212 +++ b/drivers/edac/x38_edac.c
29213 @@ -440,7 +440,7 @@ static void __devexit x38_remove_one(struct pci_dev *pdev)
29214 edac_mc_free(mci);
29215 }
29216
29217 -static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
29218 +static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
29219 {
29220 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
29221 X38},
29222 diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
29223 index 85661b0..c784559a 100644
29224 --- a/drivers/firewire/core-card.c
29225 +++ b/drivers/firewire/core-card.c
29226 @@ -657,7 +657,7 @@ void fw_card_release(struct kref *kref)
29227
29228 void fw_core_remove_card(struct fw_card *card)
29229 {
29230 - struct fw_card_driver dummy_driver = dummy_driver_template;
29231 + fw_card_driver_no_const dummy_driver = dummy_driver_template;
29232
29233 card->driver->update_phy_reg(card, 4,
29234 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
29235 diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
29236 index 4799393..37bd3ab 100644
29237 --- a/drivers/firewire/core-cdev.c
29238 +++ b/drivers/firewire/core-cdev.c
29239 @@ -1331,8 +1331,7 @@ static int init_iso_resource(struct client *client,
29240 int ret;
29241
29242 if ((request->channels == 0 && request->bandwidth == 0) ||
29243 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
29244 - request->bandwidth < 0)
29245 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
29246 return -EINVAL;
29247
29248 r = kmalloc(sizeof(*r), GFP_KERNEL);
29249 diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
29250 index 855ab3f..11f4bbd 100644
29251 --- a/drivers/firewire/core-transaction.c
29252 +++ b/drivers/firewire/core-transaction.c
29253 @@ -37,6 +37,7 @@
29254 #include <linux/timer.h>
29255 #include <linux/types.h>
29256 #include <linux/workqueue.h>
29257 +#include <linux/sched.h>
29258
29259 #include <asm/byteorder.h>
29260
29261 diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
29262 index b45be57..5fad18b 100644
29263 --- a/drivers/firewire/core.h
29264 +++ b/drivers/firewire/core.h
29265 @@ -101,6 +101,7 @@ struct fw_card_driver {
29266
29267 int (*stop_iso)(struct fw_iso_context *ctx);
29268 };
29269 +typedef struct fw_card_driver __no_const fw_card_driver_no_const;
29270
29271 void fw_card_initialize(struct fw_card *card,
29272 const struct fw_card_driver *driver, struct device *device);
29273 diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
29274 index 153980b..4b4d046 100644
29275 --- a/drivers/firmware/dmi_scan.c
29276 +++ b/drivers/firmware/dmi_scan.c
29277 @@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
29278 }
29279 }
29280 else {
29281 - /*
29282 - * no iounmap() for that ioremap(); it would be a no-op, but
29283 - * it's so early in setup that sucker gets confused into doing
29284 - * what it shouldn't if we actually call it.
29285 - */
29286 p = dmi_ioremap(0xF0000, 0x10000);
29287 if (p == NULL)
29288 goto error;
29289 @@ -723,7 +718,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
29290 if (buf == NULL)
29291 return -1;
29292
29293 - dmi_table(buf, dmi_len, dmi_num, decode, private_data);
29294 + dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
29295
29296 iounmap(buf);
29297 return 0;
29298 diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
29299 index 82d5c20..44a7177 100644
29300 --- a/drivers/gpio/gpio-vr41xx.c
29301 +++ b/drivers/gpio/gpio-vr41xx.c
29302 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
29303 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
29304 maskl, pendl, maskh, pendh);
29305
29306 - atomic_inc(&irq_err_count);
29307 + atomic_inc_unchecked(&irq_err_count);
29308
29309 return -EINVAL;
29310 }
29311 diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
29312 index 84a4a80..ce0306e 100644
29313 --- a/drivers/gpu/drm/drm_crtc_helper.c
29314 +++ b/drivers/gpu/drm/drm_crtc_helper.c
29315 @@ -280,7 +280,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
29316 struct drm_crtc *tmp;
29317 int crtc_mask = 1;
29318
29319 - WARN(!crtc, "checking null crtc?\n");
29320 + BUG_ON(!crtc);
29321
29322 dev = crtc->dev;
29323
29324 diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
29325 index ebf7d3f..d64c436 100644
29326 --- a/drivers/gpu/drm/drm_drv.c
29327 +++ b/drivers/gpu/drm/drm_drv.c
29328 @@ -312,7 +312,7 @@ module_exit(drm_core_exit);
29329 /**
29330 * Copy and IOCTL return string to user space
29331 */
29332 -static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
29333 +static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
29334 {
29335 int len;
29336
29337 @@ -391,7 +391,7 @@ long drm_ioctl(struct file *filp,
29338
29339 dev = file_priv->minor->dev;
29340 atomic_inc(&dev->ioctl_count);
29341 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
29342 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
29343 ++file_priv->ioctl_count;
29344
29345 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
29346 diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
29347 index 6263b01..7987f55 100644
29348 --- a/drivers/gpu/drm/drm_fops.c
29349 +++ b/drivers/gpu/drm/drm_fops.c
29350 @@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
29351 }
29352
29353 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
29354 - atomic_set(&dev->counts[i], 0);
29355 + atomic_set_unchecked(&dev->counts[i], 0);
29356
29357 dev->sigdata.lock = NULL;
29358
29359 @@ -135,8 +135,8 @@ int drm_open(struct inode *inode, struct file *filp)
29360
29361 retcode = drm_open_helper(inode, filp, dev);
29362 if (!retcode) {
29363 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
29364 - if (!dev->open_count++)
29365 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
29366 + if (local_inc_return(&dev->open_count) == 1)
29367 retcode = drm_setup(dev);
29368 }
29369 if (!retcode) {
29370 @@ -473,7 +473,7 @@ int drm_release(struct inode *inode, struct file *filp)
29371
29372 mutex_lock(&drm_global_mutex);
29373
29374 - DRM_DEBUG("open_count = %d\n", dev->open_count);
29375 + DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
29376
29377 if (dev->driver->preclose)
29378 dev->driver->preclose(dev, file_priv);
29379 @@ -482,10 +482,10 @@ int drm_release(struct inode *inode, struct file *filp)
29380 * Begin inline drm_release
29381 */
29382
29383 - DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
29384 + DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
29385 task_pid_nr(current),
29386 (long)old_encode_dev(file_priv->minor->device),
29387 - dev->open_count);
29388 + local_read(&dev->open_count));
29389
29390 /* Release any auth tokens that might point to this file_priv,
29391 (do that under the drm_global_mutex) */
29392 @@ -571,8 +571,8 @@ int drm_release(struct inode *inode, struct file *filp)
29393 * End inline drm_release
29394 */
29395
29396 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
29397 - if (!--dev->open_count) {
29398 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
29399 + if (local_dec_and_test(&dev->open_count)) {
29400 if (atomic_read(&dev->ioctl_count)) {
29401 DRM_ERROR("Device busy: %d\n",
29402 atomic_read(&dev->ioctl_count));
29403 diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
29404 index c87dc96..326055d 100644
29405 --- a/drivers/gpu/drm/drm_global.c
29406 +++ b/drivers/gpu/drm/drm_global.c
29407 @@ -36,7 +36,7 @@
29408 struct drm_global_item {
29409 struct mutex mutex;
29410 void *object;
29411 - int refcount;
29412 + atomic_t refcount;
29413 };
29414
29415 static struct drm_global_item glob[DRM_GLOBAL_NUM];
29416 @@ -49,7 +49,7 @@ void drm_global_init(void)
29417 struct drm_global_item *item = &glob[i];
29418 mutex_init(&item->mutex);
29419 item->object = NULL;
29420 - item->refcount = 0;
29421 + atomic_set(&item->refcount, 0);
29422 }
29423 }
29424
29425 @@ -59,7 +59,7 @@ void drm_global_release(void)
29426 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
29427 struct drm_global_item *item = &glob[i];
29428 BUG_ON(item->object != NULL);
29429 - BUG_ON(item->refcount != 0);
29430 + BUG_ON(atomic_read(&item->refcount) != 0);
29431 }
29432 }
29433
29434 @@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
29435 void *object;
29436
29437 mutex_lock(&item->mutex);
29438 - if (item->refcount == 0) {
29439 + if (atomic_read(&item->refcount) == 0) {
29440 item->object = kzalloc(ref->size, GFP_KERNEL);
29441 if (unlikely(item->object == NULL)) {
29442 ret = -ENOMEM;
29443 @@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
29444 goto out_err;
29445
29446 }
29447 - ++item->refcount;
29448 + atomic_inc(&item->refcount);
29449 ref->object = item->object;
29450 object = item->object;
29451 mutex_unlock(&item->mutex);
29452 @@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
29453 struct drm_global_item *item = &glob[ref->global_type];
29454
29455 mutex_lock(&item->mutex);
29456 - BUG_ON(item->refcount == 0);
29457 + BUG_ON(atomic_read(&item->refcount) == 0);
29458 BUG_ON(ref->object != item->object);
29459 - if (--item->refcount == 0) {
29460 + if (atomic_dec_and_test(&item->refcount)) {
29461 ref->release(ref);
29462 item->object = NULL;
29463 }
29464 diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
29465 index ab1162d..42587b2 100644
29466 --- a/drivers/gpu/drm/drm_info.c
29467 +++ b/drivers/gpu/drm/drm_info.c
29468 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
29469 struct drm_local_map *map;
29470 struct drm_map_list *r_list;
29471
29472 - /* Hardcoded from _DRM_FRAME_BUFFER,
29473 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
29474 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
29475 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
29476 + static const char * const types[] = {
29477 + [_DRM_FRAME_BUFFER] = "FB",
29478 + [_DRM_REGISTERS] = "REG",
29479 + [_DRM_SHM] = "SHM",
29480 + [_DRM_AGP] = "AGP",
29481 + [_DRM_SCATTER_GATHER] = "SG",
29482 + [_DRM_CONSISTENT] = "PCI",
29483 + [_DRM_GEM] = "GEM" };
29484 const char *type;
29485 int i;
29486
29487 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
29488 map = r_list->map;
29489 if (!map)
29490 continue;
29491 - if (map->type < 0 || map->type > 5)
29492 + if (map->type >= ARRAY_SIZE(types))
29493 type = "??";
29494 else
29495 type = types[map->type];
29496 @@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, void *data)
29497 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
29498 vma->vm_flags & VM_LOCKED ? 'l' : '-',
29499 vma->vm_flags & VM_IO ? 'i' : '-',
29500 +#ifdef CONFIG_GRKERNSEC_HIDESYM
29501 + 0);
29502 +#else
29503 vma->vm_pgoff);
29504 +#endif
29505
29506 #if defined(__i386__)
29507 pgprot = pgprot_val(vma->vm_page_prot);
29508 diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
29509 index 637fcc3..e890b33 100644
29510 --- a/drivers/gpu/drm/drm_ioc32.c
29511 +++ b/drivers/gpu/drm/drm_ioc32.c
29512 @@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
29513 request = compat_alloc_user_space(nbytes);
29514 if (!access_ok(VERIFY_WRITE, request, nbytes))
29515 return -EFAULT;
29516 - list = (struct drm_buf_desc *) (request + 1);
29517 + list = (struct drm_buf_desc __user *) (request + 1);
29518
29519 if (__put_user(count, &request->count)
29520 || __put_user(list, &request->list))
29521 @@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
29522 request = compat_alloc_user_space(nbytes);
29523 if (!access_ok(VERIFY_WRITE, request, nbytes))
29524 return -EFAULT;
29525 - list = (struct drm_buf_pub *) (request + 1);
29526 + list = (struct drm_buf_pub __user *) (request + 1);
29527
29528 if (__put_user(count, &request->count)
29529 || __put_user(list, &request->list))
29530 diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
29531 index 956fd38..e52167a 100644
29532 --- a/drivers/gpu/drm/drm_ioctl.c
29533 +++ b/drivers/gpu/drm/drm_ioctl.c
29534 @@ -251,7 +251,7 @@ int drm_getstats(struct drm_device *dev, void *data,
29535 stats->data[i].value =
29536 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
29537 else
29538 - stats->data[i].value = atomic_read(&dev->counts[i]);
29539 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
29540 stats->data[i].type = dev->types[i];
29541 }
29542
29543 diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
29544 index c79c713..2048588 100644
29545 --- a/drivers/gpu/drm/drm_lock.c
29546 +++ b/drivers/gpu/drm/drm_lock.c
29547 @@ -90,7 +90,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
29548 if (drm_lock_take(&master->lock, lock->context)) {
29549 master->lock.file_priv = file_priv;
29550 master->lock.lock_time = jiffies;
29551 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
29552 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
29553 break; /* Got lock */
29554 }
29555
29556 @@ -161,7 +161,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
29557 return -EINVAL;
29558 }
29559
29560 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
29561 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
29562
29563 if (drm_lock_free(&master->lock, lock->context)) {
29564 /* FIXME: Should really bail out here. */
29565 diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
29566 index 7f4b4e1..bf4def2 100644
29567 --- a/drivers/gpu/drm/i810/i810_dma.c
29568 +++ b/drivers/gpu/drm/i810/i810_dma.c
29569 @@ -948,8 +948,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
29570 dma->buflist[vertex->idx],
29571 vertex->discard, vertex->used);
29572
29573 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
29574 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
29575 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
29576 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
29577 sarea_priv->last_enqueue = dev_priv->counter - 1;
29578 sarea_priv->last_dispatch = (int)hw_status[5];
29579
29580 @@ -1109,8 +1109,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
29581 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
29582 mc->last_render);
29583
29584 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
29585 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
29586 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
29587 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
29588 sarea_priv->last_enqueue = dev_priv->counter - 1;
29589 sarea_priv->last_dispatch = (int)hw_status[5];
29590
29591 diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
29592 index c9339f4..f5e1b9d 100644
29593 --- a/drivers/gpu/drm/i810/i810_drv.h
29594 +++ b/drivers/gpu/drm/i810/i810_drv.h
29595 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
29596 int page_flipping;
29597
29598 wait_queue_head_t irq_queue;
29599 - atomic_t irq_received;
29600 - atomic_t irq_emitted;
29601 + atomic_unchecked_t irq_received;
29602 + atomic_unchecked_t irq_emitted;
29603
29604 int front_offset;
29605 } drm_i810_private_t;
29606 diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
29607 index deaa657..e0fd296 100644
29608 --- a/drivers/gpu/drm/i915/i915_debugfs.c
29609 +++ b/drivers/gpu/drm/i915/i915_debugfs.c
29610 @@ -499,7 +499,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
29611 I915_READ(GTIMR));
29612 }
29613 seq_printf(m, "Interrupts received: %d\n",
29614 - atomic_read(&dev_priv->irq_received));
29615 + atomic_read_unchecked(&dev_priv->irq_received));
29616 for (i = 0; i < I915_NUM_RINGS; i++) {
29617 if (IS_GEN6(dev) || IS_GEN7(dev)) {
29618 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
29619 @@ -1321,7 +1321,7 @@ static int i915_opregion(struct seq_file *m, void *unused)
29620 return ret;
29621
29622 if (opregion->header)
29623 - seq_write(m, opregion->header, OPREGION_SIZE);
29624 + seq_write(m, (const void __force_kernel *)opregion->header, OPREGION_SIZE);
29625
29626 mutex_unlock(&dev->struct_mutex);
29627
29628 diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
29629 index ddfe3d9..f6e6b21 100644
29630 --- a/drivers/gpu/drm/i915/i915_dma.c
29631 +++ b/drivers/gpu/drm/i915/i915_dma.c
29632 @@ -1175,7 +1175,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
29633 bool can_switch;
29634
29635 spin_lock(&dev->count_lock);
29636 - can_switch = (dev->open_count == 0);
29637 + can_switch = (local_read(&dev->open_count) == 0);
29638 spin_unlock(&dev->count_lock);
29639 return can_switch;
29640 }
29641 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
29642 index 9689ca3..294f9c1 100644
29643 --- a/drivers/gpu/drm/i915/i915_drv.h
29644 +++ b/drivers/gpu/drm/i915/i915_drv.h
29645 @@ -231,7 +231,7 @@ struct drm_i915_display_funcs {
29646 /* render clock increase/decrease */
29647 /* display clock increase/decrease */
29648 /* pll clock increase/decrease */
29649 -};
29650 +} __no_const;
29651
29652 struct intel_device_info {
29653 u8 gen;
29654 @@ -320,7 +320,7 @@ typedef struct drm_i915_private {
29655 int current_page;
29656 int page_flipping;
29657
29658 - atomic_t irq_received;
29659 + atomic_unchecked_t irq_received;
29660
29661 /* protects the irq masks */
29662 spinlock_t irq_lock;
29663 @@ -896,7 +896,7 @@ struct drm_i915_gem_object {
29664 * will be page flipped away on the next vblank. When it
29665 * reaches 0, dev_priv->pending_flip_queue will be woken up.
29666 */
29667 - atomic_t pending_flip;
29668 + atomic_unchecked_t pending_flip;
29669 };
29670
29671 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
29672 @@ -1276,7 +1276,7 @@ extern int intel_setup_gmbus(struct drm_device *dev);
29673 extern void intel_teardown_gmbus(struct drm_device *dev);
29674 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
29675 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
29676 -extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
29677 +static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
29678 {
29679 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
29680 }
29681 diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
29682 index 65e1f00..a30ef00 100644
29683 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
29684 +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
29685 @@ -189,7 +189,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
29686 i915_gem_clflush_object(obj);
29687
29688 if (obj->base.pending_write_domain)
29689 - cd->flips |= atomic_read(&obj->pending_flip);
29690 + cd->flips |= atomic_read_unchecked(&obj->pending_flip);
29691
29692 /* The actual obj->write_domain will be updated with
29693 * pending_write_domain after we emit the accumulated flush for all
29694 @@ -882,9 +882,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
29695
29696 static int
29697 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
29698 - int count)
29699 + unsigned int count)
29700 {
29701 - int i;
29702 + unsigned int i;
29703
29704 for (i = 0; i < count; i++) {
29705 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
29706 diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
29707 index 5bd4361..0241a42 100644
29708 --- a/drivers/gpu/drm/i915/i915_irq.c
29709 +++ b/drivers/gpu/drm/i915/i915_irq.c
29710 @@ -475,7 +475,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
29711 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
29712 struct drm_i915_master_private *master_priv;
29713
29714 - atomic_inc(&dev_priv->irq_received);
29715 + atomic_inc_unchecked(&dev_priv->irq_received);
29716
29717 /* disable master interrupt before clearing iir */
29718 de_ier = I915_READ(DEIER);
29719 @@ -566,7 +566,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
29720 struct drm_i915_master_private *master_priv;
29721 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
29722
29723 - atomic_inc(&dev_priv->irq_received);
29724 + atomic_inc_unchecked(&dev_priv->irq_received);
29725
29726 if (IS_GEN6(dev))
29727 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
29728 @@ -1231,7 +1231,7 @@ static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
29729 int ret = IRQ_NONE, pipe;
29730 bool blc_event = false;
29731
29732 - atomic_inc(&dev_priv->irq_received);
29733 + atomic_inc_unchecked(&dev_priv->irq_received);
29734
29735 iir = I915_READ(IIR);
29736
29737 @@ -1743,7 +1743,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
29738 {
29739 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
29740
29741 - atomic_set(&dev_priv->irq_received, 0);
29742 + atomic_set_unchecked(&dev_priv->irq_received, 0);
29743
29744 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
29745 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
29746 @@ -1932,7 +1932,7 @@ static void i915_driver_irq_preinstall(struct drm_device * dev)
29747 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
29748 int pipe;
29749
29750 - atomic_set(&dev_priv->irq_received, 0);
29751 + atomic_set_unchecked(&dev_priv->irq_received, 0);
29752
29753 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
29754 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
29755 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
29756 index 397087c..9178d0d 100644
29757 --- a/drivers/gpu/drm/i915/intel_display.c
29758 +++ b/drivers/gpu/drm/i915/intel_display.c
29759 @@ -2238,7 +2238,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
29760
29761 wait_event(dev_priv->pending_flip_queue,
29762 atomic_read(&dev_priv->mm.wedged) ||
29763 - atomic_read(&obj->pending_flip) == 0);
29764 + atomic_read_unchecked(&obj->pending_flip) == 0);
29765
29766 /* Big Hammer, we also need to ensure that any pending
29767 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
29768 @@ -2859,7 +2859,7 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
29769 obj = to_intel_framebuffer(crtc->fb)->obj;
29770 dev_priv = crtc->dev->dev_private;
29771 wait_event(dev_priv->pending_flip_queue,
29772 - atomic_read(&obj->pending_flip) == 0);
29773 + atomic_read_unchecked(&obj->pending_flip) == 0);
29774 }
29775
29776 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
29777 @@ -7171,7 +7171,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
29778
29779 atomic_clear_mask(1 << intel_crtc->plane,
29780 &obj->pending_flip.counter);
29781 - if (atomic_read(&obj->pending_flip) == 0)
29782 + if (atomic_read_unchecked(&obj->pending_flip) == 0)
29783 wake_up(&dev_priv->pending_flip_queue);
29784
29785 schedule_work(&work->work);
29786 @@ -7461,7 +7461,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
29787 /* Block clients from rendering to the new back buffer until
29788 * the flip occurs and the object is no longer visible.
29789 */
29790 - atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
29791 + atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
29792
29793 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
29794 if (ret)
29795 @@ -7475,7 +7475,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
29796 return 0;
29797
29798 cleanup_pending:
29799 - atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
29800 + atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
29801 drm_gem_object_unreference(&work->old_fb_obj->base);
29802 drm_gem_object_unreference(&obj->base);
29803 mutex_unlock(&dev->struct_mutex);
29804 diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
29805 index 54558a0..2d97005 100644
29806 --- a/drivers/gpu/drm/mga/mga_drv.h
29807 +++ b/drivers/gpu/drm/mga/mga_drv.h
29808 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
29809 u32 clear_cmd;
29810 u32 maccess;
29811
29812 - atomic_t vbl_received; /**< Number of vblanks received. */
29813 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
29814 wait_queue_head_t fence_queue;
29815 - atomic_t last_fence_retired;
29816 + atomic_unchecked_t last_fence_retired;
29817 u32 next_fence_to_post;
29818
29819 unsigned int fb_cpp;
29820 diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
29821 index 2581202..f230a8d9 100644
29822 --- a/drivers/gpu/drm/mga/mga_irq.c
29823 +++ b/drivers/gpu/drm/mga/mga_irq.c
29824 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
29825 if (crtc != 0)
29826 return 0;
29827
29828 - return atomic_read(&dev_priv->vbl_received);
29829 + return atomic_read_unchecked(&dev_priv->vbl_received);
29830 }
29831
29832
29833 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
29834 /* VBLANK interrupt */
29835 if (status & MGA_VLINEPEN) {
29836 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
29837 - atomic_inc(&dev_priv->vbl_received);
29838 + atomic_inc_unchecked(&dev_priv->vbl_received);
29839 drm_handle_vblank(dev, 0);
29840 handled = 1;
29841 }
29842 @@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
29843 if ((prim_start & ~0x03) != (prim_end & ~0x03))
29844 MGA_WRITE(MGA_PRIMEND, prim_end);
29845
29846 - atomic_inc(&dev_priv->last_fence_retired);
29847 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
29848 DRM_WAKEUP(&dev_priv->fence_queue);
29849 handled = 1;
29850 }
29851 @@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
29852 * using fences.
29853 */
29854 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
29855 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
29856 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
29857 - *sequence) <= (1 << 23)));
29858
29859 *sequence = cur_fence;
29860 diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
29861 index e5cbead..6c354a3 100644
29862 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c
29863 +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
29864 @@ -199,7 +199,7 @@ struct methods {
29865 const char desc[8];
29866 void (*loadbios)(struct drm_device *, uint8_t *);
29867 const bool rw;
29868 -};
29869 +} __do_const;
29870
29871 static struct methods shadow_methods[] = {
29872 { "PRAMIN", load_vbios_pramin, true },
29873 @@ -5290,7 +5290,7 @@ parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
29874 struct bit_table {
29875 const char id;
29876 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
29877 -};
29878 +} __no_const;
29879
29880 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
29881
29882 diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
29883 index b827098..c31a797 100644
29884 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h
29885 +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
29886 @@ -242,7 +242,7 @@ struct nouveau_channel {
29887 struct list_head pending;
29888 uint32_t sequence;
29889 uint32_t sequence_ack;
29890 - atomic_t last_sequence_irq;
29891 + atomic_unchecked_t last_sequence_irq;
29892 struct nouveau_vma vma;
29893 } fence;
29894
29895 @@ -323,7 +323,7 @@ struct nouveau_exec_engine {
29896 u32 handle, u16 class);
29897 void (*set_tile_region)(struct drm_device *dev, int i);
29898 void (*tlb_flush)(struct drm_device *, int engine);
29899 -};
29900 +} __no_const;
29901
29902 struct nouveau_instmem_engine {
29903 void *priv;
29904 @@ -345,13 +345,13 @@ struct nouveau_instmem_engine {
29905 struct nouveau_mc_engine {
29906 int (*init)(struct drm_device *dev);
29907 void (*takedown)(struct drm_device *dev);
29908 -};
29909 +} __no_const;
29910
29911 struct nouveau_timer_engine {
29912 int (*init)(struct drm_device *dev);
29913 void (*takedown)(struct drm_device *dev);
29914 uint64_t (*read)(struct drm_device *dev);
29915 -};
29916 +} __no_const;
29917
29918 struct nouveau_fb_engine {
29919 int num_tiles;
29920 @@ -566,7 +566,7 @@ struct nouveau_vram_engine {
29921 void (*put)(struct drm_device *, struct nouveau_mem **);
29922
29923 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
29924 -};
29925 +} __no_const;
29926
29927 struct nouveau_engine {
29928 struct nouveau_instmem_engine instmem;
29929 @@ -714,7 +714,7 @@ struct drm_nouveau_private {
29930 struct drm_global_reference mem_global_ref;
29931 struct ttm_bo_global_ref bo_global_ref;
29932 struct ttm_bo_device bdev;
29933 - atomic_t validate_sequence;
29934 + atomic_unchecked_t validate_sequence;
29935 } ttm;
29936
29937 struct {
29938 diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
29939 index 2f6daae..c9d7b9e 100644
29940 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c
29941 +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
29942 @@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_channel *chan)
29943 if (USE_REFCNT(dev))
29944 sequence = nvchan_rd32(chan, 0x48);
29945 else
29946 - sequence = atomic_read(&chan->fence.last_sequence_irq);
29947 + sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
29948
29949 if (chan->fence.sequence_ack == sequence)
29950 goto out;
29951 @@ -539,7 +539,7 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
29952 return ret;
29953 }
29954
29955 - atomic_set(&chan->fence.last_sequence_irq, 0);
29956 + atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
29957 return 0;
29958 }
29959
29960 diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
29961 index 7ce3fde..cb3ea04 100644
29962 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c
29963 +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
29964 @@ -314,7 +314,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
29965 int trycnt = 0;
29966 int ret, i;
29967
29968 - sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
29969 + sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
29970 retry:
29971 if (++trycnt > 100000) {
29972 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
29973 diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
29974 index f80c5e0..936baa7 100644
29975 --- a/drivers/gpu/drm/nouveau/nouveau_state.c
29976 +++ b/drivers/gpu/drm/nouveau/nouveau_state.c
29977 @@ -543,7 +543,7 @@ static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
29978 bool can_switch;
29979
29980 spin_lock(&dev->count_lock);
29981 - can_switch = (dev->open_count == 0);
29982 + can_switch = (local_read(&dev->open_count) == 0);
29983 spin_unlock(&dev->count_lock);
29984 return can_switch;
29985 }
29986 diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
29987 index dbdea8e..cd6eeeb 100644
29988 --- a/drivers/gpu/drm/nouveau/nv04_graph.c
29989 +++ b/drivers/gpu/drm/nouveau/nv04_graph.c
29990 @@ -554,7 +554,7 @@ static int
29991 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
29992 u32 class, u32 mthd, u32 data)
29993 {
29994 - atomic_set(&chan->fence.last_sequence_irq, data);
29995 + atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
29996 return 0;
29997 }
29998
29999 diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
30000 index bcac90b..53bfc76 100644
30001 --- a/drivers/gpu/drm/r128/r128_cce.c
30002 +++ b/drivers/gpu/drm/r128/r128_cce.c
30003 @@ -378,7 +378,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
30004
30005 /* GH: Simple idle check.
30006 */
30007 - atomic_set(&dev_priv->idle_count, 0);
30008 + atomic_set_unchecked(&dev_priv->idle_count, 0);
30009
30010 /* We don't support anything other than bus-mastering ring mode,
30011 * but the ring can be in either AGP or PCI space for the ring
30012 diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
30013 index 930c71b..499aded 100644
30014 --- a/drivers/gpu/drm/r128/r128_drv.h
30015 +++ b/drivers/gpu/drm/r128/r128_drv.h
30016 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
30017 int is_pci;
30018 unsigned long cce_buffers_offset;
30019
30020 - atomic_t idle_count;
30021 + atomic_unchecked_t idle_count;
30022
30023 int page_flipping;
30024 int current_page;
30025 u32 crtc_offset;
30026 u32 crtc_offset_cntl;
30027
30028 - atomic_t vbl_received;
30029 + atomic_unchecked_t vbl_received;
30030
30031 u32 color_fmt;
30032 unsigned int front_offset;
30033 diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
30034 index 429d5a0..7e899ed 100644
30035 --- a/drivers/gpu/drm/r128/r128_irq.c
30036 +++ b/drivers/gpu/drm/r128/r128_irq.c
30037 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
30038 if (crtc != 0)
30039 return 0;
30040
30041 - return atomic_read(&dev_priv->vbl_received);
30042 + return atomic_read_unchecked(&dev_priv->vbl_received);
30043 }
30044
30045 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
30046 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
30047 /* VBLANK interrupt */
30048 if (status & R128_CRTC_VBLANK_INT) {
30049 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
30050 - atomic_inc(&dev_priv->vbl_received);
30051 + atomic_inc_unchecked(&dev_priv->vbl_received);
30052 drm_handle_vblank(dev, 0);
30053 return IRQ_HANDLED;
30054 }
30055 diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
30056 index a9e33ce..09edd4b 100644
30057 --- a/drivers/gpu/drm/r128/r128_state.c
30058 +++ b/drivers/gpu/drm/r128/r128_state.c
30059 @@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
30060
30061 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
30062 {
30063 - if (atomic_read(&dev_priv->idle_count) == 0)
30064 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
30065 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
30066 else
30067 - atomic_set(&dev_priv->idle_count, 0);
30068 + atomic_set_unchecked(&dev_priv->idle_count, 0);
30069 }
30070
30071 #endif
30072 diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
30073 index 5a82b6b..9e69c73 100644
30074 --- a/drivers/gpu/drm/radeon/mkregtable.c
30075 +++ b/drivers/gpu/drm/radeon/mkregtable.c
30076 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
30077 regex_t mask_rex;
30078 regmatch_t match[4];
30079 char buf[1024];
30080 - size_t end;
30081 + long end;
30082 int len;
30083 int done = 0;
30084 int r;
30085 unsigned o;
30086 struct offset *offset;
30087 char last_reg_s[10];
30088 - int last_reg;
30089 + unsigned long last_reg;
30090
30091 if (regcomp
30092 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
30093 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
30094 index 1668ec1..30ebdab 100644
30095 --- a/drivers/gpu/drm/radeon/radeon.h
30096 +++ b/drivers/gpu/drm/radeon/radeon.h
30097 @@ -250,7 +250,7 @@ struct radeon_fence_driver {
30098 uint32_t scratch_reg;
30099 uint64_t gpu_addr;
30100 volatile uint32_t *cpu_addr;
30101 - atomic_t seq;
30102 + atomic_unchecked_t seq;
30103 uint32_t last_seq;
30104 unsigned long last_jiffies;
30105 unsigned long last_timeout;
30106 @@ -752,7 +752,7 @@ struct r600_blit_cp_primitives {
30107 int x2, int y2);
30108 void (*draw_auto)(struct radeon_device *rdev);
30109 void (*set_default_state)(struct radeon_device *rdev);
30110 -};
30111 +} __no_const;
30112
30113 struct r600_blit {
30114 struct mutex mutex;
30115 @@ -1201,7 +1201,7 @@ struct radeon_asic {
30116 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
30117 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
30118 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
30119 -};
30120 +} __no_const;
30121
30122 /*
30123 * Asic structures
30124 diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
30125 index 49f7cb7..2fcb48f 100644
30126 --- a/drivers/gpu/drm/radeon/radeon_device.c
30127 +++ b/drivers/gpu/drm/radeon/radeon_device.c
30128 @@ -687,7 +687,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
30129 bool can_switch;
30130
30131 spin_lock(&dev->count_lock);
30132 - can_switch = (dev->open_count == 0);
30133 + can_switch = (local_read(&dev->open_count) == 0);
30134 spin_unlock(&dev->count_lock);
30135 return can_switch;
30136 }
30137 diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
30138 index a1b59ca..86f2d44 100644
30139 --- a/drivers/gpu/drm/radeon/radeon_drv.h
30140 +++ b/drivers/gpu/drm/radeon/radeon_drv.h
30141 @@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
30142
30143 /* SW interrupt */
30144 wait_queue_head_t swi_queue;
30145 - atomic_t swi_emitted;
30146 + atomic_unchecked_t swi_emitted;
30147 int vblank_crtc;
30148 uint32_t irq_enable_reg;
30149 uint32_t r500_disp_irq_reg;
30150 diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
30151 index 4bd36a3..e66fe9c 100644
30152 --- a/drivers/gpu/drm/radeon/radeon_fence.c
30153 +++ b/drivers/gpu/drm/radeon/radeon_fence.c
30154 @@ -70,7 +70,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
30155 write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
30156 return 0;
30157 }
30158 - fence->seq = atomic_add_return(1, &rdev->fence_drv[fence->ring].seq);
30159 + fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv[fence->ring].seq);
30160 if (!rdev->ring[fence->ring].ready)
30161 /* FIXME: cp is not running assume everythings is done right
30162 * away
30163 @@ -405,7 +405,7 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
30164 }
30165 rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
30166 rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
30167 - radeon_fence_write(rdev, atomic_read(&rdev->fence_drv[ring].seq), ring);
30168 + radeon_fence_write(rdev, atomic_read_unchecked(&rdev->fence_drv[ring].seq), ring);
30169 rdev->fence_drv[ring].initialized = true;
30170 DRM_INFO("fence driver on ring %d use gpu addr 0x%08Lx and cpu addr 0x%p\n",
30171 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
30172 @@ -418,7 +418,7 @@ static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
30173 rdev->fence_drv[ring].scratch_reg = -1;
30174 rdev->fence_drv[ring].cpu_addr = NULL;
30175 rdev->fence_drv[ring].gpu_addr = 0;
30176 - atomic_set(&rdev->fence_drv[ring].seq, 0);
30177 + atomic_set_unchecked(&rdev->fence_drv[ring].seq, 0);
30178 INIT_LIST_HEAD(&rdev->fence_drv[ring].created);
30179 INIT_LIST_HEAD(&rdev->fence_drv[ring].emitted);
30180 INIT_LIST_HEAD(&rdev->fence_drv[ring].signaled);
30181 diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
30182 index 48b7cea..342236f 100644
30183 --- a/drivers/gpu/drm/radeon/radeon_ioc32.c
30184 +++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
30185 @@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
30186 request = compat_alloc_user_space(sizeof(*request));
30187 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
30188 || __put_user(req32.param, &request->param)
30189 - || __put_user((void __user *)(unsigned long)req32.value,
30190 + || __put_user((unsigned long)req32.value,
30191 &request->value))
30192 return -EFAULT;
30193
30194 diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
30195 index 00da384..32f972d 100644
30196 --- a/drivers/gpu/drm/radeon/radeon_irq.c
30197 +++ b/drivers/gpu/drm/radeon/radeon_irq.c
30198 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
30199 unsigned int ret;
30200 RING_LOCALS;
30201
30202 - atomic_inc(&dev_priv->swi_emitted);
30203 - ret = atomic_read(&dev_priv->swi_emitted);
30204 + atomic_inc_unchecked(&dev_priv->swi_emitted);
30205 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
30206
30207 BEGIN_RING(4);
30208 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
30209 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
30210 drm_radeon_private_t *dev_priv =
30211 (drm_radeon_private_t *) dev->dev_private;
30212
30213 - atomic_set(&dev_priv->swi_emitted, 0);
30214 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
30215 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
30216
30217 dev->max_vblank_count = 0x001fffff;
30218 diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
30219 index e8422ae..d22d4a8 100644
30220 --- a/drivers/gpu/drm/radeon/radeon_state.c
30221 +++ b/drivers/gpu/drm/radeon/radeon_state.c
30222 @@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
30223 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
30224 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
30225
30226 - if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
30227 + if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
30228 sarea_priv->nbox * sizeof(depth_boxes[0])))
30229 return -EFAULT;
30230
30231 @@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
30232 {
30233 drm_radeon_private_t *dev_priv = dev->dev_private;
30234 drm_radeon_getparam_t *param = data;
30235 - int value;
30236 + int value = 0;
30237
30238 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
30239
30240 diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
30241 index c421e77..e6bf2e8 100644
30242 --- a/drivers/gpu/drm/radeon/radeon_ttm.c
30243 +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
30244 @@ -842,8 +842,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
30245 }
30246 if (unlikely(ttm_vm_ops == NULL)) {
30247 ttm_vm_ops = vma->vm_ops;
30248 - radeon_ttm_vm_ops = *ttm_vm_ops;
30249 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
30250 + pax_open_kernel();
30251 + memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
30252 + *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
30253 + pax_close_kernel();
30254 }
30255 vma->vm_ops = &radeon_ttm_vm_ops;
30256 return 0;
30257 diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
30258 index f68dff2..8df955c 100644
30259 --- a/drivers/gpu/drm/radeon/rs690.c
30260 +++ b/drivers/gpu/drm/radeon/rs690.c
30261 @@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
30262 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
30263 rdev->pm.sideport_bandwidth.full)
30264 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
30265 - read_delay_latency.full = dfixed_const(370 * 800 * 1000);
30266 + read_delay_latency.full = dfixed_const(800 * 1000);
30267 read_delay_latency.full = dfixed_div(read_delay_latency,
30268 rdev->pm.igp_sideport_mclk);
30269 + a.full = dfixed_const(370);
30270 + read_delay_latency.full = dfixed_mul(read_delay_latency, a);
30271 } else {
30272 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
30273 rdev->pm.k8_bandwidth.full)
30274 diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
30275 index 499debd..66fce72 100644
30276 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
30277 +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
30278 @@ -398,9 +398,9 @@ static int ttm_pool_get_num_unused_pages(void)
30279 static int ttm_pool_mm_shrink(struct shrinker *shrink,
30280 struct shrink_control *sc)
30281 {
30282 - static atomic_t start_pool = ATOMIC_INIT(0);
30283 + static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
30284 unsigned i;
30285 - unsigned pool_offset = atomic_add_return(1, &start_pool);
30286 + unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
30287 struct ttm_page_pool *pool;
30288 int shrink_pages = sc->nr_to_scan;
30289
30290 diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
30291 index 88edacc..1e5412b 100644
30292 --- a/drivers/gpu/drm/via/via_drv.h
30293 +++ b/drivers/gpu/drm/via/via_drv.h
30294 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
30295 typedef uint32_t maskarray_t[5];
30296
30297 typedef struct drm_via_irq {
30298 - atomic_t irq_received;
30299 + atomic_unchecked_t irq_received;
30300 uint32_t pending_mask;
30301 uint32_t enable_mask;
30302 wait_queue_head_t irq_queue;
30303 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
30304 struct timeval last_vblank;
30305 int last_vblank_valid;
30306 unsigned usec_per_vblank;
30307 - atomic_t vbl_received;
30308 + atomic_unchecked_t vbl_received;
30309 drm_via_state_t hc_state;
30310 char pci_buf[VIA_PCI_BUF_SIZE];
30311 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
30312 diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
30313 index d391f48..10c8ca3 100644
30314 --- a/drivers/gpu/drm/via/via_irq.c
30315 +++ b/drivers/gpu/drm/via/via_irq.c
30316 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
30317 if (crtc != 0)
30318 return 0;
30319
30320 - return atomic_read(&dev_priv->vbl_received);
30321 + return atomic_read_unchecked(&dev_priv->vbl_received);
30322 }
30323
30324 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
30325 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
30326
30327 status = VIA_READ(VIA_REG_INTERRUPT);
30328 if (status & VIA_IRQ_VBLANK_PENDING) {
30329 - atomic_inc(&dev_priv->vbl_received);
30330 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
30331 + atomic_inc_unchecked(&dev_priv->vbl_received);
30332 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
30333 do_gettimeofday(&cur_vblank);
30334 if (dev_priv->last_vblank_valid) {
30335 dev_priv->usec_per_vblank =
30336 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
30337 dev_priv->last_vblank = cur_vblank;
30338 dev_priv->last_vblank_valid = 1;
30339 }
30340 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
30341 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
30342 DRM_DEBUG("US per vblank is: %u\n",
30343 dev_priv->usec_per_vblank);
30344 }
30345 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
30346
30347 for (i = 0; i < dev_priv->num_irqs; ++i) {
30348 if (status & cur_irq->pending_mask) {
30349 - atomic_inc(&cur_irq->irq_received);
30350 + atomic_inc_unchecked(&cur_irq->irq_received);
30351 DRM_WAKEUP(&cur_irq->irq_queue);
30352 handled = 1;
30353 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
30354 @@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
30355 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
30356 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
30357 masks[irq][4]));
30358 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
30359 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
30360 } else {
30361 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
30362 (((cur_irq_sequence =
30363 - atomic_read(&cur_irq->irq_received)) -
30364 + atomic_read_unchecked(&cur_irq->irq_received)) -
30365 *sequence) <= (1 << 23)));
30366 }
30367 *sequence = cur_irq_sequence;
30368 @@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
30369 }
30370
30371 for (i = 0; i < dev_priv->num_irqs; ++i) {
30372 - atomic_set(&cur_irq->irq_received, 0);
30373 + atomic_set_unchecked(&cur_irq->irq_received, 0);
30374 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
30375 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
30376 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
30377 @@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
30378 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
30379 case VIA_IRQ_RELATIVE:
30380 irqwait->request.sequence +=
30381 - atomic_read(&cur_irq->irq_received);
30382 + atomic_read_unchecked(&cur_irq->irq_received);
30383 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
30384 case VIA_IRQ_ABSOLUTE:
30385 break;
30386 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
30387 index dc27970..f18b008 100644
30388 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
30389 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
30390 @@ -260,7 +260,7 @@ struct vmw_private {
30391 * Fencing and IRQs.
30392 */
30393
30394 - atomic_t marker_seq;
30395 + atomic_unchecked_t marker_seq;
30396 wait_queue_head_t fence_queue;
30397 wait_queue_head_t fifo_queue;
30398 int fence_queue_waiters; /* Protected by hw_mutex */
30399 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
30400 index a0c2f12..68ae6cb 100644
30401 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
30402 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
30403 @@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
30404 (unsigned int) min,
30405 (unsigned int) fifo->capabilities);
30406
30407 - atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
30408 + atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
30409 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
30410 vmw_marker_queue_init(&fifo->marker_queue);
30411 return vmw_fifo_send_fence(dev_priv, &dummy);
30412 @@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
30413 if (reserveable)
30414 iowrite32(bytes, fifo_mem +
30415 SVGA_FIFO_RESERVED);
30416 - return fifo_mem + (next_cmd >> 2);
30417 + return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
30418 } else {
30419 need_bounce = true;
30420 }
30421 @@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
30422
30423 fm = vmw_fifo_reserve(dev_priv, bytes);
30424 if (unlikely(fm == NULL)) {
30425 - *seqno = atomic_read(&dev_priv->marker_seq);
30426 + *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
30427 ret = -ENOMEM;
30428 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
30429 false, 3*HZ);
30430 @@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
30431 }
30432
30433 do {
30434 - *seqno = atomic_add_return(1, &dev_priv->marker_seq);
30435 + *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
30436 } while (*seqno == 0);
30437
30438 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
30439 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
30440 index cabc95f..14b3d77 100644
30441 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
30442 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
30443 @@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
30444 * emitted. Then the fence is stale and signaled.
30445 */
30446
30447 - ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
30448 + ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
30449 > VMW_FENCE_WRAP);
30450
30451 return ret;
30452 @@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
30453
30454 if (fifo_idle)
30455 down_read(&fifo_state->rwsem);
30456 - signal_seq = atomic_read(&dev_priv->marker_seq);
30457 + signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
30458 ret = 0;
30459
30460 for (;;) {
30461 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
30462 index 8a8725c..afed796 100644
30463 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
30464 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
30465 @@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
30466 while (!vmw_lag_lt(queue, us)) {
30467 spin_lock(&queue->lock);
30468 if (list_empty(&queue->head))
30469 - seqno = atomic_read(&dev_priv->marker_seq);
30470 + seqno = atomic_read_unchecked(&dev_priv->marker_seq);
30471 else {
30472 marker = list_first_entry(&queue->head,
30473 struct vmw_marker, head);
30474 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
30475 index af08ce7..7a15038 100644
30476 --- a/drivers/hid/hid-core.c
30477 +++ b/drivers/hid/hid-core.c
30478 @@ -2020,7 +2020,7 @@ static bool hid_ignore(struct hid_device *hdev)
30479
30480 int hid_add_device(struct hid_device *hdev)
30481 {
30482 - static atomic_t id = ATOMIC_INIT(0);
30483 + static atomic_unchecked_t id = ATOMIC_INIT(0);
30484 int ret;
30485
30486 if (WARN_ON(hdev->status & HID_STAT_ADDED))
30487 @@ -2035,7 +2035,7 @@ int hid_add_device(struct hid_device *hdev)
30488 /* XXX hack, any other cleaner solution after the driver core
30489 * is converted to allow more than 20 bytes as the device name? */
30490 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
30491 - hdev->vendor, hdev->product, atomic_inc_return(&id));
30492 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
30493
30494 hid_debug_register(hdev, dev_name(&hdev->dev));
30495 ret = device_add(&hdev->dev);
30496 diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
30497 index b1ec0e2..c295a61 100644
30498 --- a/drivers/hid/usbhid/hiddev.c
30499 +++ b/drivers/hid/usbhid/hiddev.c
30500 @@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
30501 break;
30502
30503 case HIDIOCAPPLICATION:
30504 - if (arg < 0 || arg >= hid->maxapplication)
30505 + if (arg >= hid->maxapplication)
30506 break;
30507
30508 for (i = 0; i < hid->maxcollection; i++)
30509 diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
30510 index 4065374..10ed7dc 100644
30511 --- a/drivers/hv/channel.c
30512 +++ b/drivers/hv/channel.c
30513 @@ -400,8 +400,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
30514 int ret = 0;
30515 int t;
30516
30517 - next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
30518 - atomic_inc(&vmbus_connection.next_gpadl_handle);
30519 + next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
30520 + atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
30521
30522 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
30523 if (ret)
30524 diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
30525 index 12aa97f..c0679f7 100644
30526 --- a/drivers/hv/hv.c
30527 +++ b/drivers/hv/hv.c
30528 @@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
30529 u64 output_address = (output) ? virt_to_phys(output) : 0;
30530 u32 output_address_hi = output_address >> 32;
30531 u32 output_address_lo = output_address & 0xFFFFFFFF;
30532 - void *hypercall_page = hv_context.hypercall_page;
30533 + void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
30534
30535 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
30536 "=a"(hv_status_lo) : "d" (control_hi),
30537 diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
30538 index 6d7d286..92b0873 100644
30539 --- a/drivers/hv/hyperv_vmbus.h
30540 +++ b/drivers/hv/hyperv_vmbus.h
30541 @@ -556,7 +556,7 @@ enum vmbus_connect_state {
30542 struct vmbus_connection {
30543 enum vmbus_connect_state conn_state;
30544
30545 - atomic_t next_gpadl_handle;
30546 + atomic_unchecked_t next_gpadl_handle;
30547
30548 /*
30549 * Represents channel interrupts. Each bit position represents a
30550 diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
30551 index a220e57..428f54d 100644
30552 --- a/drivers/hv/vmbus_drv.c
30553 +++ b/drivers/hv/vmbus_drv.c
30554 @@ -663,10 +663,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
30555 {
30556 int ret = 0;
30557
30558 - static atomic_t device_num = ATOMIC_INIT(0);
30559 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
30560
30561 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
30562 - atomic_inc_return(&device_num));
30563 + atomic_inc_return_unchecked(&device_num));
30564
30565 child_device_obj->device.bus = &hv_bus;
30566 child_device_obj->device.parent = &hv_acpi_dev->dev;
30567 diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
30568 index 554f046..f8b4729 100644
30569 --- a/drivers/hwmon/acpi_power_meter.c
30570 +++ b/drivers/hwmon/acpi_power_meter.c
30571 @@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
30572 return res;
30573
30574 temp /= 1000;
30575 - if (temp < 0)
30576 - return -EINVAL;
30577
30578 mutex_lock(&resource->lock);
30579 resource->trip[attr->index - 7] = temp;
30580 diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
30581 index 91fdd1f..b66a686 100644
30582 --- a/drivers/hwmon/sht15.c
30583 +++ b/drivers/hwmon/sht15.c
30584 @@ -166,7 +166,7 @@ struct sht15_data {
30585 int supply_uV;
30586 bool supply_uV_valid;
30587 struct work_struct update_supply_work;
30588 - atomic_t interrupt_handled;
30589 + atomic_unchecked_t interrupt_handled;
30590 };
30591
30592 /**
30593 @@ -509,13 +509,13 @@ static int sht15_measurement(struct sht15_data *data,
30594 return ret;
30595
30596 gpio_direction_input(data->pdata->gpio_data);
30597 - atomic_set(&data->interrupt_handled, 0);
30598 + atomic_set_unchecked(&data->interrupt_handled, 0);
30599
30600 enable_irq(gpio_to_irq(data->pdata->gpio_data));
30601 if (gpio_get_value(data->pdata->gpio_data) == 0) {
30602 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
30603 /* Only relevant if the interrupt hasn't occurred. */
30604 - if (!atomic_read(&data->interrupt_handled))
30605 + if (!atomic_read_unchecked(&data->interrupt_handled))
30606 schedule_work(&data->read_work);
30607 }
30608 ret = wait_event_timeout(data->wait_queue,
30609 @@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
30610
30611 /* First disable the interrupt */
30612 disable_irq_nosync(irq);
30613 - atomic_inc(&data->interrupt_handled);
30614 + atomic_inc_unchecked(&data->interrupt_handled);
30615 /* Then schedule a reading work struct */
30616 if (data->state != SHT15_READING_NOTHING)
30617 schedule_work(&data->read_work);
30618 @@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
30619 * If not, then start the interrupt again - care here as could
30620 * have gone low in meantime so verify it hasn't!
30621 */
30622 - atomic_set(&data->interrupt_handled, 0);
30623 + atomic_set_unchecked(&data->interrupt_handled, 0);
30624 enable_irq(gpio_to_irq(data->pdata->gpio_data));
30625 /* If still not occurred or another handler has been scheduled */
30626 if (gpio_get_value(data->pdata->gpio_data)
30627 - || atomic_read(&data->interrupt_handled))
30628 + || atomic_read_unchecked(&data->interrupt_handled))
30629 return;
30630 }
30631
30632 diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
30633 index 378fcb5..5e91fa8 100644
30634 --- a/drivers/i2c/busses/i2c-amd756-s4882.c
30635 +++ b/drivers/i2c/busses/i2c-amd756-s4882.c
30636 @@ -43,7 +43,7 @@
30637 extern struct i2c_adapter amd756_smbus;
30638
30639 static struct i2c_adapter *s4882_adapter;
30640 -static struct i2c_algorithm *s4882_algo;
30641 +static i2c_algorithm_no_const *s4882_algo;
30642
30643 /* Wrapper access functions for multiplexed SMBus */
30644 static DEFINE_MUTEX(amd756_lock);
30645 diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
30646 index 29015eb..af2d8e9 100644
30647 --- a/drivers/i2c/busses/i2c-nforce2-s4985.c
30648 +++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
30649 @@ -41,7 +41,7 @@
30650 extern struct i2c_adapter *nforce2_smbus;
30651
30652 static struct i2c_adapter *s4985_adapter;
30653 -static struct i2c_algorithm *s4985_algo;
30654 +static i2c_algorithm_no_const *s4985_algo;
30655
30656 /* Wrapper access functions for multiplexed SMBus */
30657 static DEFINE_MUTEX(nforce2_lock);
30658 diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
30659 index d7a4833..7fae376 100644
30660 --- a/drivers/i2c/i2c-mux.c
30661 +++ b/drivers/i2c/i2c-mux.c
30662 @@ -28,7 +28,7 @@
30663 /* multiplexer per channel data */
30664 struct i2c_mux_priv {
30665 struct i2c_adapter adap;
30666 - struct i2c_algorithm algo;
30667 + i2c_algorithm_no_const algo;
30668
30669 struct i2c_adapter *parent;
30670 void *mux_dev; /* the mux chip/device */
30671 diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
30672 index 57d00ca..0145194 100644
30673 --- a/drivers/ide/aec62xx.c
30674 +++ b/drivers/ide/aec62xx.c
30675 @@ -181,7 +181,7 @@ static const struct ide_port_ops atp86x_port_ops = {
30676 .cable_detect = atp86x_cable_detect,
30677 };
30678
30679 -static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
30680 +static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
30681 { /* 0: AEC6210 */
30682 .name = DRV_NAME,
30683 .init_chipset = init_chipset_aec62xx,
30684 diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
30685 index 2c8016a..911a27c 100644
30686 --- a/drivers/ide/alim15x3.c
30687 +++ b/drivers/ide/alim15x3.c
30688 @@ -512,7 +512,7 @@ static const struct ide_dma_ops ali_dma_ops = {
30689 .dma_sff_read_status = ide_dma_sff_read_status,
30690 };
30691
30692 -static const struct ide_port_info ali15x3_chipset __devinitdata = {
30693 +static const struct ide_port_info ali15x3_chipset __devinitconst = {
30694 .name = DRV_NAME,
30695 .init_chipset = init_chipset_ali15x3,
30696 .init_hwif = init_hwif_ali15x3,
30697 diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
30698 index 3747b25..56fc995 100644
30699 --- a/drivers/ide/amd74xx.c
30700 +++ b/drivers/ide/amd74xx.c
30701 @@ -223,7 +223,7 @@ static const struct ide_port_ops amd_port_ops = {
30702 .udma_mask = udma, \
30703 }
30704
30705 -static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
30706 +static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
30707 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
30708 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
30709 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
30710 diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
30711 index 15f0ead..cb43480 100644
30712 --- a/drivers/ide/atiixp.c
30713 +++ b/drivers/ide/atiixp.c
30714 @@ -139,7 +139,7 @@ static const struct ide_port_ops atiixp_port_ops = {
30715 .cable_detect = atiixp_cable_detect,
30716 };
30717
30718 -static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
30719 +static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
30720 { /* 0: IXP200/300/400/700 */
30721 .name = DRV_NAME,
30722 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
30723 diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
30724 index 5f80312..d1fc438 100644
30725 --- a/drivers/ide/cmd64x.c
30726 +++ b/drivers/ide/cmd64x.c
30727 @@ -327,7 +327,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
30728 .dma_sff_read_status = ide_dma_sff_read_status,
30729 };
30730
30731 -static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
30732 +static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
30733 { /* 0: CMD643 */
30734 .name = DRV_NAME,
30735 .init_chipset = init_chipset_cmd64x,
30736 diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
30737 index 2c1e5f7..1444762 100644
30738 --- a/drivers/ide/cs5520.c
30739 +++ b/drivers/ide/cs5520.c
30740 @@ -94,7 +94,7 @@ static const struct ide_port_ops cs5520_port_ops = {
30741 .set_dma_mode = cs5520_set_dma_mode,
30742 };
30743
30744 -static const struct ide_port_info cyrix_chipset __devinitdata = {
30745 +static const struct ide_port_info cyrix_chipset __devinitconst = {
30746 .name = DRV_NAME,
30747 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
30748 .port_ops = &cs5520_port_ops,
30749 diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
30750 index 4dc4eb9..49b40ad 100644
30751 --- a/drivers/ide/cs5530.c
30752 +++ b/drivers/ide/cs5530.c
30753 @@ -245,7 +245,7 @@ static const struct ide_port_ops cs5530_port_ops = {
30754 .udma_filter = cs5530_udma_filter,
30755 };
30756
30757 -static const struct ide_port_info cs5530_chipset __devinitdata = {
30758 +static const struct ide_port_info cs5530_chipset __devinitconst = {
30759 .name = DRV_NAME,
30760 .init_chipset = init_chipset_cs5530,
30761 .init_hwif = init_hwif_cs5530,
30762 diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
30763 index 5059faf..18d4c85 100644
30764 --- a/drivers/ide/cs5535.c
30765 +++ b/drivers/ide/cs5535.c
30766 @@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
30767 .cable_detect = cs5535_cable_detect,
30768 };
30769
30770 -static const struct ide_port_info cs5535_chipset __devinitdata = {
30771 +static const struct ide_port_info cs5535_chipset __devinitconst = {
30772 .name = DRV_NAME,
30773 .port_ops = &cs5535_port_ops,
30774 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
30775 diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
30776 index 847553f..3ffb49d 100644
30777 --- a/drivers/ide/cy82c693.c
30778 +++ b/drivers/ide/cy82c693.c
30779 @@ -163,7 +163,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
30780 .set_dma_mode = cy82c693_set_dma_mode,
30781 };
30782
30783 -static const struct ide_port_info cy82c693_chipset __devinitdata = {
30784 +static const struct ide_port_info cy82c693_chipset __devinitconst = {
30785 .name = DRV_NAME,
30786 .init_iops = init_iops_cy82c693,
30787 .port_ops = &cy82c693_port_ops,
30788 diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
30789 index 58c51cd..4aec3b8 100644
30790 --- a/drivers/ide/hpt366.c
30791 +++ b/drivers/ide/hpt366.c
30792 @@ -443,7 +443,7 @@ static struct hpt_timings hpt37x_timings = {
30793 }
30794 };
30795
30796 -static const struct hpt_info hpt36x __devinitdata = {
30797 +static const struct hpt_info hpt36x __devinitconst = {
30798 .chip_name = "HPT36x",
30799 .chip_type = HPT36x,
30800 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
30801 @@ -451,7 +451,7 @@ static const struct hpt_info hpt36x __devinitdata = {
30802 .timings = &hpt36x_timings
30803 };
30804
30805 -static const struct hpt_info hpt370 __devinitdata = {
30806 +static const struct hpt_info hpt370 __devinitconst = {
30807 .chip_name = "HPT370",
30808 .chip_type = HPT370,
30809 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
30810 @@ -459,7 +459,7 @@ static const struct hpt_info hpt370 __devinitdata = {
30811 .timings = &hpt37x_timings
30812 };
30813
30814 -static const struct hpt_info hpt370a __devinitdata = {
30815 +static const struct hpt_info hpt370a __devinitconst = {
30816 .chip_name = "HPT370A",
30817 .chip_type = HPT370A,
30818 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
30819 @@ -467,7 +467,7 @@ static const struct hpt_info hpt370a __devinitdata = {
30820 .timings = &hpt37x_timings
30821 };
30822
30823 -static const struct hpt_info hpt374 __devinitdata = {
30824 +static const struct hpt_info hpt374 __devinitconst = {
30825 .chip_name = "HPT374",
30826 .chip_type = HPT374,
30827 .udma_mask = ATA_UDMA5,
30828 @@ -475,7 +475,7 @@ static const struct hpt_info hpt374 __devinitdata = {
30829 .timings = &hpt37x_timings
30830 };
30831
30832 -static const struct hpt_info hpt372 __devinitdata = {
30833 +static const struct hpt_info hpt372 __devinitconst = {
30834 .chip_name = "HPT372",
30835 .chip_type = HPT372,
30836 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
30837 @@ -483,7 +483,7 @@ static const struct hpt_info hpt372 __devinitdata = {
30838 .timings = &hpt37x_timings
30839 };
30840
30841 -static const struct hpt_info hpt372a __devinitdata = {
30842 +static const struct hpt_info hpt372a __devinitconst = {
30843 .chip_name = "HPT372A",
30844 .chip_type = HPT372A,
30845 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
30846 @@ -491,7 +491,7 @@ static const struct hpt_info hpt372a __devinitdata = {
30847 .timings = &hpt37x_timings
30848 };
30849
30850 -static const struct hpt_info hpt302 __devinitdata = {
30851 +static const struct hpt_info hpt302 __devinitconst = {
30852 .chip_name = "HPT302",
30853 .chip_type = HPT302,
30854 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
30855 @@ -499,7 +499,7 @@ static const struct hpt_info hpt302 __devinitdata = {
30856 .timings = &hpt37x_timings
30857 };
30858
30859 -static const struct hpt_info hpt371 __devinitdata = {
30860 +static const struct hpt_info hpt371 __devinitconst = {
30861 .chip_name = "HPT371",
30862 .chip_type = HPT371,
30863 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
30864 @@ -507,7 +507,7 @@ static const struct hpt_info hpt371 __devinitdata = {
30865 .timings = &hpt37x_timings
30866 };
30867
30868 -static const struct hpt_info hpt372n __devinitdata = {
30869 +static const struct hpt_info hpt372n __devinitconst = {
30870 .chip_name = "HPT372N",
30871 .chip_type = HPT372N,
30872 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
30873 @@ -515,7 +515,7 @@ static const struct hpt_info hpt372n __devinitdata = {
30874 .timings = &hpt37x_timings
30875 };
30876
30877 -static const struct hpt_info hpt302n __devinitdata = {
30878 +static const struct hpt_info hpt302n __devinitconst = {
30879 .chip_name = "HPT302N",
30880 .chip_type = HPT302N,
30881 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
30882 @@ -523,7 +523,7 @@ static const struct hpt_info hpt302n __devinitdata = {
30883 .timings = &hpt37x_timings
30884 };
30885
30886 -static const struct hpt_info hpt371n __devinitdata = {
30887 +static const struct hpt_info hpt371n __devinitconst = {
30888 .chip_name = "HPT371N",
30889 .chip_type = HPT371N,
30890 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
30891 @@ -1361,7 +1361,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
30892 .dma_sff_read_status = ide_dma_sff_read_status,
30893 };
30894
30895 -static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
30896 +static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
30897 { /* 0: HPT36x */
30898 .name = DRV_NAME,
30899 .init_chipset = init_chipset_hpt366,
30900 diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
30901 index 8126824..55a2798 100644
30902 --- a/drivers/ide/ide-cd.c
30903 +++ b/drivers/ide/ide-cd.c
30904 @@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
30905 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
30906 if ((unsigned long)buf & alignment
30907 || blk_rq_bytes(rq) & q->dma_pad_mask
30908 - || object_is_on_stack(buf))
30909 + || object_starts_on_stack(buf))
30910 drive->dma = 0;
30911 }
30912 }
30913 diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
30914 index 7f56b73..dab5b67 100644
30915 --- a/drivers/ide/ide-pci-generic.c
30916 +++ b/drivers/ide/ide-pci-generic.c
30917 @@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
30918 .udma_mask = ATA_UDMA6, \
30919 }
30920
30921 -static const struct ide_port_info generic_chipsets[] __devinitdata = {
30922 +static const struct ide_port_info generic_chipsets[] __devinitconst = {
30923 /* 0: Unknown */
30924 DECLARE_GENERIC_PCI_DEV(0),
30925
30926 diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
30927 index 560e66d..d5dd180 100644
30928 --- a/drivers/ide/it8172.c
30929 +++ b/drivers/ide/it8172.c
30930 @@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
30931 .set_dma_mode = it8172_set_dma_mode,
30932 };
30933
30934 -static const struct ide_port_info it8172_port_info __devinitdata = {
30935 +static const struct ide_port_info it8172_port_info __devinitconst = {
30936 .name = DRV_NAME,
30937 .port_ops = &it8172_port_ops,
30938 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
30939 diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
30940 index 46816ba..1847aeb 100644
30941 --- a/drivers/ide/it8213.c
30942 +++ b/drivers/ide/it8213.c
30943 @@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
30944 .cable_detect = it8213_cable_detect,
30945 };
30946
30947 -static const struct ide_port_info it8213_chipset __devinitdata = {
30948 +static const struct ide_port_info it8213_chipset __devinitconst = {
30949 .name = DRV_NAME,
30950 .enablebits = { {0x41, 0x80, 0x80} },
30951 .port_ops = &it8213_port_ops,
30952 diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
30953 index 2e3169f..c5611db 100644
30954 --- a/drivers/ide/it821x.c
30955 +++ b/drivers/ide/it821x.c
30956 @@ -630,7 +630,7 @@ static const struct ide_port_ops it821x_port_ops = {
30957 .cable_detect = it821x_cable_detect,
30958 };
30959
30960 -static const struct ide_port_info it821x_chipset __devinitdata = {
30961 +static const struct ide_port_info it821x_chipset __devinitconst = {
30962 .name = DRV_NAME,
30963 .init_chipset = init_chipset_it821x,
30964 .init_hwif = init_hwif_it821x,
30965 diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
30966 index 74c2c4a..efddd7d 100644
30967 --- a/drivers/ide/jmicron.c
30968 +++ b/drivers/ide/jmicron.c
30969 @@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
30970 .cable_detect = jmicron_cable_detect,
30971 };
30972
30973 -static const struct ide_port_info jmicron_chipset __devinitdata = {
30974 +static const struct ide_port_info jmicron_chipset __devinitconst = {
30975 .name = DRV_NAME,
30976 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
30977 .port_ops = &jmicron_port_ops,
30978 diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
30979 index 95327a2..73f78d8 100644
30980 --- a/drivers/ide/ns87415.c
30981 +++ b/drivers/ide/ns87415.c
30982 @@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
30983 .dma_sff_read_status = superio_dma_sff_read_status,
30984 };
30985
30986 -static const struct ide_port_info ns87415_chipset __devinitdata = {
30987 +static const struct ide_port_info ns87415_chipset __devinitconst = {
30988 .name = DRV_NAME,
30989 .init_hwif = init_hwif_ns87415,
30990 .tp_ops = &ns87415_tp_ops,
30991 diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
30992 index 1a53a4c..39edc66 100644
30993 --- a/drivers/ide/opti621.c
30994 +++ b/drivers/ide/opti621.c
30995 @@ -131,7 +131,7 @@ static const struct ide_port_ops opti621_port_ops = {
30996 .set_pio_mode = opti621_set_pio_mode,
30997 };
30998
30999 -static const struct ide_port_info opti621_chipset __devinitdata = {
31000 +static const struct ide_port_info opti621_chipset __devinitconst = {
31001 .name = DRV_NAME,
31002 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
31003 .port_ops = &opti621_port_ops,
31004 diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
31005 index 9546fe2..2e5ceb6 100644
31006 --- a/drivers/ide/pdc202xx_new.c
31007 +++ b/drivers/ide/pdc202xx_new.c
31008 @@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
31009 .udma_mask = udma, \
31010 }
31011
31012 -static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
31013 +static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
31014 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
31015 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
31016 };
31017 diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
31018 index 3a35ec6..5634510 100644
31019 --- a/drivers/ide/pdc202xx_old.c
31020 +++ b/drivers/ide/pdc202xx_old.c
31021 @@ -270,7 +270,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
31022 .max_sectors = sectors, \
31023 }
31024
31025 -static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
31026 +static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
31027 { /* 0: PDC20246 */
31028 .name = DRV_NAME,
31029 .init_chipset = init_chipset_pdc202xx,
31030 diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
31031 index 1892e81..fe0fd60 100644
31032 --- a/drivers/ide/piix.c
31033 +++ b/drivers/ide/piix.c
31034 @@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
31035 .udma_mask = udma, \
31036 }
31037
31038 -static const struct ide_port_info piix_pci_info[] __devinitdata = {
31039 +static const struct ide_port_info piix_pci_info[] __devinitconst = {
31040 /* 0: MPIIX */
31041 { /*
31042 * MPIIX actually has only a single IDE channel mapped to
31043 diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
31044 index a6414a8..c04173e 100644
31045 --- a/drivers/ide/rz1000.c
31046 +++ b/drivers/ide/rz1000.c
31047 @@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
31048 }
31049 }
31050
31051 -static const struct ide_port_info rz1000_chipset __devinitdata = {
31052 +static const struct ide_port_info rz1000_chipset __devinitconst = {
31053 .name = DRV_NAME,
31054 .host_flags = IDE_HFLAG_NO_DMA,
31055 };
31056 diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
31057 index 356b9b5..d4758eb 100644
31058 --- a/drivers/ide/sc1200.c
31059 +++ b/drivers/ide/sc1200.c
31060 @@ -291,7 +291,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
31061 .dma_sff_read_status = ide_dma_sff_read_status,
31062 };
31063
31064 -static const struct ide_port_info sc1200_chipset __devinitdata = {
31065 +static const struct ide_port_info sc1200_chipset __devinitconst = {
31066 .name = DRV_NAME,
31067 .port_ops = &sc1200_port_ops,
31068 .dma_ops = &sc1200_dma_ops,
31069 diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
31070 index b7f5b0c..9701038 100644
31071 --- a/drivers/ide/scc_pata.c
31072 +++ b/drivers/ide/scc_pata.c
31073 @@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
31074 .dma_sff_read_status = scc_dma_sff_read_status,
31075 };
31076
31077 -static const struct ide_port_info scc_chipset __devinitdata = {
31078 +static const struct ide_port_info scc_chipset __devinitconst = {
31079 .name = "sccIDE",
31080 .init_iops = init_iops_scc,
31081 .init_dma = scc_init_dma,
31082 diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
31083 index 35fb8da..24d72ef 100644
31084 --- a/drivers/ide/serverworks.c
31085 +++ b/drivers/ide/serverworks.c
31086 @@ -337,7 +337,7 @@ static const struct ide_port_ops svwks_port_ops = {
31087 .cable_detect = svwks_cable_detect,
31088 };
31089
31090 -static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
31091 +static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
31092 { /* 0: OSB4 */
31093 .name = DRV_NAME,
31094 .init_chipset = init_chipset_svwks,
31095 diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
31096 index ddeda44..46f7e30 100644
31097 --- a/drivers/ide/siimage.c
31098 +++ b/drivers/ide/siimage.c
31099 @@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
31100 .udma_mask = ATA_UDMA6, \
31101 }
31102
31103 -static const struct ide_port_info siimage_chipsets[] __devinitdata = {
31104 +static const struct ide_port_info siimage_chipsets[] __devinitconst = {
31105 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
31106 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
31107 };
31108 diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
31109 index 4a00225..09e61b4 100644
31110 --- a/drivers/ide/sis5513.c
31111 +++ b/drivers/ide/sis5513.c
31112 @@ -563,7 +563,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
31113 .cable_detect = sis_cable_detect,
31114 };
31115
31116 -static const struct ide_port_info sis5513_chipset __devinitdata = {
31117 +static const struct ide_port_info sis5513_chipset __devinitconst = {
31118 .name = DRV_NAME,
31119 .init_chipset = init_chipset_sis5513,
31120 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
31121 diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
31122 index f21dc2a..d051cd2 100644
31123 --- a/drivers/ide/sl82c105.c
31124 +++ b/drivers/ide/sl82c105.c
31125 @@ -299,7 +299,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
31126 .dma_sff_read_status = ide_dma_sff_read_status,
31127 };
31128
31129 -static const struct ide_port_info sl82c105_chipset __devinitdata = {
31130 +static const struct ide_port_info sl82c105_chipset __devinitconst = {
31131 .name = DRV_NAME,
31132 .init_chipset = init_chipset_sl82c105,
31133 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
31134 diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
31135 index 864ffe0..863a5e9 100644
31136 --- a/drivers/ide/slc90e66.c
31137 +++ b/drivers/ide/slc90e66.c
31138 @@ -132,7 +132,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
31139 .cable_detect = slc90e66_cable_detect,
31140 };
31141
31142 -static const struct ide_port_info slc90e66_chipset __devinitdata = {
31143 +static const struct ide_port_info slc90e66_chipset __devinitconst = {
31144 .name = DRV_NAME,
31145 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
31146 .port_ops = &slc90e66_port_ops,
31147 diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
31148 index 4799d5c..1794678 100644
31149 --- a/drivers/ide/tc86c001.c
31150 +++ b/drivers/ide/tc86c001.c
31151 @@ -192,7 +192,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
31152 .dma_sff_read_status = ide_dma_sff_read_status,
31153 };
31154
31155 -static const struct ide_port_info tc86c001_chipset __devinitdata = {
31156 +static const struct ide_port_info tc86c001_chipset __devinitconst = {
31157 .name = DRV_NAME,
31158 .init_hwif = init_hwif_tc86c001,
31159 .port_ops = &tc86c001_port_ops,
31160 diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
31161 index 281c914..55ce1b8 100644
31162 --- a/drivers/ide/triflex.c
31163 +++ b/drivers/ide/triflex.c
31164 @@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
31165 .set_dma_mode = triflex_set_mode,
31166 };
31167
31168 -static const struct ide_port_info triflex_device __devinitdata = {
31169 +static const struct ide_port_info triflex_device __devinitconst = {
31170 .name = DRV_NAME,
31171 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
31172 .port_ops = &triflex_port_ops,
31173 diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
31174 index 4b42ca0..e494a98 100644
31175 --- a/drivers/ide/trm290.c
31176 +++ b/drivers/ide/trm290.c
31177 @@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
31178 .dma_check = trm290_dma_check,
31179 };
31180
31181 -static const struct ide_port_info trm290_chipset __devinitdata = {
31182 +static const struct ide_port_info trm290_chipset __devinitconst = {
31183 .name = DRV_NAME,
31184 .init_hwif = init_hwif_trm290,
31185 .tp_ops = &trm290_tp_ops,
31186 diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
31187 index f46f49c..eb77678 100644
31188 --- a/drivers/ide/via82cxxx.c
31189 +++ b/drivers/ide/via82cxxx.c
31190 @@ -403,7 +403,7 @@ static const struct ide_port_ops via_port_ops = {
31191 .cable_detect = via82cxxx_cable_detect,
31192 };
31193
31194 -static const struct ide_port_info via82cxxx_chipset __devinitdata = {
31195 +static const struct ide_port_info via82cxxx_chipset __devinitconst = {
31196 .name = DRV_NAME,
31197 .init_chipset = init_chipset_via82cxxx,
31198 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
31199 diff --git a/drivers/ieee802154/fakehard.c b/drivers/ieee802154/fakehard.c
31200 index 73d4531..c90cd2d 100644
31201 --- a/drivers/ieee802154/fakehard.c
31202 +++ b/drivers/ieee802154/fakehard.c
31203 @@ -386,7 +386,7 @@ static int __devinit ieee802154fake_probe(struct platform_device *pdev)
31204 phy->transmit_power = 0xbf;
31205
31206 dev->netdev_ops = &fake_ops;
31207 - dev->ml_priv = &fake_mlme;
31208 + dev->ml_priv = (void *)&fake_mlme;
31209
31210 priv = netdev_priv(dev);
31211 priv->phy = phy;
31212 diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
31213 index c889aae..6cf5aa7 100644
31214 --- a/drivers/infiniband/core/cm.c
31215 +++ b/drivers/infiniband/core/cm.c
31216 @@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
31217
31218 struct cm_counter_group {
31219 struct kobject obj;
31220 - atomic_long_t counter[CM_ATTR_COUNT];
31221 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
31222 };
31223
31224 struct cm_counter_attribute {
31225 @@ -1394,7 +1394,7 @@ static void cm_dup_req_handler(struct cm_work *work,
31226 struct ib_mad_send_buf *msg = NULL;
31227 int ret;
31228
31229 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31230 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31231 counter[CM_REQ_COUNTER]);
31232
31233 /* Quick state check to discard duplicate REQs. */
31234 @@ -1778,7 +1778,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
31235 if (!cm_id_priv)
31236 return;
31237
31238 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31239 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31240 counter[CM_REP_COUNTER]);
31241 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
31242 if (ret)
31243 @@ -1945,7 +1945,7 @@ static int cm_rtu_handler(struct cm_work *work)
31244 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
31245 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
31246 spin_unlock_irq(&cm_id_priv->lock);
31247 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31248 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31249 counter[CM_RTU_COUNTER]);
31250 goto out;
31251 }
31252 @@ -2128,7 +2128,7 @@ static int cm_dreq_handler(struct cm_work *work)
31253 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
31254 dreq_msg->local_comm_id);
31255 if (!cm_id_priv) {
31256 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31257 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31258 counter[CM_DREQ_COUNTER]);
31259 cm_issue_drep(work->port, work->mad_recv_wc);
31260 return -EINVAL;
31261 @@ -2153,7 +2153,7 @@ static int cm_dreq_handler(struct cm_work *work)
31262 case IB_CM_MRA_REP_RCVD:
31263 break;
31264 case IB_CM_TIMEWAIT:
31265 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31266 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31267 counter[CM_DREQ_COUNTER]);
31268 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
31269 goto unlock;
31270 @@ -2167,7 +2167,7 @@ static int cm_dreq_handler(struct cm_work *work)
31271 cm_free_msg(msg);
31272 goto deref;
31273 case IB_CM_DREQ_RCVD:
31274 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31275 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31276 counter[CM_DREQ_COUNTER]);
31277 goto unlock;
31278 default:
31279 @@ -2534,7 +2534,7 @@ static int cm_mra_handler(struct cm_work *work)
31280 ib_modify_mad(cm_id_priv->av.port->mad_agent,
31281 cm_id_priv->msg, timeout)) {
31282 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
31283 - atomic_long_inc(&work->port->
31284 + atomic_long_inc_unchecked(&work->port->
31285 counter_group[CM_RECV_DUPLICATES].
31286 counter[CM_MRA_COUNTER]);
31287 goto out;
31288 @@ -2543,7 +2543,7 @@ static int cm_mra_handler(struct cm_work *work)
31289 break;
31290 case IB_CM_MRA_REQ_RCVD:
31291 case IB_CM_MRA_REP_RCVD:
31292 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31293 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31294 counter[CM_MRA_COUNTER]);
31295 /* fall through */
31296 default:
31297 @@ -2705,7 +2705,7 @@ static int cm_lap_handler(struct cm_work *work)
31298 case IB_CM_LAP_IDLE:
31299 break;
31300 case IB_CM_MRA_LAP_SENT:
31301 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31302 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31303 counter[CM_LAP_COUNTER]);
31304 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
31305 goto unlock;
31306 @@ -2721,7 +2721,7 @@ static int cm_lap_handler(struct cm_work *work)
31307 cm_free_msg(msg);
31308 goto deref;
31309 case IB_CM_LAP_RCVD:
31310 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31311 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31312 counter[CM_LAP_COUNTER]);
31313 goto unlock;
31314 default:
31315 @@ -3005,7 +3005,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
31316 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
31317 if (cur_cm_id_priv) {
31318 spin_unlock_irq(&cm.lock);
31319 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31320 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31321 counter[CM_SIDR_REQ_COUNTER]);
31322 goto out; /* Duplicate message. */
31323 }
31324 @@ -3217,10 +3217,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
31325 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
31326 msg->retries = 1;
31327
31328 - atomic_long_add(1 + msg->retries,
31329 + atomic_long_add_unchecked(1 + msg->retries,
31330 &port->counter_group[CM_XMIT].counter[attr_index]);
31331 if (msg->retries)
31332 - atomic_long_add(msg->retries,
31333 + atomic_long_add_unchecked(msg->retries,
31334 &port->counter_group[CM_XMIT_RETRIES].
31335 counter[attr_index]);
31336
31337 @@ -3430,7 +3430,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
31338 }
31339
31340 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
31341 - atomic_long_inc(&port->counter_group[CM_RECV].
31342 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
31343 counter[attr_id - CM_ATTR_ID_OFFSET]);
31344
31345 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
31346 @@ -3635,7 +3635,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
31347 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
31348
31349 return sprintf(buf, "%ld\n",
31350 - atomic_long_read(&group->counter[cm_attr->index]));
31351 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
31352 }
31353
31354 static const struct sysfs_ops cm_counter_ops = {
31355 diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
31356 index 176c8f9..2627b62 100644
31357 --- a/drivers/infiniband/core/fmr_pool.c
31358 +++ b/drivers/infiniband/core/fmr_pool.c
31359 @@ -98,8 +98,8 @@ struct ib_fmr_pool {
31360
31361 struct task_struct *thread;
31362
31363 - atomic_t req_ser;
31364 - atomic_t flush_ser;
31365 + atomic_unchecked_t req_ser;
31366 + atomic_unchecked_t flush_ser;
31367
31368 wait_queue_head_t force_wait;
31369 };
31370 @@ -180,10 +180,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
31371 struct ib_fmr_pool *pool = pool_ptr;
31372
31373 do {
31374 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
31375 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
31376 ib_fmr_batch_release(pool);
31377
31378 - atomic_inc(&pool->flush_ser);
31379 + atomic_inc_unchecked(&pool->flush_ser);
31380 wake_up_interruptible(&pool->force_wait);
31381
31382 if (pool->flush_function)
31383 @@ -191,7 +191,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
31384 }
31385
31386 set_current_state(TASK_INTERRUPTIBLE);
31387 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
31388 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
31389 !kthread_should_stop())
31390 schedule();
31391 __set_current_state(TASK_RUNNING);
31392 @@ -283,8 +283,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
31393 pool->dirty_watermark = params->dirty_watermark;
31394 pool->dirty_len = 0;
31395 spin_lock_init(&pool->pool_lock);
31396 - atomic_set(&pool->req_ser, 0);
31397 - atomic_set(&pool->flush_ser, 0);
31398 + atomic_set_unchecked(&pool->req_ser, 0);
31399 + atomic_set_unchecked(&pool->flush_ser, 0);
31400 init_waitqueue_head(&pool->force_wait);
31401
31402 pool->thread = kthread_run(ib_fmr_cleanup_thread,
31403 @@ -412,11 +412,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
31404 }
31405 spin_unlock_irq(&pool->pool_lock);
31406
31407 - serial = atomic_inc_return(&pool->req_ser);
31408 + serial = atomic_inc_return_unchecked(&pool->req_ser);
31409 wake_up_process(pool->thread);
31410
31411 if (wait_event_interruptible(pool->force_wait,
31412 - atomic_read(&pool->flush_ser) - serial >= 0))
31413 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
31414 return -EINTR;
31415
31416 return 0;
31417 @@ -526,7 +526,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
31418 } else {
31419 list_add_tail(&fmr->list, &pool->dirty_list);
31420 if (++pool->dirty_len >= pool->dirty_watermark) {
31421 - atomic_inc(&pool->req_ser);
31422 + atomic_inc_unchecked(&pool->req_ser);
31423 wake_up_process(pool->thread);
31424 }
31425 }
31426 diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
31427 index 40c8353..946b0e4 100644
31428 --- a/drivers/infiniband/hw/cxgb4/mem.c
31429 +++ b/drivers/infiniband/hw/cxgb4/mem.c
31430 @@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
31431 int err;
31432 struct fw_ri_tpte tpt;
31433 u32 stag_idx;
31434 - static atomic_t key;
31435 + static atomic_unchecked_t key;
31436
31437 if (c4iw_fatal_error(rdev))
31438 return -EIO;
31439 @@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
31440 &rdev->resource.tpt_fifo_lock);
31441 if (!stag_idx)
31442 return -ENOMEM;
31443 - *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
31444 + *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
31445 }
31446 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
31447 __func__, stag_state, type, pdid, stag_idx);
31448 diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
31449 index 79b3dbc..96e5fcc 100644
31450 --- a/drivers/infiniband/hw/ipath/ipath_rc.c
31451 +++ b/drivers/infiniband/hw/ipath/ipath_rc.c
31452 @@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
31453 struct ib_atomic_eth *ateth;
31454 struct ipath_ack_entry *e;
31455 u64 vaddr;
31456 - atomic64_t *maddr;
31457 + atomic64_unchecked_t *maddr;
31458 u64 sdata;
31459 u32 rkey;
31460 u8 next;
31461 @@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
31462 IB_ACCESS_REMOTE_ATOMIC)))
31463 goto nack_acc_unlck;
31464 /* Perform atomic OP and save result. */
31465 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
31466 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
31467 sdata = be64_to_cpu(ateth->swap_data);
31468 e = &qp->s_ack_queue[qp->r_head_ack_queue];
31469 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
31470 - (u64) atomic64_add_return(sdata, maddr) - sdata :
31471 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
31472 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
31473 be64_to_cpu(ateth->compare_data),
31474 sdata);
31475 diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
31476 index 1f95bba..9530f87 100644
31477 --- a/drivers/infiniband/hw/ipath/ipath_ruc.c
31478 +++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
31479 @@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
31480 unsigned long flags;
31481 struct ib_wc wc;
31482 u64 sdata;
31483 - atomic64_t *maddr;
31484 + atomic64_unchecked_t *maddr;
31485 enum ib_wc_status send_status;
31486
31487 /*
31488 @@ -382,11 +382,11 @@ again:
31489 IB_ACCESS_REMOTE_ATOMIC)))
31490 goto acc_err;
31491 /* Perform atomic OP and save result. */
31492 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
31493 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
31494 sdata = wqe->wr.wr.atomic.compare_add;
31495 *(u64 *) sqp->s_sge.sge.vaddr =
31496 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
31497 - (u64) atomic64_add_return(sdata, maddr) - sdata :
31498 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
31499 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
31500 sdata, wqe->wr.wr.atomic.swap);
31501 goto send_comp;
31502 diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
31503 index 7140199..da60063 100644
31504 --- a/drivers/infiniband/hw/nes/nes.c
31505 +++ b/drivers/infiniband/hw/nes/nes.c
31506 @@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
31507 LIST_HEAD(nes_adapter_list);
31508 static LIST_HEAD(nes_dev_list);
31509
31510 -atomic_t qps_destroyed;
31511 +atomic_unchecked_t qps_destroyed;
31512
31513 static unsigned int ee_flsh_adapter;
31514 static unsigned int sysfs_nonidx_addr;
31515 @@ -272,7 +272,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
31516 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
31517 struct nes_adapter *nesadapter = nesdev->nesadapter;
31518
31519 - atomic_inc(&qps_destroyed);
31520 + atomic_inc_unchecked(&qps_destroyed);
31521
31522 /* Free the control structures */
31523
31524 diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
31525 index c438e46..ca30356 100644
31526 --- a/drivers/infiniband/hw/nes/nes.h
31527 +++ b/drivers/infiniband/hw/nes/nes.h
31528 @@ -178,17 +178,17 @@ extern unsigned int nes_debug_level;
31529 extern unsigned int wqm_quanta;
31530 extern struct list_head nes_adapter_list;
31531
31532 -extern atomic_t cm_connects;
31533 -extern atomic_t cm_accepts;
31534 -extern atomic_t cm_disconnects;
31535 -extern atomic_t cm_closes;
31536 -extern atomic_t cm_connecteds;
31537 -extern atomic_t cm_connect_reqs;
31538 -extern atomic_t cm_rejects;
31539 -extern atomic_t mod_qp_timouts;
31540 -extern atomic_t qps_created;
31541 -extern atomic_t qps_destroyed;
31542 -extern atomic_t sw_qps_destroyed;
31543 +extern atomic_unchecked_t cm_connects;
31544 +extern atomic_unchecked_t cm_accepts;
31545 +extern atomic_unchecked_t cm_disconnects;
31546 +extern atomic_unchecked_t cm_closes;
31547 +extern atomic_unchecked_t cm_connecteds;
31548 +extern atomic_unchecked_t cm_connect_reqs;
31549 +extern atomic_unchecked_t cm_rejects;
31550 +extern atomic_unchecked_t mod_qp_timouts;
31551 +extern atomic_unchecked_t qps_created;
31552 +extern atomic_unchecked_t qps_destroyed;
31553 +extern atomic_unchecked_t sw_qps_destroyed;
31554 extern u32 mh_detected;
31555 extern u32 mh_pauses_sent;
31556 extern u32 cm_packets_sent;
31557 @@ -197,16 +197,16 @@ extern u32 cm_packets_created;
31558 extern u32 cm_packets_received;
31559 extern u32 cm_packets_dropped;
31560 extern u32 cm_packets_retrans;
31561 -extern atomic_t cm_listens_created;
31562 -extern atomic_t cm_listens_destroyed;
31563 +extern atomic_unchecked_t cm_listens_created;
31564 +extern atomic_unchecked_t cm_listens_destroyed;
31565 extern u32 cm_backlog_drops;
31566 -extern atomic_t cm_loopbacks;
31567 -extern atomic_t cm_nodes_created;
31568 -extern atomic_t cm_nodes_destroyed;
31569 -extern atomic_t cm_accel_dropped_pkts;
31570 -extern atomic_t cm_resets_recvd;
31571 -extern atomic_t pau_qps_created;
31572 -extern atomic_t pau_qps_destroyed;
31573 +extern atomic_unchecked_t cm_loopbacks;
31574 +extern atomic_unchecked_t cm_nodes_created;
31575 +extern atomic_unchecked_t cm_nodes_destroyed;
31576 +extern atomic_unchecked_t cm_accel_dropped_pkts;
31577 +extern atomic_unchecked_t cm_resets_recvd;
31578 +extern atomic_unchecked_t pau_qps_created;
31579 +extern atomic_unchecked_t pau_qps_destroyed;
31580
31581 extern u32 int_mod_timer_init;
31582 extern u32 int_mod_cq_depth_256;
31583 diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
31584 index a4972ab..1bcfc31 100644
31585 --- a/drivers/infiniband/hw/nes/nes_cm.c
31586 +++ b/drivers/infiniband/hw/nes/nes_cm.c
31587 @@ -68,14 +68,14 @@ u32 cm_packets_dropped;
31588 u32 cm_packets_retrans;
31589 u32 cm_packets_created;
31590 u32 cm_packets_received;
31591 -atomic_t cm_listens_created;
31592 -atomic_t cm_listens_destroyed;
31593 +atomic_unchecked_t cm_listens_created;
31594 +atomic_unchecked_t cm_listens_destroyed;
31595 u32 cm_backlog_drops;
31596 -atomic_t cm_loopbacks;
31597 -atomic_t cm_nodes_created;
31598 -atomic_t cm_nodes_destroyed;
31599 -atomic_t cm_accel_dropped_pkts;
31600 -atomic_t cm_resets_recvd;
31601 +atomic_unchecked_t cm_loopbacks;
31602 +atomic_unchecked_t cm_nodes_created;
31603 +atomic_unchecked_t cm_nodes_destroyed;
31604 +atomic_unchecked_t cm_accel_dropped_pkts;
31605 +atomic_unchecked_t cm_resets_recvd;
31606
31607 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
31608 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
31609 @@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
31610
31611 static struct nes_cm_core *g_cm_core;
31612
31613 -atomic_t cm_connects;
31614 -atomic_t cm_accepts;
31615 -atomic_t cm_disconnects;
31616 -atomic_t cm_closes;
31617 -atomic_t cm_connecteds;
31618 -atomic_t cm_connect_reqs;
31619 -atomic_t cm_rejects;
31620 +atomic_unchecked_t cm_connects;
31621 +atomic_unchecked_t cm_accepts;
31622 +atomic_unchecked_t cm_disconnects;
31623 +atomic_unchecked_t cm_closes;
31624 +atomic_unchecked_t cm_connecteds;
31625 +atomic_unchecked_t cm_connect_reqs;
31626 +atomic_unchecked_t cm_rejects;
31627
31628 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
31629 {
31630 @@ -1274,7 +1274,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
31631 kfree(listener);
31632 listener = NULL;
31633 ret = 0;
31634 - atomic_inc(&cm_listens_destroyed);
31635 + atomic_inc_unchecked(&cm_listens_destroyed);
31636 } else {
31637 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
31638 }
31639 @@ -1473,7 +1473,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
31640 cm_node->rem_mac);
31641
31642 add_hte_node(cm_core, cm_node);
31643 - atomic_inc(&cm_nodes_created);
31644 + atomic_inc_unchecked(&cm_nodes_created);
31645
31646 return cm_node;
31647 }
31648 @@ -1531,7 +1531,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
31649 }
31650
31651 atomic_dec(&cm_core->node_cnt);
31652 - atomic_inc(&cm_nodes_destroyed);
31653 + atomic_inc_unchecked(&cm_nodes_destroyed);
31654 nesqp = cm_node->nesqp;
31655 if (nesqp) {
31656 nesqp->cm_node = NULL;
31657 @@ -1595,7 +1595,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
31658
31659 static void drop_packet(struct sk_buff *skb)
31660 {
31661 - atomic_inc(&cm_accel_dropped_pkts);
31662 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
31663 dev_kfree_skb_any(skb);
31664 }
31665
31666 @@ -1658,7 +1658,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
31667 {
31668
31669 int reset = 0; /* whether to send reset in case of err.. */
31670 - atomic_inc(&cm_resets_recvd);
31671 + atomic_inc_unchecked(&cm_resets_recvd);
31672 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
31673 " refcnt=%d\n", cm_node, cm_node->state,
31674 atomic_read(&cm_node->ref_count));
31675 @@ -2299,7 +2299,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
31676 rem_ref_cm_node(cm_node->cm_core, cm_node);
31677 return NULL;
31678 }
31679 - atomic_inc(&cm_loopbacks);
31680 + atomic_inc_unchecked(&cm_loopbacks);
31681 loopbackremotenode->loopbackpartner = cm_node;
31682 loopbackremotenode->tcp_cntxt.rcv_wscale =
31683 NES_CM_DEFAULT_RCV_WND_SCALE;
31684 @@ -2574,7 +2574,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
31685 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
31686 else {
31687 rem_ref_cm_node(cm_core, cm_node);
31688 - atomic_inc(&cm_accel_dropped_pkts);
31689 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
31690 dev_kfree_skb_any(skb);
31691 }
31692 break;
31693 @@ -2881,7 +2881,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
31694
31695 if ((cm_id) && (cm_id->event_handler)) {
31696 if (issue_disconn) {
31697 - atomic_inc(&cm_disconnects);
31698 + atomic_inc_unchecked(&cm_disconnects);
31699 cm_event.event = IW_CM_EVENT_DISCONNECT;
31700 cm_event.status = disconn_status;
31701 cm_event.local_addr = cm_id->local_addr;
31702 @@ -2903,7 +2903,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
31703 }
31704
31705 if (issue_close) {
31706 - atomic_inc(&cm_closes);
31707 + atomic_inc_unchecked(&cm_closes);
31708 nes_disconnect(nesqp, 1);
31709
31710 cm_id->provider_data = nesqp;
31711 @@ -3039,7 +3039,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
31712
31713 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
31714 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
31715 - atomic_inc(&cm_accepts);
31716 + atomic_inc_unchecked(&cm_accepts);
31717
31718 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
31719 netdev_refcnt_read(nesvnic->netdev));
31720 @@ -3241,7 +3241,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
31721 struct nes_cm_core *cm_core;
31722 u8 *start_buff;
31723
31724 - atomic_inc(&cm_rejects);
31725 + atomic_inc_unchecked(&cm_rejects);
31726 cm_node = (struct nes_cm_node *)cm_id->provider_data;
31727 loopback = cm_node->loopbackpartner;
31728 cm_core = cm_node->cm_core;
31729 @@ -3301,7 +3301,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
31730 ntohl(cm_id->local_addr.sin_addr.s_addr),
31731 ntohs(cm_id->local_addr.sin_port));
31732
31733 - atomic_inc(&cm_connects);
31734 + atomic_inc_unchecked(&cm_connects);
31735 nesqp->active_conn = 1;
31736
31737 /* cache the cm_id in the qp */
31738 @@ -3407,7 +3407,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
31739 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
31740 return err;
31741 }
31742 - atomic_inc(&cm_listens_created);
31743 + atomic_inc_unchecked(&cm_listens_created);
31744 }
31745
31746 cm_id->add_ref(cm_id);
31747 @@ -3508,7 +3508,7 @@ static void cm_event_connected(struct nes_cm_event *event)
31748
31749 if (nesqp->destroyed)
31750 return;
31751 - atomic_inc(&cm_connecteds);
31752 + atomic_inc_unchecked(&cm_connecteds);
31753 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
31754 " local port 0x%04X. jiffies = %lu.\n",
31755 nesqp->hwqp.qp_id,
31756 @@ -3695,7 +3695,7 @@ static void cm_event_reset(struct nes_cm_event *event)
31757
31758 cm_id->add_ref(cm_id);
31759 ret = cm_id->event_handler(cm_id, &cm_event);
31760 - atomic_inc(&cm_closes);
31761 + atomic_inc_unchecked(&cm_closes);
31762 cm_event.event = IW_CM_EVENT_CLOSE;
31763 cm_event.status = 0;
31764 cm_event.provider_data = cm_id->provider_data;
31765 @@ -3731,7 +3731,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
31766 return;
31767 cm_id = cm_node->cm_id;
31768
31769 - atomic_inc(&cm_connect_reqs);
31770 + atomic_inc_unchecked(&cm_connect_reqs);
31771 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
31772 cm_node, cm_id, jiffies);
31773
31774 @@ -3771,7 +3771,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
31775 return;
31776 cm_id = cm_node->cm_id;
31777
31778 - atomic_inc(&cm_connect_reqs);
31779 + atomic_inc_unchecked(&cm_connect_reqs);
31780 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
31781 cm_node, cm_id, jiffies);
31782
31783 diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
31784 index 3ba7be3..c81f6ff 100644
31785 --- a/drivers/infiniband/hw/nes/nes_mgt.c
31786 +++ b/drivers/infiniband/hw/nes/nes_mgt.c
31787 @@ -40,8 +40,8 @@
31788 #include "nes.h"
31789 #include "nes_mgt.h"
31790
31791 -atomic_t pau_qps_created;
31792 -atomic_t pau_qps_destroyed;
31793 +atomic_unchecked_t pau_qps_created;
31794 +atomic_unchecked_t pau_qps_destroyed;
31795
31796 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
31797 {
31798 @@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
31799 {
31800 struct sk_buff *skb;
31801 unsigned long flags;
31802 - atomic_inc(&pau_qps_destroyed);
31803 + atomic_inc_unchecked(&pau_qps_destroyed);
31804
31805 /* Free packets that have not yet been forwarded */
31806 /* Lock is acquired by skb_dequeue when removing the skb */
31807 @@ -812,7 +812,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
31808 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
31809 skb_queue_head_init(&nesqp->pau_list);
31810 spin_lock_init(&nesqp->pau_lock);
31811 - atomic_inc(&pau_qps_created);
31812 + atomic_inc_unchecked(&pau_qps_created);
31813 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
31814 }
31815
31816 diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
31817 index f3a3ecf..57d311d 100644
31818 --- a/drivers/infiniband/hw/nes/nes_nic.c
31819 +++ b/drivers/infiniband/hw/nes/nes_nic.c
31820 @@ -1277,39 +1277,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
31821 target_stat_values[++index] = mh_detected;
31822 target_stat_values[++index] = mh_pauses_sent;
31823 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
31824 - target_stat_values[++index] = atomic_read(&cm_connects);
31825 - target_stat_values[++index] = atomic_read(&cm_accepts);
31826 - target_stat_values[++index] = atomic_read(&cm_disconnects);
31827 - target_stat_values[++index] = atomic_read(&cm_connecteds);
31828 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
31829 - target_stat_values[++index] = atomic_read(&cm_rejects);
31830 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
31831 - target_stat_values[++index] = atomic_read(&qps_created);
31832 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
31833 - target_stat_values[++index] = atomic_read(&qps_destroyed);
31834 - target_stat_values[++index] = atomic_read(&cm_closes);
31835 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
31836 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
31837 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
31838 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
31839 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
31840 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
31841 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
31842 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
31843 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
31844 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
31845 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
31846 target_stat_values[++index] = cm_packets_sent;
31847 target_stat_values[++index] = cm_packets_bounced;
31848 target_stat_values[++index] = cm_packets_created;
31849 target_stat_values[++index] = cm_packets_received;
31850 target_stat_values[++index] = cm_packets_dropped;
31851 target_stat_values[++index] = cm_packets_retrans;
31852 - target_stat_values[++index] = atomic_read(&cm_listens_created);
31853 - target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
31854 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
31855 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
31856 target_stat_values[++index] = cm_backlog_drops;
31857 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
31858 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
31859 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
31860 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
31861 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
31862 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
31863 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
31864 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
31865 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
31866 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
31867 target_stat_values[++index] = nesadapter->free_4kpbl;
31868 target_stat_values[++index] = nesadapter->free_256pbl;
31869 target_stat_values[++index] = int_mod_timer_init;
31870 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
31871 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
31872 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
31873 - target_stat_values[++index] = atomic_read(&pau_qps_created);
31874 - target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
31875 + target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
31876 + target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
31877 }
31878
31879 /**
31880 diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
31881 index 0927b5c..ed67986 100644
31882 --- a/drivers/infiniband/hw/nes/nes_verbs.c
31883 +++ b/drivers/infiniband/hw/nes/nes_verbs.c
31884 @@ -46,9 +46,9 @@
31885
31886 #include <rdma/ib_umem.h>
31887
31888 -atomic_t mod_qp_timouts;
31889 -atomic_t qps_created;
31890 -atomic_t sw_qps_destroyed;
31891 +atomic_unchecked_t mod_qp_timouts;
31892 +atomic_unchecked_t qps_created;
31893 +atomic_unchecked_t sw_qps_destroyed;
31894
31895 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
31896
31897 @@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
31898 if (init_attr->create_flags)
31899 return ERR_PTR(-EINVAL);
31900
31901 - atomic_inc(&qps_created);
31902 + atomic_inc_unchecked(&qps_created);
31903 switch (init_attr->qp_type) {
31904 case IB_QPT_RC:
31905 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
31906 @@ -1460,7 +1460,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
31907 struct iw_cm_event cm_event;
31908 int ret = 0;
31909
31910 - atomic_inc(&sw_qps_destroyed);
31911 + atomic_inc_unchecked(&sw_qps_destroyed);
31912 nesqp->destroyed = 1;
31913
31914 /* Blow away the connection if it exists. */
31915 diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
31916 index b881bdc..c2e360c 100644
31917 --- a/drivers/infiniband/hw/qib/qib.h
31918 +++ b/drivers/infiniband/hw/qib/qib.h
31919 @@ -51,6 +51,7 @@
31920 #include <linux/completion.h>
31921 #include <linux/kref.h>
31922 #include <linux/sched.h>
31923 +#include <linux/slab.h>
31924
31925 #include "qib_common.h"
31926 #include "qib_verbs.h"
31927 diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
31928 index c351aa4..e6967c2 100644
31929 --- a/drivers/input/gameport/gameport.c
31930 +++ b/drivers/input/gameport/gameport.c
31931 @@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
31932 */
31933 static void gameport_init_port(struct gameport *gameport)
31934 {
31935 - static atomic_t gameport_no = ATOMIC_INIT(0);
31936 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
31937
31938 __module_get(THIS_MODULE);
31939
31940 mutex_init(&gameport->drv_mutex);
31941 device_initialize(&gameport->dev);
31942 dev_set_name(&gameport->dev, "gameport%lu",
31943 - (unsigned long)atomic_inc_return(&gameport_no) - 1);
31944 + (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
31945 gameport->dev.bus = &gameport_bus;
31946 gameport->dev.release = gameport_release_port;
31947 if (gameport->parent)
31948 diff --git a/drivers/input/input.c b/drivers/input/input.c
31949 index 1f78c95..3cddc6c 100644
31950 --- a/drivers/input/input.c
31951 +++ b/drivers/input/input.c
31952 @@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struct input_dev *dev)
31953 */
31954 int input_register_device(struct input_dev *dev)
31955 {
31956 - static atomic_t input_no = ATOMIC_INIT(0);
31957 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
31958 struct input_handler *handler;
31959 const char *path;
31960 int error;
31961 @@ -1851,7 +1851,7 @@ int input_register_device(struct input_dev *dev)
31962 dev->setkeycode = input_default_setkeycode;
31963
31964 dev_set_name(&dev->dev, "input%ld",
31965 - (unsigned long) atomic_inc_return(&input_no) - 1);
31966 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
31967
31968 error = device_add(&dev->dev);
31969 if (error)
31970 diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
31971 index b8d8611..7a4a04b 100644
31972 --- a/drivers/input/joystick/sidewinder.c
31973 +++ b/drivers/input/joystick/sidewinder.c
31974 @@ -30,6 +30,7 @@
31975 #include <linux/kernel.h>
31976 #include <linux/module.h>
31977 #include <linux/slab.h>
31978 +#include <linux/sched.h>
31979 #include <linux/init.h>
31980 #include <linux/input.h>
31981 #include <linux/gameport.h>
31982 diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
31983 index fd7a0d5..a4af10c 100644
31984 --- a/drivers/input/joystick/xpad.c
31985 +++ b/drivers/input/joystick/xpad.c
31986 @@ -710,7 +710,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
31987
31988 static int xpad_led_probe(struct usb_xpad *xpad)
31989 {
31990 - static atomic_t led_seq = ATOMIC_INIT(0);
31991 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
31992 long led_no;
31993 struct xpad_led *led;
31994 struct led_classdev *led_cdev;
31995 @@ -723,7 +723,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
31996 if (!led)
31997 return -ENOMEM;
31998
31999 - led_no = (long)atomic_inc_return(&led_seq) - 1;
32000 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
32001
32002 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
32003 led->xpad = xpad;
32004 diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
32005 index 0110b5a..d3ad144 100644
32006 --- a/drivers/input/mousedev.c
32007 +++ b/drivers/input/mousedev.c
32008 @@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
32009
32010 spin_unlock_irq(&client->packet_lock);
32011
32012 - if (copy_to_user(buffer, data, count))
32013 + if (count > sizeof(data) || copy_to_user(buffer, data, count))
32014 return -EFAULT;
32015
32016 return count;
32017 diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
32018 index ba70058..571d25d 100644
32019 --- a/drivers/input/serio/serio.c
32020 +++ b/drivers/input/serio/serio.c
32021 @@ -497,7 +497,7 @@ static void serio_release_port(struct device *dev)
32022 */
32023 static void serio_init_port(struct serio *serio)
32024 {
32025 - static atomic_t serio_no = ATOMIC_INIT(0);
32026 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
32027
32028 __module_get(THIS_MODULE);
32029
32030 @@ -508,7 +508,7 @@ static void serio_init_port(struct serio *serio)
32031 mutex_init(&serio->drv_mutex);
32032 device_initialize(&serio->dev);
32033 dev_set_name(&serio->dev, "serio%ld",
32034 - (long)atomic_inc_return(&serio_no) - 1);
32035 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
32036 serio->dev.bus = &serio_bus;
32037 serio->dev.release = serio_release_port;
32038 serio->dev.groups = serio_device_attr_groups;
32039 diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
32040 index e44933d..9ba484a 100644
32041 --- a/drivers/isdn/capi/capi.c
32042 +++ b/drivers/isdn/capi/capi.c
32043 @@ -83,8 +83,8 @@ struct capiminor {
32044
32045 struct capi20_appl *ap;
32046 u32 ncci;
32047 - atomic_t datahandle;
32048 - atomic_t msgid;
32049 + atomic_unchecked_t datahandle;
32050 + atomic_unchecked_t msgid;
32051
32052 struct tty_port port;
32053 int ttyinstop;
32054 @@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
32055 capimsg_setu16(s, 2, mp->ap->applid);
32056 capimsg_setu8 (s, 4, CAPI_DATA_B3);
32057 capimsg_setu8 (s, 5, CAPI_RESP);
32058 - capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
32059 + capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
32060 capimsg_setu32(s, 8, mp->ncci);
32061 capimsg_setu16(s, 12, datahandle);
32062 }
32063 @@ -518,14 +518,14 @@ static void handle_minor_send(struct capiminor *mp)
32064 mp->outbytes -= len;
32065 spin_unlock_bh(&mp->outlock);
32066
32067 - datahandle = atomic_inc_return(&mp->datahandle);
32068 + datahandle = atomic_inc_return_unchecked(&mp->datahandle);
32069 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
32070 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
32071 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
32072 capimsg_setu16(skb->data, 2, mp->ap->applid);
32073 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
32074 capimsg_setu8 (skb->data, 5, CAPI_REQ);
32075 - capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
32076 + capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
32077 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
32078 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
32079 capimsg_setu16(skb->data, 16, len); /* Data length */
32080 diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
32081 index db621db..825ea1a 100644
32082 --- a/drivers/isdn/gigaset/common.c
32083 +++ b/drivers/isdn/gigaset/common.c
32084 @@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
32085 cs->commands_pending = 0;
32086 cs->cur_at_seq = 0;
32087 cs->gotfwver = -1;
32088 - cs->open_count = 0;
32089 + local_set(&cs->open_count, 0);
32090 cs->dev = NULL;
32091 cs->tty = NULL;
32092 cs->tty_dev = NULL;
32093 diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
32094 index 212efaf..f187c6b 100644
32095 --- a/drivers/isdn/gigaset/gigaset.h
32096 +++ b/drivers/isdn/gigaset/gigaset.h
32097 @@ -35,6 +35,7 @@
32098 #include <linux/tty_driver.h>
32099 #include <linux/list.h>
32100 #include <linux/atomic.h>
32101 +#include <asm/local.h>
32102
32103 #define GIG_VERSION {0, 5, 0, 0}
32104 #define GIG_COMPAT {0, 4, 0, 0}
32105 @@ -433,7 +434,7 @@ struct cardstate {
32106 spinlock_t cmdlock;
32107 unsigned curlen, cmdbytes;
32108
32109 - unsigned open_count;
32110 + local_t open_count;
32111 struct tty_struct *tty;
32112 struct tasklet_struct if_wake_tasklet;
32113 unsigned control_state;
32114 diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
32115 index ee0a549..a7c9798 100644
32116 --- a/drivers/isdn/gigaset/interface.c
32117 +++ b/drivers/isdn/gigaset/interface.c
32118 @@ -163,9 +163,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
32119 }
32120 tty->driver_data = cs;
32121
32122 - ++cs->open_count;
32123 -
32124 - if (cs->open_count == 1) {
32125 + if (local_inc_return(&cs->open_count) == 1) {
32126 spin_lock_irqsave(&cs->lock, flags);
32127 cs->tty = tty;
32128 spin_unlock_irqrestore(&cs->lock, flags);
32129 @@ -193,10 +191,10 @@ static void if_close(struct tty_struct *tty, struct file *filp)
32130
32131 if (!cs->connected)
32132 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
32133 - else if (!cs->open_count)
32134 + else if (!local_read(&cs->open_count))
32135 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32136 else {
32137 - if (!--cs->open_count) {
32138 + if (!local_dec_return(&cs->open_count)) {
32139 spin_lock_irqsave(&cs->lock, flags);
32140 cs->tty = NULL;
32141 spin_unlock_irqrestore(&cs->lock, flags);
32142 @@ -231,7 +229,7 @@ static int if_ioctl(struct tty_struct *tty,
32143 if (!cs->connected) {
32144 gig_dbg(DEBUG_IF, "not connected");
32145 retval = -ENODEV;
32146 - } else if (!cs->open_count)
32147 + } else if (!local_read(&cs->open_count))
32148 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32149 else {
32150 retval = 0;
32151 @@ -361,7 +359,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
32152 retval = -ENODEV;
32153 goto done;
32154 }
32155 - if (!cs->open_count) {
32156 + if (!local_read(&cs->open_count)) {
32157 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32158 retval = -ENODEV;
32159 goto done;
32160 @@ -414,7 +412,7 @@ static int if_write_room(struct tty_struct *tty)
32161 if (!cs->connected) {
32162 gig_dbg(DEBUG_IF, "not connected");
32163 retval = -ENODEV;
32164 - } else if (!cs->open_count)
32165 + } else if (!local_read(&cs->open_count))
32166 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32167 else if (cs->mstate != MS_LOCKED) {
32168 dev_warn(cs->dev, "can't write to unlocked device\n");
32169 @@ -444,7 +442,7 @@ static int if_chars_in_buffer(struct tty_struct *tty)
32170
32171 if (!cs->connected)
32172 gig_dbg(DEBUG_IF, "not connected");
32173 - else if (!cs->open_count)
32174 + else if (!local_read(&cs->open_count))
32175 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32176 else if (cs->mstate != MS_LOCKED)
32177 dev_warn(cs->dev, "can't write to unlocked device\n");
32178 @@ -472,7 +470,7 @@ static void if_throttle(struct tty_struct *tty)
32179
32180 if (!cs->connected)
32181 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
32182 - else if (!cs->open_count)
32183 + else if (!local_read(&cs->open_count))
32184 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32185 else
32186 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
32187 @@ -496,7 +494,7 @@ static void if_unthrottle(struct tty_struct *tty)
32188
32189 if (!cs->connected)
32190 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
32191 - else if (!cs->open_count)
32192 + else if (!local_read(&cs->open_count))
32193 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32194 else
32195 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
32196 @@ -527,7 +525,7 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
32197 goto out;
32198 }
32199
32200 - if (!cs->open_count) {
32201 + if (!local_read(&cs->open_count)) {
32202 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32203 goto out;
32204 }
32205 diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
32206 index 2a57da59..e7a12ed 100644
32207 --- a/drivers/isdn/hardware/avm/b1.c
32208 +++ b/drivers/isdn/hardware/avm/b1.c
32209 @@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart * t4file)
32210 }
32211 if (left) {
32212 if (t4file->user) {
32213 - if (copy_from_user(buf, dp, left))
32214 + if (left > sizeof buf || copy_from_user(buf, dp, left))
32215 return -EFAULT;
32216 } else {
32217 memcpy(buf, dp, left);
32218 @@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart * config)
32219 }
32220 if (left) {
32221 if (config->user) {
32222 - if (copy_from_user(buf, dp, left))
32223 + if (left > sizeof buf || copy_from_user(buf, dp, left))
32224 return -EFAULT;
32225 } else {
32226 memcpy(buf, dp, left);
32227 diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
32228 index 85784a7..a19ca98 100644
32229 --- a/drivers/isdn/hardware/eicon/divasync.h
32230 +++ b/drivers/isdn/hardware/eicon/divasync.h
32231 @@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
32232 } diva_didd_add_adapter_t;
32233 typedef struct _diva_didd_remove_adapter {
32234 IDI_CALL p_request;
32235 -} diva_didd_remove_adapter_t;
32236 +} __no_const diva_didd_remove_adapter_t;
32237 typedef struct _diva_didd_read_adapter_array {
32238 void * buffer;
32239 dword length;
32240 diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
32241 index a3bd163..8956575 100644
32242 --- a/drivers/isdn/hardware/eicon/xdi_adapter.h
32243 +++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
32244 @@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
32245 typedef struct _diva_os_idi_adapter_interface {
32246 diva_init_card_proc_t cleanup_adapter_proc;
32247 diva_cmd_card_proc_t cmd_proc;
32248 -} diva_os_idi_adapter_interface_t;
32249 +} __no_const diva_os_idi_adapter_interface_t;
32250
32251 typedef struct _diva_os_xdi_adapter {
32252 struct list_head link;
32253 diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
32254 index 1f355bb..43f1fea 100644
32255 --- a/drivers/isdn/icn/icn.c
32256 +++ b/drivers/isdn/icn/icn.c
32257 @@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len, int user, icn_card * card)
32258 if (count > len)
32259 count = len;
32260 if (user) {
32261 - if (copy_from_user(msg, buf, count))
32262 + if (count > sizeof msg || copy_from_user(msg, buf, count))
32263 return -EFAULT;
32264 } else
32265 memcpy(msg, buf, count);
32266 diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
32267 index b5fdcb7..5b6c59f 100644
32268 --- a/drivers/lguest/core.c
32269 +++ b/drivers/lguest/core.c
32270 @@ -92,9 +92,17 @@ static __init int map_switcher(void)
32271 * it's worked so far. The end address needs +1 because __get_vm_area
32272 * allocates an extra guard page, so we need space for that.
32273 */
32274 +
32275 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
32276 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
32277 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
32278 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
32279 +#else
32280 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
32281 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
32282 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
32283 +#endif
32284 +
32285 if (!switcher_vma) {
32286 err = -ENOMEM;
32287 printk("lguest: could not map switcher pages high\n");
32288 @@ -119,7 +127,7 @@ static __init int map_switcher(void)
32289 * Now the Switcher is mapped at the right address, we can't fail!
32290 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
32291 */
32292 - memcpy(switcher_vma->addr, start_switcher_text,
32293 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
32294 end_switcher_text - start_switcher_text);
32295
32296 printk(KERN_INFO "lguest: mapped switcher at %p\n",
32297 diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
32298 index 3980903..ce25c5e 100644
32299 --- a/drivers/lguest/x86/core.c
32300 +++ b/drivers/lguest/x86/core.c
32301 @@ -59,7 +59,7 @@ static struct {
32302 /* Offset from where switcher.S was compiled to where we've copied it */
32303 static unsigned long switcher_offset(void)
32304 {
32305 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
32306 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
32307 }
32308
32309 /* This cpu's struct lguest_pages. */
32310 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
32311 * These copies are pretty cheap, so we do them unconditionally: */
32312 /* Save the current Host top-level page directory.
32313 */
32314 +
32315 +#ifdef CONFIG_PAX_PER_CPU_PGD
32316 + pages->state.host_cr3 = read_cr3();
32317 +#else
32318 pages->state.host_cr3 = __pa(current->mm->pgd);
32319 +#endif
32320 +
32321 /*
32322 * Set up the Guest's page tables to see this CPU's pages (and no
32323 * other CPU's pages).
32324 @@ -472,7 +478,7 @@ void __init lguest_arch_host_init(void)
32325 * compiled-in switcher code and the high-mapped copy we just made.
32326 */
32327 for (i = 0; i < IDT_ENTRIES; i++)
32328 - default_idt_entries[i] += switcher_offset();
32329 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
32330
32331 /*
32332 * Set up the Switcher's per-cpu areas.
32333 @@ -555,7 +561,7 @@ void __init lguest_arch_host_init(void)
32334 * it will be undisturbed when we switch. To change %cs and jump we
32335 * need this structure to feed to Intel's "lcall" instruction.
32336 */
32337 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
32338 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
32339 lguest_entry.segment = LGUEST_CS;
32340
32341 /*
32342 diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
32343 index 40634b0..4f5855e 100644
32344 --- a/drivers/lguest/x86/switcher_32.S
32345 +++ b/drivers/lguest/x86/switcher_32.S
32346 @@ -87,6 +87,7 @@
32347 #include <asm/page.h>
32348 #include <asm/segment.h>
32349 #include <asm/lguest.h>
32350 +#include <asm/processor-flags.h>
32351
32352 // We mark the start of the code to copy
32353 // It's placed in .text tho it's never run here
32354 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
32355 // Changes type when we load it: damn Intel!
32356 // For after we switch over our page tables
32357 // That entry will be read-only: we'd crash.
32358 +
32359 +#ifdef CONFIG_PAX_KERNEXEC
32360 + mov %cr0, %edx
32361 + xor $X86_CR0_WP, %edx
32362 + mov %edx, %cr0
32363 +#endif
32364 +
32365 movl $(GDT_ENTRY_TSS*8), %edx
32366 ltr %dx
32367
32368 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
32369 // Let's clear it again for our return.
32370 // The GDT descriptor of the Host
32371 // Points to the table after two "size" bytes
32372 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
32373 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
32374 // Clear "used" from type field (byte 5, bit 2)
32375 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
32376 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
32377 +
32378 +#ifdef CONFIG_PAX_KERNEXEC
32379 + mov %cr0, %eax
32380 + xor $X86_CR0_WP, %eax
32381 + mov %eax, %cr0
32382 +#endif
32383
32384 // Once our page table's switched, the Guest is live!
32385 // The Host fades as we run this final step.
32386 @@ -295,13 +309,12 @@ deliver_to_host:
32387 // I consulted gcc, and it gave
32388 // These instructions, which I gladly credit:
32389 leal (%edx,%ebx,8), %eax
32390 - movzwl (%eax),%edx
32391 - movl 4(%eax), %eax
32392 - xorw %ax, %ax
32393 - orl %eax, %edx
32394 + movl 4(%eax), %edx
32395 + movw (%eax), %dx
32396 // Now the address of the handler's in %edx
32397 // We call it now: its "iret" drops us home.
32398 - jmp *%edx
32399 + ljmp $__KERNEL_CS, $1f
32400 +1: jmp *%edx
32401
32402 // Every interrupt can come to us here
32403 // But we must truly tell each apart.
32404 diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
32405 index 4daf9e5..b8d1d0f 100644
32406 --- a/drivers/macintosh/macio_asic.c
32407 +++ b/drivers/macintosh/macio_asic.c
32408 @@ -748,7 +748,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
32409 * MacIO is matched against any Apple ID, it's probe() function
32410 * will then decide wether it applies or not
32411 */
32412 -static const struct pci_device_id __devinitdata pci_ids [] = { {
32413 +static const struct pci_device_id __devinitconst pci_ids [] = { {
32414 .vendor = PCI_VENDOR_ID_APPLE,
32415 .device = PCI_ANY_ID,
32416 .subvendor = PCI_ANY_ID,
32417 diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
32418 index 1ce84ed..0fdd40a 100644
32419 --- a/drivers/md/dm-ioctl.c
32420 +++ b/drivers/md/dm-ioctl.c
32421 @@ -1589,7 +1589,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
32422 cmd == DM_LIST_VERSIONS_CMD)
32423 return 0;
32424
32425 - if ((cmd == DM_DEV_CREATE_CMD)) {
32426 + if (cmd == DM_DEV_CREATE_CMD) {
32427 if (!*param->name) {
32428 DMWARN("name not supplied when creating device");
32429 return -EINVAL;
32430 diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
32431 index 9bfd057..01180bc 100644
32432 --- a/drivers/md/dm-raid1.c
32433 +++ b/drivers/md/dm-raid1.c
32434 @@ -40,7 +40,7 @@ enum dm_raid1_error {
32435
32436 struct mirror {
32437 struct mirror_set *ms;
32438 - atomic_t error_count;
32439 + atomic_unchecked_t error_count;
32440 unsigned long error_type;
32441 struct dm_dev *dev;
32442 sector_t offset;
32443 @@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
32444 struct mirror *m;
32445
32446 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
32447 - if (!atomic_read(&m->error_count))
32448 + if (!atomic_read_unchecked(&m->error_count))
32449 return m;
32450
32451 return NULL;
32452 @@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
32453 * simple way to tell if a device has encountered
32454 * errors.
32455 */
32456 - atomic_inc(&m->error_count);
32457 + atomic_inc_unchecked(&m->error_count);
32458
32459 if (test_and_set_bit(error_type, &m->error_type))
32460 return;
32461 @@ -408,7 +408,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
32462 struct mirror *m = get_default_mirror(ms);
32463
32464 do {
32465 - if (likely(!atomic_read(&m->error_count)))
32466 + if (likely(!atomic_read_unchecked(&m->error_count)))
32467 return m;
32468
32469 if (m-- == ms->mirror)
32470 @@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
32471 {
32472 struct mirror *default_mirror = get_default_mirror(m->ms);
32473
32474 - return !atomic_read(&default_mirror->error_count);
32475 + return !atomic_read_unchecked(&default_mirror->error_count);
32476 }
32477
32478 static int mirror_available(struct mirror_set *ms, struct bio *bio)
32479 @@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
32480 */
32481 if (likely(region_in_sync(ms, region, 1)))
32482 m = choose_mirror(ms, bio->bi_sector);
32483 - else if (m && atomic_read(&m->error_count))
32484 + else if (m && atomic_read_unchecked(&m->error_count))
32485 m = NULL;
32486
32487 if (likely(m))
32488 @@ -937,7 +937,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
32489 }
32490
32491 ms->mirror[mirror].ms = ms;
32492 - atomic_set(&(ms->mirror[mirror].error_count), 0);
32493 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
32494 ms->mirror[mirror].error_type = 0;
32495 ms->mirror[mirror].offset = offset;
32496
32497 @@ -1347,7 +1347,7 @@ static void mirror_resume(struct dm_target *ti)
32498 */
32499 static char device_status_char(struct mirror *m)
32500 {
32501 - if (!atomic_read(&(m->error_count)))
32502 + if (!atomic_read_unchecked(&(m->error_count)))
32503 return 'A';
32504
32505 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
32506 diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
32507 index 3d80cf0..b77cc47 100644
32508 --- a/drivers/md/dm-stripe.c
32509 +++ b/drivers/md/dm-stripe.c
32510 @@ -20,7 +20,7 @@ struct stripe {
32511 struct dm_dev *dev;
32512 sector_t physical_start;
32513
32514 - atomic_t error_count;
32515 + atomic_unchecked_t error_count;
32516 };
32517
32518 struct stripe_c {
32519 @@ -192,7 +192,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
32520 kfree(sc);
32521 return r;
32522 }
32523 - atomic_set(&(sc->stripe[i].error_count), 0);
32524 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
32525 }
32526
32527 ti->private = sc;
32528 @@ -314,7 +314,7 @@ static int stripe_status(struct dm_target *ti,
32529 DMEMIT("%d ", sc->stripes);
32530 for (i = 0; i < sc->stripes; i++) {
32531 DMEMIT("%s ", sc->stripe[i].dev->name);
32532 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
32533 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
32534 'D' : 'A';
32535 }
32536 buffer[i] = '\0';
32537 @@ -361,8 +361,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
32538 */
32539 for (i = 0; i < sc->stripes; i++)
32540 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
32541 - atomic_inc(&(sc->stripe[i].error_count));
32542 - if (atomic_read(&(sc->stripe[i].error_count)) <
32543 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
32544 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
32545 DM_IO_ERROR_THRESHOLD)
32546 schedule_work(&sc->trigger_event);
32547 }
32548 diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
32549 index 63cc542..8d45caf3 100644
32550 --- a/drivers/md/dm-table.c
32551 +++ b/drivers/md/dm-table.c
32552 @@ -391,7 +391,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
32553 if (!dev_size)
32554 return 0;
32555
32556 - if ((start >= dev_size) || (start + len > dev_size)) {
32557 + if ((start >= dev_size) || (len > dev_size - start)) {
32558 DMWARN("%s: %s too small for target: "
32559 "start=%llu, len=%llu, dev_size=%llu",
32560 dm_device_name(ti->table->md), bdevname(bdev, b),
32561 diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
32562 index 237571a..fb6d19b 100644
32563 --- a/drivers/md/dm-thin-metadata.c
32564 +++ b/drivers/md/dm-thin-metadata.c
32565 @@ -432,7 +432,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
32566
32567 pmd->info.tm = tm;
32568 pmd->info.levels = 2;
32569 - pmd->info.value_type.context = pmd->data_sm;
32570 + pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
32571 pmd->info.value_type.size = sizeof(__le64);
32572 pmd->info.value_type.inc = data_block_inc;
32573 pmd->info.value_type.dec = data_block_dec;
32574 @@ -451,7 +451,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
32575
32576 pmd->bl_info.tm = tm;
32577 pmd->bl_info.levels = 1;
32578 - pmd->bl_info.value_type.context = pmd->data_sm;
32579 + pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
32580 pmd->bl_info.value_type.size = sizeof(__le64);
32581 pmd->bl_info.value_type.inc = data_block_inc;
32582 pmd->bl_info.value_type.dec = data_block_dec;
32583 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
32584 index b89c548..2af3ce4 100644
32585 --- a/drivers/md/dm.c
32586 +++ b/drivers/md/dm.c
32587 @@ -176,9 +176,9 @@ struct mapped_device {
32588 /*
32589 * Event handling.
32590 */
32591 - atomic_t event_nr;
32592 + atomic_unchecked_t event_nr;
32593 wait_queue_head_t eventq;
32594 - atomic_t uevent_seq;
32595 + atomic_unchecked_t uevent_seq;
32596 struct list_head uevent_list;
32597 spinlock_t uevent_lock; /* Protect access to uevent_list */
32598
32599 @@ -1844,8 +1844,8 @@ static struct mapped_device *alloc_dev(int minor)
32600 rwlock_init(&md->map_lock);
32601 atomic_set(&md->holders, 1);
32602 atomic_set(&md->open_count, 0);
32603 - atomic_set(&md->event_nr, 0);
32604 - atomic_set(&md->uevent_seq, 0);
32605 + atomic_set_unchecked(&md->event_nr, 0);
32606 + atomic_set_unchecked(&md->uevent_seq, 0);
32607 INIT_LIST_HEAD(&md->uevent_list);
32608 spin_lock_init(&md->uevent_lock);
32609
32610 @@ -1979,7 +1979,7 @@ static void event_callback(void *context)
32611
32612 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
32613
32614 - atomic_inc(&md->event_nr);
32615 + atomic_inc_unchecked(&md->event_nr);
32616 wake_up(&md->eventq);
32617 }
32618
32619 @@ -2621,18 +2621,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
32620
32621 uint32_t dm_next_uevent_seq(struct mapped_device *md)
32622 {
32623 - return atomic_add_return(1, &md->uevent_seq);
32624 + return atomic_add_return_unchecked(1, &md->uevent_seq);
32625 }
32626
32627 uint32_t dm_get_event_nr(struct mapped_device *md)
32628 {
32629 - return atomic_read(&md->event_nr);
32630 + return atomic_read_unchecked(&md->event_nr);
32631 }
32632
32633 int dm_wait_event(struct mapped_device *md, int event_nr)
32634 {
32635 return wait_event_interruptible(md->eventq,
32636 - (event_nr != atomic_read(&md->event_nr)));
32637 + (event_nr != atomic_read_unchecked(&md->event_nr)));
32638 }
32639
32640 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
32641 diff --git a/drivers/md/md.c b/drivers/md/md.c
32642 index ce88755..4d8686d 100644
32643 --- a/drivers/md/md.c
32644 +++ b/drivers/md/md.c
32645 @@ -277,10 +277,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
32646 * start build, activate spare
32647 */
32648 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
32649 -static atomic_t md_event_count;
32650 +static atomic_unchecked_t md_event_count;
32651 void md_new_event(struct mddev *mddev)
32652 {
32653 - atomic_inc(&md_event_count);
32654 + atomic_inc_unchecked(&md_event_count);
32655 wake_up(&md_event_waiters);
32656 }
32657 EXPORT_SYMBOL_GPL(md_new_event);
32658 @@ -290,7 +290,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
32659 */
32660 static void md_new_event_inintr(struct mddev *mddev)
32661 {
32662 - atomic_inc(&md_event_count);
32663 + atomic_inc_unchecked(&md_event_count);
32664 wake_up(&md_event_waiters);
32665 }
32666
32667 @@ -1524,7 +1524,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
32668
32669 rdev->preferred_minor = 0xffff;
32670 rdev->data_offset = le64_to_cpu(sb->data_offset);
32671 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
32672 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
32673
32674 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
32675 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
32676 @@ -1743,7 +1743,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
32677 else
32678 sb->resync_offset = cpu_to_le64(0);
32679
32680 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
32681 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
32682
32683 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
32684 sb->size = cpu_to_le64(mddev->dev_sectors);
32685 @@ -2688,7 +2688,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
32686 static ssize_t
32687 errors_show(struct md_rdev *rdev, char *page)
32688 {
32689 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
32690 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
32691 }
32692
32693 static ssize_t
32694 @@ -2697,7 +2697,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
32695 char *e;
32696 unsigned long n = simple_strtoul(buf, &e, 10);
32697 if (*buf && (*e == 0 || *e == '\n')) {
32698 - atomic_set(&rdev->corrected_errors, n);
32699 + atomic_set_unchecked(&rdev->corrected_errors, n);
32700 return len;
32701 }
32702 return -EINVAL;
32703 @@ -3083,8 +3083,8 @@ int md_rdev_init(struct md_rdev *rdev)
32704 rdev->sb_loaded = 0;
32705 rdev->bb_page = NULL;
32706 atomic_set(&rdev->nr_pending, 0);
32707 - atomic_set(&rdev->read_errors, 0);
32708 - atomic_set(&rdev->corrected_errors, 0);
32709 + atomic_set_unchecked(&rdev->read_errors, 0);
32710 + atomic_set_unchecked(&rdev->corrected_errors, 0);
32711
32712 INIT_LIST_HEAD(&rdev->same_set);
32713 init_waitqueue_head(&rdev->blocked_wait);
32714 @@ -6735,7 +6735,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
32715
32716 spin_unlock(&pers_lock);
32717 seq_printf(seq, "\n");
32718 - seq->poll_event = atomic_read(&md_event_count);
32719 + seq->poll_event = atomic_read_unchecked(&md_event_count);
32720 return 0;
32721 }
32722 if (v == (void*)2) {
32723 @@ -6827,7 +6827,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
32724 chunk_kb ? "KB" : "B");
32725 if (bitmap->file) {
32726 seq_printf(seq, ", file: ");
32727 - seq_path(seq, &bitmap->file->f_path, " \t\n");
32728 + seq_path(seq, &bitmap->file->f_path, " \t\n\\");
32729 }
32730
32731 seq_printf(seq, "\n");
32732 @@ -6858,7 +6858,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
32733 return error;
32734
32735 seq = file->private_data;
32736 - seq->poll_event = atomic_read(&md_event_count);
32737 + seq->poll_event = atomic_read_unchecked(&md_event_count);
32738 return error;
32739 }
32740
32741 @@ -6872,7 +6872,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
32742 /* always allow read */
32743 mask = POLLIN | POLLRDNORM;
32744
32745 - if (seq->poll_event != atomic_read(&md_event_count))
32746 + if (seq->poll_event != atomic_read_unchecked(&md_event_count))
32747 mask |= POLLERR | POLLPRI;
32748 return mask;
32749 }
32750 @@ -6916,7 +6916,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
32751 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
32752 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
32753 (int)part_stat_read(&disk->part0, sectors[1]) -
32754 - atomic_read(&disk->sync_io);
32755 + atomic_read_unchecked(&disk->sync_io);
32756 /* sync IO will cause sync_io to increase before the disk_stats
32757 * as sync_io is counted when a request starts, and
32758 * disk_stats is counted when it completes.
32759 diff --git a/drivers/md/md.h b/drivers/md/md.h
32760 index 44c63df..b795d1a 100644
32761 --- a/drivers/md/md.h
32762 +++ b/drivers/md/md.h
32763 @@ -93,13 +93,13 @@ struct md_rdev {
32764 * only maintained for arrays that
32765 * support hot removal
32766 */
32767 - atomic_t read_errors; /* number of consecutive read errors that
32768 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
32769 * we have tried to ignore.
32770 */
32771 struct timespec last_read_error; /* monotonic time since our
32772 * last read error
32773 */
32774 - atomic_t corrected_errors; /* number of corrected read errors,
32775 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
32776 * for reporting to userspace and storing
32777 * in superblock.
32778 */
32779 @@ -421,7 +421,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
32780
32781 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
32782 {
32783 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
32784 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
32785 }
32786
32787 struct md_personality
32788 diff --git a/drivers/md/persistent-data/dm-space-map-checker.c b/drivers/md/persistent-data/dm-space-map-checker.c
32789 index 50ed53b..4f29d7d 100644
32790 --- a/drivers/md/persistent-data/dm-space-map-checker.c
32791 +++ b/drivers/md/persistent-data/dm-space-map-checker.c
32792 @@ -159,7 +159,7 @@ static void ca_destroy(struct count_array *ca)
32793 /*----------------------------------------------------------------*/
32794
32795 struct sm_checker {
32796 - struct dm_space_map sm;
32797 + dm_space_map_no_const sm;
32798
32799 struct count_array old_counts;
32800 struct count_array counts;
32801 diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
32802 index fc469ba..2d91555 100644
32803 --- a/drivers/md/persistent-data/dm-space-map-disk.c
32804 +++ b/drivers/md/persistent-data/dm-space-map-disk.c
32805 @@ -23,7 +23,7 @@
32806 * Space map interface.
32807 */
32808 struct sm_disk {
32809 - struct dm_space_map sm;
32810 + dm_space_map_no_const sm;
32811
32812 struct ll_disk ll;
32813 struct ll_disk old_ll;
32814 diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
32815 index e89ae5e..062e4c2 100644
32816 --- a/drivers/md/persistent-data/dm-space-map-metadata.c
32817 +++ b/drivers/md/persistent-data/dm-space-map-metadata.c
32818 @@ -43,7 +43,7 @@ struct block_op {
32819 };
32820
32821 struct sm_metadata {
32822 - struct dm_space_map sm;
32823 + dm_space_map_no_const sm;
32824
32825 struct ll_disk ll;
32826 struct ll_disk old_ll;
32827 diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
32828 index 1cbfc6b..56e1dbb 100644
32829 --- a/drivers/md/persistent-data/dm-space-map.h
32830 +++ b/drivers/md/persistent-data/dm-space-map.h
32831 @@ -60,6 +60,7 @@ struct dm_space_map {
32832 int (*root_size)(struct dm_space_map *sm, size_t *result);
32833 int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
32834 };
32835 +typedef struct dm_space_map __no_const dm_space_map_no_const;
32836
32837 /*----------------------------------------------------------------*/
32838
32839 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
32840 index a0b225e..a9be913 100644
32841 --- a/drivers/md/raid1.c
32842 +++ b/drivers/md/raid1.c
32843 @@ -1632,7 +1632,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
32844 if (r1_sync_page_io(rdev, sect, s,
32845 bio->bi_io_vec[idx].bv_page,
32846 READ) != 0)
32847 - atomic_add(s, &rdev->corrected_errors);
32848 + atomic_add_unchecked(s, &rdev->corrected_errors);
32849 }
32850 sectors -= s;
32851 sect += s;
32852 @@ -1845,7 +1845,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
32853 test_bit(In_sync, &rdev->flags)) {
32854 if (r1_sync_page_io(rdev, sect, s,
32855 conf->tmppage, READ)) {
32856 - atomic_add(s, &rdev->corrected_errors);
32857 + atomic_add_unchecked(s, &rdev->corrected_errors);
32858 printk(KERN_INFO
32859 "md/raid1:%s: read error corrected "
32860 "(%d sectors at %llu on %s)\n",
32861 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
32862 index 58c44d6..f090bad 100644
32863 --- a/drivers/md/raid10.c
32864 +++ b/drivers/md/raid10.c
32865 @@ -1623,7 +1623,7 @@ static void end_sync_read(struct bio *bio, int error)
32866 /* The write handler will notice the lack of
32867 * R10BIO_Uptodate and record any errors etc
32868 */
32869 - atomic_add(r10_bio->sectors,
32870 + atomic_add_unchecked(r10_bio->sectors,
32871 &conf->mirrors[d].rdev->corrected_errors);
32872
32873 /* for reconstruct, we always reschedule after a read.
32874 @@ -1974,7 +1974,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
32875 {
32876 struct timespec cur_time_mon;
32877 unsigned long hours_since_last;
32878 - unsigned int read_errors = atomic_read(&rdev->read_errors);
32879 + unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
32880
32881 ktime_get_ts(&cur_time_mon);
32882
32883 @@ -1996,9 +1996,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
32884 * overflowing the shift of read_errors by hours_since_last.
32885 */
32886 if (hours_since_last >= 8 * sizeof(read_errors))
32887 - atomic_set(&rdev->read_errors, 0);
32888 + atomic_set_unchecked(&rdev->read_errors, 0);
32889 else
32890 - atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
32891 + atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
32892 }
32893
32894 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
32895 @@ -2052,8 +2052,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
32896 return;
32897
32898 check_decay_read_errors(mddev, rdev);
32899 - atomic_inc(&rdev->read_errors);
32900 - if (atomic_read(&rdev->read_errors) > max_read_errors) {
32901 + atomic_inc_unchecked(&rdev->read_errors);
32902 + if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
32903 char b[BDEVNAME_SIZE];
32904 bdevname(rdev->bdev, b);
32905
32906 @@ -2061,7 +2061,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
32907 "md/raid10:%s: %s: Raid device exceeded "
32908 "read_error threshold [cur %d:max %d]\n",
32909 mdname(mddev), b,
32910 - atomic_read(&rdev->read_errors), max_read_errors);
32911 + atomic_read_unchecked(&rdev->read_errors), max_read_errors);
32912 printk(KERN_NOTICE
32913 "md/raid10:%s: %s: Failing raid device\n",
32914 mdname(mddev), b);
32915 @@ -2210,7 +2210,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
32916 (unsigned long long)(
32917 sect + rdev->data_offset),
32918 bdevname(rdev->bdev, b));
32919 - atomic_add(s, &rdev->corrected_errors);
32920 + atomic_add_unchecked(s, &rdev->corrected_errors);
32921 }
32922
32923 rdev_dec_pending(rdev, mddev);
32924 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
32925 index 360f2b9..08b5382 100644
32926 --- a/drivers/md/raid5.c
32927 +++ b/drivers/md/raid5.c
32928 @@ -1687,18 +1687,18 @@ static void raid5_end_read_request(struct bio * bi, int error)
32929 (unsigned long long)(sh->sector
32930 + rdev->data_offset),
32931 bdevname(rdev->bdev, b));
32932 - atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
32933 + atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
32934 clear_bit(R5_ReadError, &sh->dev[i].flags);
32935 clear_bit(R5_ReWrite, &sh->dev[i].flags);
32936 }
32937 - if (atomic_read(&rdev->read_errors))
32938 - atomic_set(&rdev->read_errors, 0);
32939 + if (atomic_read_unchecked(&rdev->read_errors))
32940 + atomic_set_unchecked(&rdev->read_errors, 0);
32941 } else {
32942 const char *bdn = bdevname(rdev->bdev, b);
32943 int retry = 0;
32944
32945 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
32946 - atomic_inc(&rdev->read_errors);
32947 + atomic_inc_unchecked(&rdev->read_errors);
32948 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
32949 printk_ratelimited(
32950 KERN_WARNING
32951 @@ -1727,7 +1727,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
32952 (unsigned long long)(sh->sector
32953 + rdev->data_offset),
32954 bdn);
32955 - else if (atomic_read(&rdev->read_errors)
32956 + else if (atomic_read_unchecked(&rdev->read_errors)
32957 > conf->max_nr_stripes)
32958 printk(KERN_WARNING
32959 "md/raid:%s: Too many read errors, failing device %s.\n",
32960 diff --git a/drivers/media/dvb/ddbridge/ddbridge-core.c b/drivers/media/dvb/ddbridge/ddbridge-core.c
32961 index ce4f858..7bcfb46 100644
32962 --- a/drivers/media/dvb/ddbridge/ddbridge-core.c
32963 +++ b/drivers/media/dvb/ddbridge/ddbridge-core.c
32964 @@ -1678,7 +1678,7 @@ static struct ddb_info ddb_v6 = {
32965 .subvendor = _subvend, .subdevice = _subdev, \
32966 .driver_data = (unsigned long)&_driverdata }
32967
32968 -static const struct pci_device_id ddb_id_tbl[] __devinitdata = {
32969 +static const struct pci_device_id ddb_id_tbl[] __devinitconst = {
32970 DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
32971 DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
32972 DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
32973 diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
32974 index a7d876f..8c21b61 100644
32975 --- a/drivers/media/dvb/dvb-core/dvb_demux.h
32976 +++ b/drivers/media/dvb/dvb-core/dvb_demux.h
32977 @@ -73,7 +73,7 @@ struct dvb_demux_feed {
32978 union {
32979 dmx_ts_cb ts;
32980 dmx_section_cb sec;
32981 - } cb;
32982 + } __no_const cb;
32983
32984 struct dvb_demux *demux;
32985 void *priv;
32986 diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
32987 index 00a6732..70a682e 100644
32988 --- a/drivers/media/dvb/dvb-core/dvbdev.c
32989 +++ b/drivers/media/dvb/dvb-core/dvbdev.c
32990 @@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
32991 const struct dvb_device *template, void *priv, int type)
32992 {
32993 struct dvb_device *dvbdev;
32994 - struct file_operations *dvbdevfops;
32995 + file_operations_no_const *dvbdevfops;
32996 struct device *clsdev;
32997 int minor;
32998 int id;
32999 diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
33000 index 3940bb0..fb3952a 100644
33001 --- a/drivers/media/dvb/dvb-usb/cxusb.c
33002 +++ b/drivers/media/dvb/dvb-usb/cxusb.c
33003 @@ -1068,7 +1068,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
33004
33005 struct dib0700_adapter_state {
33006 int (*set_param_save) (struct dvb_frontend *);
33007 -};
33008 +} __no_const;
33009
33010 static int dib7070_set_param_override(struct dvb_frontend *fe)
33011 {
33012 diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
33013 index 451c5a7..649f711 100644
33014 --- a/drivers/media/dvb/dvb-usb/dw2102.c
33015 +++ b/drivers/media/dvb/dvb-usb/dw2102.c
33016 @@ -95,7 +95,7 @@ struct su3000_state {
33017
33018 struct s6x0_state {
33019 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
33020 -};
33021 +} __no_const;
33022
33023 /* debug */
33024 static int dvb_usb_dw2102_debug;
33025 diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
33026 index 404f63a..4796533 100644
33027 --- a/drivers/media/dvb/frontends/dib3000.h
33028 +++ b/drivers/media/dvb/frontends/dib3000.h
33029 @@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
33030 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
33031 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
33032 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
33033 -};
33034 +} __no_const;
33035
33036 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
33037 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
33038 diff --git a/drivers/media/dvb/ngene/ngene-cards.c b/drivers/media/dvb/ngene/ngene-cards.c
33039 index 8418c02..8555013 100644
33040 --- a/drivers/media/dvb/ngene/ngene-cards.c
33041 +++ b/drivers/media/dvb/ngene/ngene-cards.c
33042 @@ -477,7 +477,7 @@ static struct ngene_info ngene_info_m780 = {
33043
33044 /****************************************************************************/
33045
33046 -static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
33047 +static const struct pci_device_id ngene_id_tbl[] __devinitconst = {
33048 NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
33049 NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
33050 NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
33051 diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
33052 index 16a089f..ab1667d 100644
33053 --- a/drivers/media/radio/radio-cadet.c
33054 +++ b/drivers/media/radio/radio-cadet.c
33055 @@ -326,6 +326,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
33056 unsigned char readbuf[RDS_BUFFER];
33057 int i = 0;
33058
33059 + if (count > RDS_BUFFER)
33060 + return -EFAULT;
33061 mutex_lock(&dev->lock);
33062 if (dev->rdsstat == 0) {
33063 dev->rdsstat = 1;
33064 diff --git a/drivers/media/video/au0828/au0828.h b/drivers/media/video/au0828/au0828.h
33065 index 9cde353..8c6a1c3 100644
33066 --- a/drivers/media/video/au0828/au0828.h
33067 +++ b/drivers/media/video/au0828/au0828.h
33068 @@ -191,7 +191,7 @@ struct au0828_dev {
33069
33070 /* I2C */
33071 struct i2c_adapter i2c_adap;
33072 - struct i2c_algorithm i2c_algo;
33073 + i2c_algorithm_no_const i2c_algo;
33074 struct i2c_client i2c_client;
33075 u32 i2c_rc;
33076
33077 diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
33078 index 04bf662..e0ac026 100644
33079 --- a/drivers/media/video/cx88/cx88-alsa.c
33080 +++ b/drivers/media/video/cx88/cx88-alsa.c
33081 @@ -766,7 +766,7 @@ static struct snd_kcontrol_new snd_cx88_alc_switch = {
33082 * Only boards with eeprom and byte 1 at eeprom=1 have it
33083 */
33084
33085 -static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitdata = {
33086 +static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitconst = {
33087 {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
33088 {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
33089 {0, }
33090 diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
33091 index 1fb7d5b..3901e77 100644
33092 --- a/drivers/media/video/omap/omap_vout.c
33093 +++ b/drivers/media/video/omap/omap_vout.c
33094 @@ -64,7 +64,6 @@ enum omap_vout_channels {
33095 OMAP_VIDEO2,
33096 };
33097
33098 -static struct videobuf_queue_ops video_vbq_ops;
33099 /* Variables configurable through module params*/
33100 static u32 video1_numbuffers = 3;
33101 static u32 video2_numbuffers = 3;
33102 @@ -1000,6 +999,12 @@ static int omap_vout_open(struct file *file)
33103 {
33104 struct videobuf_queue *q;
33105 struct omap_vout_device *vout = NULL;
33106 + static struct videobuf_queue_ops video_vbq_ops = {
33107 + .buf_setup = omap_vout_buffer_setup,
33108 + .buf_prepare = omap_vout_buffer_prepare,
33109 + .buf_release = omap_vout_buffer_release,
33110 + .buf_queue = omap_vout_buffer_queue,
33111 + };
33112
33113 vout = video_drvdata(file);
33114 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
33115 @@ -1017,10 +1022,6 @@ static int omap_vout_open(struct file *file)
33116 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
33117
33118 q = &vout->vbq;
33119 - video_vbq_ops.buf_setup = omap_vout_buffer_setup;
33120 - video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
33121 - video_vbq_ops.buf_release = omap_vout_buffer_release;
33122 - video_vbq_ops.buf_queue = omap_vout_buffer_queue;
33123 spin_lock_init(&vout->vbq_lock);
33124
33125 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
33126 diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
33127 index 305e6aa..0143317 100644
33128 --- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
33129 +++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
33130 @@ -196,7 +196,7 @@ struct pvr2_hdw {
33131
33132 /* I2C stuff */
33133 struct i2c_adapter i2c_adap;
33134 - struct i2c_algorithm i2c_algo;
33135 + i2c_algorithm_no_const i2c_algo;
33136 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
33137 int i2c_cx25840_hack_state;
33138 int i2c_linked;
33139 diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c
33140 index 4ed1c7c2..8f15e13 100644
33141 --- a/drivers/media/video/timblogiw.c
33142 +++ b/drivers/media/video/timblogiw.c
33143 @@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *file, struct vm_area_struct *vma)
33144
33145 /* Platform device functions */
33146
33147 -static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
33148 +static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
33149 .vidioc_querycap = timblogiw_querycap,
33150 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
33151 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
33152 @@ -767,7 +767,7 @@ static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
33153 .vidioc_enum_framesizes = timblogiw_enum_framesizes,
33154 };
33155
33156 -static __devinitconst struct v4l2_file_operations timblogiw_fops = {
33157 +static __devinitconst v4l2_file_operations_no_const timblogiw_fops = {
33158 .owner = THIS_MODULE,
33159 .open = timblogiw_open,
33160 .release = timblogiw_close,
33161 diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
33162 index a7dc467..a55c423 100644
33163 --- a/drivers/message/fusion/mptbase.c
33164 +++ b/drivers/message/fusion/mptbase.c
33165 @@ -6754,8 +6754,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
33166 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
33167 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
33168
33169 +#ifdef CONFIG_GRKERNSEC_HIDESYM
33170 + seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
33171 +#else
33172 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
33173 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
33174 +#endif
33175 +
33176 /*
33177 * Rounding UP to nearest 4-kB boundary here...
33178 */
33179 diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
33180 index 551262e..7551198 100644
33181 --- a/drivers/message/fusion/mptsas.c
33182 +++ b/drivers/message/fusion/mptsas.c
33183 @@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
33184 return 0;
33185 }
33186
33187 +static inline void
33188 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
33189 +{
33190 + if (phy_info->port_details) {
33191 + phy_info->port_details->rphy = rphy;
33192 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
33193 + ioc->name, rphy));
33194 + }
33195 +
33196 + if (rphy) {
33197 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
33198 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
33199 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
33200 + ioc->name, rphy, rphy->dev.release));
33201 + }
33202 +}
33203 +
33204 /* no mutex */
33205 static void
33206 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
33207 @@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
33208 return NULL;
33209 }
33210
33211 -static inline void
33212 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
33213 -{
33214 - if (phy_info->port_details) {
33215 - phy_info->port_details->rphy = rphy;
33216 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
33217 - ioc->name, rphy));
33218 - }
33219 -
33220 - if (rphy) {
33221 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
33222 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
33223 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
33224 - ioc->name, rphy, rphy->dev.release));
33225 - }
33226 -}
33227 -
33228 static inline struct sas_port *
33229 mptsas_get_port(struct mptsas_phyinfo *phy_info)
33230 {
33231 diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
33232 index 0c3ced7..1fe34ec 100644
33233 --- a/drivers/message/fusion/mptscsih.c
33234 +++ b/drivers/message/fusion/mptscsih.c
33235 @@ -1270,15 +1270,16 @@ mptscsih_info(struct Scsi_Host *SChost)
33236
33237 h = shost_priv(SChost);
33238
33239 - if (h) {
33240 - if (h->info_kbuf == NULL)
33241 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
33242 - return h->info_kbuf;
33243 - h->info_kbuf[0] = '\0';
33244 + if (!h)
33245 + return NULL;
33246
33247 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
33248 - h->info_kbuf[size-1] = '\0';
33249 - }
33250 + if (h->info_kbuf == NULL)
33251 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
33252 + return h->info_kbuf;
33253 + h->info_kbuf[0] = '\0';
33254 +
33255 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
33256 + h->info_kbuf[size-1] = '\0';
33257
33258 return h->info_kbuf;
33259 }
33260 diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
33261 index 6d115c7..58ff7fd 100644
33262 --- a/drivers/message/i2o/i2o_proc.c
33263 +++ b/drivers/message/i2o/i2o_proc.c
33264 @@ -255,13 +255,6 @@ static char *scsi_devices[] = {
33265 "Array Controller Device"
33266 };
33267
33268 -static char *chtostr(u8 * chars, int n)
33269 -{
33270 - char tmp[256];
33271 - tmp[0] = 0;
33272 - return strncat(tmp, (char *)chars, n);
33273 -}
33274 -
33275 static int i2o_report_query_status(struct seq_file *seq, int block_status,
33276 char *group)
33277 {
33278 @@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
33279
33280 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
33281 seq_printf(seq, "%-#8x", ddm_table.module_id);
33282 - seq_printf(seq, "%-29s",
33283 - chtostr(ddm_table.module_name_version, 28));
33284 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
33285 seq_printf(seq, "%9d ", ddm_table.data_size);
33286 seq_printf(seq, "%8d", ddm_table.code_size);
33287
33288 @@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
33289
33290 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
33291 seq_printf(seq, "%-#8x", dst->module_id);
33292 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
33293 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
33294 + seq_printf(seq, "%-.28s", dst->module_name_version);
33295 + seq_printf(seq, "%-.8s", dst->date);
33296 seq_printf(seq, "%8d ", dst->module_size);
33297 seq_printf(seq, "%8d ", dst->mpb_size);
33298 seq_printf(seq, "0x%04x", dst->module_flags);
33299 @@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
33300 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
33301 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
33302 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
33303 - seq_printf(seq, "Vendor info : %s\n",
33304 - chtostr((u8 *) (work32 + 2), 16));
33305 - seq_printf(seq, "Product info : %s\n",
33306 - chtostr((u8 *) (work32 + 6), 16));
33307 - seq_printf(seq, "Description : %s\n",
33308 - chtostr((u8 *) (work32 + 10), 16));
33309 - seq_printf(seq, "Product rev. : %s\n",
33310 - chtostr((u8 *) (work32 + 14), 8));
33311 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
33312 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
33313 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
33314 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
33315
33316 seq_printf(seq, "Serial number : ");
33317 print_serial_number(seq, (u8 *) (work32 + 16),
33318 @@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
33319 }
33320
33321 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
33322 - seq_printf(seq, "Module name : %s\n",
33323 - chtostr(result.module_name, 24));
33324 - seq_printf(seq, "Module revision : %s\n",
33325 - chtostr(result.module_rev, 8));
33326 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
33327 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
33328
33329 seq_printf(seq, "Serial number : ");
33330 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
33331 @@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
33332 return 0;
33333 }
33334
33335 - seq_printf(seq, "Device name : %s\n",
33336 - chtostr(result.device_name, 64));
33337 - seq_printf(seq, "Service name : %s\n",
33338 - chtostr(result.service_name, 64));
33339 - seq_printf(seq, "Physical name : %s\n",
33340 - chtostr(result.physical_location, 64));
33341 - seq_printf(seq, "Instance number : %s\n",
33342 - chtostr(result.instance_number, 4));
33343 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
33344 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
33345 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
33346 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
33347
33348 return 0;
33349 }
33350 diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
33351 index a8c08f3..155fe3d 100644
33352 --- a/drivers/message/i2o/iop.c
33353 +++ b/drivers/message/i2o/iop.c
33354 @@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
33355
33356 spin_lock_irqsave(&c->context_list_lock, flags);
33357
33358 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
33359 - atomic_inc(&c->context_list_counter);
33360 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
33361 + atomic_inc_unchecked(&c->context_list_counter);
33362
33363 - entry->context = atomic_read(&c->context_list_counter);
33364 + entry->context = atomic_read_unchecked(&c->context_list_counter);
33365
33366 list_add(&entry->list, &c->context_list);
33367
33368 @@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
33369
33370 #if BITS_PER_LONG == 64
33371 spin_lock_init(&c->context_list_lock);
33372 - atomic_set(&c->context_list_counter, 0);
33373 + atomic_set_unchecked(&c->context_list_counter, 0);
33374 INIT_LIST_HEAD(&c->context_list);
33375 #endif
33376
33377 diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c
33378 index 7ce65f4..e66e9bc 100644
33379 --- a/drivers/mfd/abx500-core.c
33380 +++ b/drivers/mfd/abx500-core.c
33381 @@ -15,7 +15,7 @@ static LIST_HEAD(abx500_list);
33382
33383 struct abx500_device_entry {
33384 struct list_head list;
33385 - struct abx500_ops ops;
33386 + abx500_ops_no_const ops;
33387 struct device *dev;
33388 };
33389
33390 diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
33391 index a9223ed..4127b13 100644
33392 --- a/drivers/mfd/janz-cmodio.c
33393 +++ b/drivers/mfd/janz-cmodio.c
33394 @@ -13,6 +13,7 @@
33395
33396 #include <linux/kernel.h>
33397 #include <linux/module.h>
33398 +#include <linux/slab.h>
33399 #include <linux/init.h>
33400 #include <linux/pci.h>
33401 #include <linux/interrupt.h>
33402 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
33403 index a981e2a..5ca0c8b 100644
33404 --- a/drivers/misc/lis3lv02d/lis3lv02d.c
33405 +++ b/drivers/misc/lis3lv02d/lis3lv02d.c
33406 @@ -466,7 +466,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
33407 * the lid is closed. This leads to interrupts as soon as a little move
33408 * is done.
33409 */
33410 - atomic_inc(&lis3->count);
33411 + atomic_inc_unchecked(&lis3->count);
33412
33413 wake_up_interruptible(&lis3->misc_wait);
33414 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
33415 @@ -552,7 +552,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
33416 if (lis3->pm_dev)
33417 pm_runtime_get_sync(lis3->pm_dev);
33418
33419 - atomic_set(&lis3->count, 0);
33420 + atomic_set_unchecked(&lis3->count, 0);
33421 return 0;
33422 }
33423
33424 @@ -585,7 +585,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
33425 add_wait_queue(&lis3->misc_wait, &wait);
33426 while (true) {
33427 set_current_state(TASK_INTERRUPTIBLE);
33428 - data = atomic_xchg(&lis3->count, 0);
33429 + data = atomic_xchg_unchecked(&lis3->count, 0);
33430 if (data)
33431 break;
33432
33433 @@ -626,7 +626,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
33434 struct lis3lv02d, miscdev);
33435
33436 poll_wait(file, &lis3->misc_wait, wait);
33437 - if (atomic_read(&lis3->count))
33438 + if (atomic_read_unchecked(&lis3->count))
33439 return POLLIN | POLLRDNORM;
33440 return 0;
33441 }
33442 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
33443 index 2b1482a..5d33616 100644
33444 --- a/drivers/misc/lis3lv02d/lis3lv02d.h
33445 +++ b/drivers/misc/lis3lv02d/lis3lv02d.h
33446 @@ -266,7 +266,7 @@ struct lis3lv02d {
33447 struct input_polled_dev *idev; /* input device */
33448 struct platform_device *pdev; /* platform device */
33449 struct regulator_bulk_data regulators[2];
33450 - atomic_t count; /* interrupt count after last read */
33451 + atomic_unchecked_t count; /* interrupt count after last read */
33452 union axis_conversion ac; /* hw -> logical axis */
33453 int mapped_btns[3];
33454
33455 diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
33456 index 2f30bad..c4c13d0 100644
33457 --- a/drivers/misc/sgi-gru/gruhandles.c
33458 +++ b/drivers/misc/sgi-gru/gruhandles.c
33459 @@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
33460 unsigned long nsec;
33461
33462 nsec = CLKS2NSEC(clks);
33463 - atomic_long_inc(&mcs_op_statistics[op].count);
33464 - atomic_long_add(nsec, &mcs_op_statistics[op].total);
33465 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
33466 + atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
33467 if (mcs_op_statistics[op].max < nsec)
33468 mcs_op_statistics[op].max = nsec;
33469 }
33470 diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
33471 index 950dbe9..eeef0f8 100644
33472 --- a/drivers/misc/sgi-gru/gruprocfs.c
33473 +++ b/drivers/misc/sgi-gru/gruprocfs.c
33474 @@ -32,9 +32,9 @@
33475
33476 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
33477
33478 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
33479 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
33480 {
33481 - unsigned long val = atomic_long_read(v);
33482 + unsigned long val = atomic_long_read_unchecked(v);
33483
33484 seq_printf(s, "%16lu %s\n", val, id);
33485 }
33486 @@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
33487
33488 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
33489 for (op = 0; op < mcsop_last; op++) {
33490 - count = atomic_long_read(&mcs_op_statistics[op].count);
33491 - total = atomic_long_read(&mcs_op_statistics[op].total);
33492 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
33493 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
33494 max = mcs_op_statistics[op].max;
33495 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
33496 count ? total / count : 0, max);
33497 diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
33498 index 5c3ce24..4915ccb 100644
33499 --- a/drivers/misc/sgi-gru/grutables.h
33500 +++ b/drivers/misc/sgi-gru/grutables.h
33501 @@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
33502 * GRU statistics.
33503 */
33504 struct gru_stats_s {
33505 - atomic_long_t vdata_alloc;
33506 - atomic_long_t vdata_free;
33507 - atomic_long_t gts_alloc;
33508 - atomic_long_t gts_free;
33509 - atomic_long_t gms_alloc;
33510 - atomic_long_t gms_free;
33511 - atomic_long_t gts_double_allocate;
33512 - atomic_long_t assign_context;
33513 - atomic_long_t assign_context_failed;
33514 - atomic_long_t free_context;
33515 - atomic_long_t load_user_context;
33516 - atomic_long_t load_kernel_context;
33517 - atomic_long_t lock_kernel_context;
33518 - atomic_long_t unlock_kernel_context;
33519 - atomic_long_t steal_user_context;
33520 - atomic_long_t steal_kernel_context;
33521 - atomic_long_t steal_context_failed;
33522 - atomic_long_t nopfn;
33523 - atomic_long_t asid_new;
33524 - atomic_long_t asid_next;
33525 - atomic_long_t asid_wrap;
33526 - atomic_long_t asid_reuse;
33527 - atomic_long_t intr;
33528 - atomic_long_t intr_cbr;
33529 - atomic_long_t intr_tfh;
33530 - atomic_long_t intr_spurious;
33531 - atomic_long_t intr_mm_lock_failed;
33532 - atomic_long_t call_os;
33533 - atomic_long_t call_os_wait_queue;
33534 - atomic_long_t user_flush_tlb;
33535 - atomic_long_t user_unload_context;
33536 - atomic_long_t user_exception;
33537 - atomic_long_t set_context_option;
33538 - atomic_long_t check_context_retarget_intr;
33539 - atomic_long_t check_context_unload;
33540 - atomic_long_t tlb_dropin;
33541 - atomic_long_t tlb_preload_page;
33542 - atomic_long_t tlb_dropin_fail_no_asid;
33543 - atomic_long_t tlb_dropin_fail_upm;
33544 - atomic_long_t tlb_dropin_fail_invalid;
33545 - atomic_long_t tlb_dropin_fail_range_active;
33546 - atomic_long_t tlb_dropin_fail_idle;
33547 - atomic_long_t tlb_dropin_fail_fmm;
33548 - atomic_long_t tlb_dropin_fail_no_exception;
33549 - atomic_long_t tfh_stale_on_fault;
33550 - atomic_long_t mmu_invalidate_range;
33551 - atomic_long_t mmu_invalidate_page;
33552 - atomic_long_t flush_tlb;
33553 - atomic_long_t flush_tlb_gru;
33554 - atomic_long_t flush_tlb_gru_tgh;
33555 - atomic_long_t flush_tlb_gru_zero_asid;
33556 + atomic_long_unchecked_t vdata_alloc;
33557 + atomic_long_unchecked_t vdata_free;
33558 + atomic_long_unchecked_t gts_alloc;
33559 + atomic_long_unchecked_t gts_free;
33560 + atomic_long_unchecked_t gms_alloc;
33561 + atomic_long_unchecked_t gms_free;
33562 + atomic_long_unchecked_t gts_double_allocate;
33563 + atomic_long_unchecked_t assign_context;
33564 + atomic_long_unchecked_t assign_context_failed;
33565 + atomic_long_unchecked_t free_context;
33566 + atomic_long_unchecked_t load_user_context;
33567 + atomic_long_unchecked_t load_kernel_context;
33568 + atomic_long_unchecked_t lock_kernel_context;
33569 + atomic_long_unchecked_t unlock_kernel_context;
33570 + atomic_long_unchecked_t steal_user_context;
33571 + atomic_long_unchecked_t steal_kernel_context;
33572 + atomic_long_unchecked_t steal_context_failed;
33573 + atomic_long_unchecked_t nopfn;
33574 + atomic_long_unchecked_t asid_new;
33575 + atomic_long_unchecked_t asid_next;
33576 + atomic_long_unchecked_t asid_wrap;
33577 + atomic_long_unchecked_t asid_reuse;
33578 + atomic_long_unchecked_t intr;
33579 + atomic_long_unchecked_t intr_cbr;
33580 + atomic_long_unchecked_t intr_tfh;
33581 + atomic_long_unchecked_t intr_spurious;
33582 + atomic_long_unchecked_t intr_mm_lock_failed;
33583 + atomic_long_unchecked_t call_os;
33584 + atomic_long_unchecked_t call_os_wait_queue;
33585 + atomic_long_unchecked_t user_flush_tlb;
33586 + atomic_long_unchecked_t user_unload_context;
33587 + atomic_long_unchecked_t user_exception;
33588 + atomic_long_unchecked_t set_context_option;
33589 + atomic_long_unchecked_t check_context_retarget_intr;
33590 + atomic_long_unchecked_t check_context_unload;
33591 + atomic_long_unchecked_t tlb_dropin;
33592 + atomic_long_unchecked_t tlb_preload_page;
33593 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
33594 + atomic_long_unchecked_t tlb_dropin_fail_upm;
33595 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
33596 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
33597 + atomic_long_unchecked_t tlb_dropin_fail_idle;
33598 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
33599 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
33600 + atomic_long_unchecked_t tfh_stale_on_fault;
33601 + atomic_long_unchecked_t mmu_invalidate_range;
33602 + atomic_long_unchecked_t mmu_invalidate_page;
33603 + atomic_long_unchecked_t flush_tlb;
33604 + atomic_long_unchecked_t flush_tlb_gru;
33605 + atomic_long_unchecked_t flush_tlb_gru_tgh;
33606 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
33607
33608 - atomic_long_t copy_gpa;
33609 - atomic_long_t read_gpa;
33610 + atomic_long_unchecked_t copy_gpa;
33611 + atomic_long_unchecked_t read_gpa;
33612
33613 - atomic_long_t mesq_receive;
33614 - atomic_long_t mesq_receive_none;
33615 - atomic_long_t mesq_send;
33616 - atomic_long_t mesq_send_failed;
33617 - atomic_long_t mesq_noop;
33618 - atomic_long_t mesq_send_unexpected_error;
33619 - atomic_long_t mesq_send_lb_overflow;
33620 - atomic_long_t mesq_send_qlimit_reached;
33621 - atomic_long_t mesq_send_amo_nacked;
33622 - atomic_long_t mesq_send_put_nacked;
33623 - atomic_long_t mesq_page_overflow;
33624 - atomic_long_t mesq_qf_locked;
33625 - atomic_long_t mesq_qf_noop_not_full;
33626 - atomic_long_t mesq_qf_switch_head_failed;
33627 - atomic_long_t mesq_qf_unexpected_error;
33628 - atomic_long_t mesq_noop_unexpected_error;
33629 - atomic_long_t mesq_noop_lb_overflow;
33630 - atomic_long_t mesq_noop_qlimit_reached;
33631 - atomic_long_t mesq_noop_amo_nacked;
33632 - atomic_long_t mesq_noop_put_nacked;
33633 - atomic_long_t mesq_noop_page_overflow;
33634 + atomic_long_unchecked_t mesq_receive;
33635 + atomic_long_unchecked_t mesq_receive_none;
33636 + atomic_long_unchecked_t mesq_send;
33637 + atomic_long_unchecked_t mesq_send_failed;
33638 + atomic_long_unchecked_t mesq_noop;
33639 + atomic_long_unchecked_t mesq_send_unexpected_error;
33640 + atomic_long_unchecked_t mesq_send_lb_overflow;
33641 + atomic_long_unchecked_t mesq_send_qlimit_reached;
33642 + atomic_long_unchecked_t mesq_send_amo_nacked;
33643 + atomic_long_unchecked_t mesq_send_put_nacked;
33644 + atomic_long_unchecked_t mesq_page_overflow;
33645 + atomic_long_unchecked_t mesq_qf_locked;
33646 + atomic_long_unchecked_t mesq_qf_noop_not_full;
33647 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
33648 + atomic_long_unchecked_t mesq_qf_unexpected_error;
33649 + atomic_long_unchecked_t mesq_noop_unexpected_error;
33650 + atomic_long_unchecked_t mesq_noop_lb_overflow;
33651 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
33652 + atomic_long_unchecked_t mesq_noop_amo_nacked;
33653 + atomic_long_unchecked_t mesq_noop_put_nacked;
33654 + atomic_long_unchecked_t mesq_noop_page_overflow;
33655
33656 };
33657
33658 @@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
33659 tghop_invalidate, mcsop_last};
33660
33661 struct mcs_op_statistic {
33662 - atomic_long_t count;
33663 - atomic_long_t total;
33664 + atomic_long_unchecked_t count;
33665 + atomic_long_unchecked_t total;
33666 unsigned long max;
33667 };
33668
33669 @@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
33670
33671 #define STAT(id) do { \
33672 if (gru_options & OPT_STATS) \
33673 - atomic_long_inc(&gru_stats.id); \
33674 + atomic_long_inc_unchecked(&gru_stats.id); \
33675 } while (0)
33676
33677 #ifdef CONFIG_SGI_GRU_DEBUG
33678 diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
33679 index 851b2f2..a4ec097 100644
33680 --- a/drivers/misc/sgi-xp/xp.h
33681 +++ b/drivers/misc/sgi-xp/xp.h
33682 @@ -289,7 +289,7 @@ struct xpc_interface {
33683 xpc_notify_func, void *);
33684 void (*received) (short, int, void *);
33685 enum xp_retval (*partid_to_nasids) (short, void *);
33686 -};
33687 +} __no_const;
33688
33689 extern struct xpc_interface xpc_interface;
33690
33691 diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
33692 index b94d5f7..7f494c5 100644
33693 --- a/drivers/misc/sgi-xp/xpc.h
33694 +++ b/drivers/misc/sgi-xp/xpc.h
33695 @@ -835,6 +835,7 @@ struct xpc_arch_operations {
33696 void (*received_payload) (struct xpc_channel *, void *);
33697 void (*notify_senders_of_disconnect) (struct xpc_channel *);
33698 };
33699 +typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
33700
33701 /* struct xpc_partition act_state values (for XPC HB) */
33702
33703 @@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
33704 /* found in xpc_main.c */
33705 extern struct device *xpc_part;
33706 extern struct device *xpc_chan;
33707 -extern struct xpc_arch_operations xpc_arch_ops;
33708 +extern xpc_arch_operations_no_const xpc_arch_ops;
33709 extern int xpc_disengage_timelimit;
33710 extern int xpc_disengage_timedout;
33711 extern int xpc_activate_IRQ_rcvd;
33712 diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
33713 index 8d082b4..aa749ae 100644
33714 --- a/drivers/misc/sgi-xp/xpc_main.c
33715 +++ b/drivers/misc/sgi-xp/xpc_main.c
33716 @@ -162,7 +162,7 @@ static struct notifier_block xpc_die_notifier = {
33717 .notifier_call = xpc_system_die,
33718 };
33719
33720 -struct xpc_arch_operations xpc_arch_ops;
33721 +xpc_arch_operations_no_const xpc_arch_ops;
33722
33723 /*
33724 * Timer function to enforce the timelimit on the partition disengage.
33725 diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
33726 index 6ebdc40..9edf5d8 100644
33727 --- a/drivers/mmc/host/sdhci-pci.c
33728 +++ b/drivers/mmc/host/sdhci-pci.c
33729 @@ -631,7 +631,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
33730 .probe = via_probe,
33731 };
33732
33733 -static const struct pci_device_id pci_ids[] __devinitdata = {
33734 +static const struct pci_device_id pci_ids[] __devinitconst = {
33735 {
33736 .vendor = PCI_VENDOR_ID_RICOH,
33737 .device = PCI_DEVICE_ID_RICOH_R5C822,
33738 diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
33739 index b1cdf64..ce6e438 100644
33740 --- a/drivers/mtd/devices/doc2000.c
33741 +++ b/drivers/mtd/devices/doc2000.c
33742 @@ -764,7 +764,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
33743
33744 /* The ECC will not be calculated correctly if less than 512 is written */
33745 /* DBB-
33746 - if (len != 0x200 && eccbuf)
33747 + if (len != 0x200)
33748 printk(KERN_WARNING
33749 "ECC needs a full sector write (adr: %lx size %lx)\n",
33750 (long) to, (long) len);
33751 diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
33752 index 7543b98..7069947 100644
33753 --- a/drivers/mtd/devices/doc2001.c
33754 +++ b/drivers/mtd/devices/doc2001.c
33755 @@ -384,7 +384,7 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
33756 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
33757
33758 /* Don't allow read past end of device */
33759 - if (from >= this->totlen)
33760 + if (from >= this->totlen || !len)
33761 return -EINVAL;
33762
33763 /* Don't allow a single read to cross a 512-byte block boundary */
33764 diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
33765 index 3984d48..28aa897 100644
33766 --- a/drivers/mtd/nand/denali.c
33767 +++ b/drivers/mtd/nand/denali.c
33768 @@ -26,6 +26,7 @@
33769 #include <linux/pci.h>
33770 #include <linux/mtd/mtd.h>
33771 #include <linux/module.h>
33772 +#include <linux/slab.h>
33773
33774 #include "denali.h"
33775
33776 diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
33777 index 51b9d6a..52af9a7 100644
33778 --- a/drivers/mtd/nftlmount.c
33779 +++ b/drivers/mtd/nftlmount.c
33780 @@ -24,6 +24,7 @@
33781 #include <asm/errno.h>
33782 #include <linux/delay.h>
33783 #include <linux/slab.h>
33784 +#include <linux/sched.h>
33785 #include <linux/mtd/mtd.h>
33786 #include <linux/mtd/nand.h>
33787 #include <linux/mtd/nftl.h>
33788 diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
33789 index 115749f..3021f01 100644
33790 --- a/drivers/mtd/ubi/build.c
33791 +++ b/drivers/mtd/ubi/build.c
33792 @@ -1311,7 +1311,7 @@ module_exit(ubi_exit);
33793 static int __init bytes_str_to_int(const char *str)
33794 {
33795 char *endp;
33796 - unsigned long result;
33797 + unsigned long result, scale = 1;
33798
33799 result = simple_strtoul(str, &endp, 0);
33800 if (str == endp || result >= INT_MAX) {
33801 @@ -1322,11 +1322,11 @@ static int __init bytes_str_to_int(const char *str)
33802
33803 switch (*endp) {
33804 case 'G':
33805 - result *= 1024;
33806 + scale *= 1024;
33807 case 'M':
33808 - result *= 1024;
33809 + scale *= 1024;
33810 case 'K':
33811 - result *= 1024;
33812 + scale *= 1024;
33813 if (endp[1] == 'i' && endp[2] == 'B')
33814 endp += 2;
33815 case '\0':
33816 @@ -1337,7 +1337,13 @@ static int __init bytes_str_to_int(const char *str)
33817 return -EINVAL;
33818 }
33819
33820 - return result;
33821 + if ((intoverflow_t)result*scale >= INT_MAX) {
33822 + printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
33823 + str);
33824 + return -EINVAL;
33825 + }
33826 +
33827 + return result*scale;
33828 }
33829
33830 /**
33831 diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
33832 index 071f4c8..440862e 100644
33833 --- a/drivers/net/ethernet/atheros/atlx/atl2.c
33834 +++ b/drivers/net/ethernet/atheros/atlx/atl2.c
33835 @@ -2862,7 +2862,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
33836 */
33837
33838 #define ATL2_PARAM(X, desc) \
33839 - static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
33840 + static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
33841 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
33842 MODULE_PARM_DESC(X, desc);
33843 #else
33844 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
33845 index 66da39f..5dc436d 100644
33846 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
33847 +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
33848 @@ -473,7 +473,7 @@ struct bnx2x_rx_mode_obj {
33849
33850 int (*wait_comp)(struct bnx2x *bp,
33851 struct bnx2x_rx_mode_ramrod_params *p);
33852 -};
33853 +} __no_const;
33854
33855 /********************** Set multicast group ***********************************/
33856
33857 diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
33858 index aea8f72..fcebf75 100644
33859 --- a/drivers/net/ethernet/broadcom/tg3.h
33860 +++ b/drivers/net/ethernet/broadcom/tg3.h
33861 @@ -140,6 +140,7 @@
33862 #define CHIPREV_ID_5750_A0 0x4000
33863 #define CHIPREV_ID_5750_A1 0x4001
33864 #define CHIPREV_ID_5750_A3 0x4003
33865 +#define CHIPREV_ID_5750_C1 0x4201
33866 #define CHIPREV_ID_5750_C2 0x4202
33867 #define CHIPREV_ID_5752_A0_HW 0x5000
33868 #define CHIPREV_ID_5752_A0 0x6000
33869 diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
33870 index c4e8643..0979484 100644
33871 --- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
33872 +++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
33873 @@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
33874 */
33875 struct l2t_skb_cb {
33876 arp_failure_handler_func arp_failure_handler;
33877 -};
33878 +} __no_const;
33879
33880 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
33881
33882 diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
33883 index 4d71f5a..8004440 100644
33884 --- a/drivers/net/ethernet/dec/tulip/de4x5.c
33885 +++ b/drivers/net/ethernet/dec/tulip/de4x5.c
33886 @@ -5392,7 +5392,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
33887 for (i=0; i<ETH_ALEN; i++) {
33888 tmp.addr[i] = dev->dev_addr[i];
33889 }
33890 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
33891 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
33892 break;
33893
33894 case DE4X5_SET_HWADDR: /* Set the hardware address */
33895 @@ -5432,7 +5432,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
33896 spin_lock_irqsave(&lp->lock, flags);
33897 memcpy(&statbuf, &lp->pktStats, ioc->len);
33898 spin_unlock_irqrestore(&lp->lock, flags);
33899 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
33900 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
33901 return -EFAULT;
33902 break;
33903 }
33904 diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c
33905 index 14d5b61..1398636 100644
33906 --- a/drivers/net/ethernet/dec/tulip/eeprom.c
33907 +++ b/drivers/net/ethernet/dec/tulip/eeprom.c
33908 @@ -79,7 +79,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
33909 {NULL}};
33910
33911
33912 -static const char *block_name[] __devinitdata = {
33913 +static const char *block_name[] __devinitconst = {
33914 "21140 non-MII",
33915 "21140 MII PHY",
33916 "21142 Serial PHY",
33917 diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
33918 index 52da7b2..4ddfe1c 100644
33919 --- a/drivers/net/ethernet/dec/tulip/winbond-840.c
33920 +++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
33921 @@ -236,7 +236,7 @@ struct pci_id_info {
33922 int drv_flags; /* Driver use, intended as capability flags. */
33923 };
33924
33925 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
33926 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
33927 { /* Sometime a Level-One switch card. */
33928 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
33929 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
33930 diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
33931 index 28a3a9b..d96cb63 100644
33932 --- a/drivers/net/ethernet/dlink/sundance.c
33933 +++ b/drivers/net/ethernet/dlink/sundance.c
33934 @@ -218,7 +218,7 @@ enum {
33935 struct pci_id_info {
33936 const char *name;
33937 };
33938 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
33939 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
33940 {"D-Link DFE-550TX FAST Ethernet Adapter"},
33941 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
33942 {"D-Link DFE-580TX 4 port Server Adapter"},
33943 diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
33944 index e703d64..d62ecf9 100644
33945 --- a/drivers/net/ethernet/emulex/benet/be_main.c
33946 +++ b/drivers/net/ethernet/emulex/benet/be_main.c
33947 @@ -402,7 +402,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
33948
33949 if (wrapped)
33950 newacc += 65536;
33951 - ACCESS_ONCE(*acc) = newacc;
33952 + ACCESS_ONCE_RW(*acc) = newacc;
33953 }
33954
33955 void be_parse_stats(struct be_adapter *adapter)
33956 diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
33957 index 47f85c3..82ab6c4 100644
33958 --- a/drivers/net/ethernet/faraday/ftgmac100.c
33959 +++ b/drivers/net/ethernet/faraday/ftgmac100.c
33960 @@ -31,6 +31,8 @@
33961 #include <linux/netdevice.h>
33962 #include <linux/phy.h>
33963 #include <linux/platform_device.h>
33964 +#include <linux/interrupt.h>
33965 +#include <linux/irqreturn.h>
33966 #include <net/ip.h>
33967
33968 #include "ftgmac100.h"
33969 diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
33970 index bb336a0..4b472da 100644
33971 --- a/drivers/net/ethernet/faraday/ftmac100.c
33972 +++ b/drivers/net/ethernet/faraday/ftmac100.c
33973 @@ -31,6 +31,8 @@
33974 #include <linux/module.h>
33975 #include <linux/netdevice.h>
33976 #include <linux/platform_device.h>
33977 +#include <linux/interrupt.h>
33978 +#include <linux/irqreturn.h>
33979
33980 #include "ftmac100.h"
33981
33982 diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
33983 index c82d444..0007fb4 100644
33984 --- a/drivers/net/ethernet/fealnx.c
33985 +++ b/drivers/net/ethernet/fealnx.c
33986 @@ -150,7 +150,7 @@ struct chip_info {
33987 int flags;
33988 };
33989
33990 -static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
33991 +static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
33992 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
33993 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
33994 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
33995 diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
33996 index e1159e5..e18684d 100644
33997 --- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
33998 +++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
33999 @@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
34000 {
34001 struct e1000_hw *hw = &adapter->hw;
34002 struct e1000_mac_info *mac = &hw->mac;
34003 - struct e1000_mac_operations *func = &mac->ops;
34004 + e1000_mac_operations_no_const *func = &mac->ops;
34005
34006 /* Set media type */
34007 switch (adapter->pdev->device) {
34008 diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
34009 index a3e65fd..f451444 100644
34010 --- a/drivers/net/ethernet/intel/e1000e/82571.c
34011 +++ b/drivers/net/ethernet/intel/e1000e/82571.c
34012 @@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
34013 {
34014 struct e1000_hw *hw = &adapter->hw;
34015 struct e1000_mac_info *mac = &hw->mac;
34016 - struct e1000_mac_operations *func = &mac->ops;
34017 + e1000_mac_operations_no_const *func = &mac->ops;
34018 u32 swsm = 0;
34019 u32 swsm2 = 0;
34020 bool force_clear_smbi = false;
34021 diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
34022 index 2967039..ca8c40c 100644
34023 --- a/drivers/net/ethernet/intel/e1000e/hw.h
34024 +++ b/drivers/net/ethernet/intel/e1000e/hw.h
34025 @@ -778,6 +778,7 @@ struct e1000_mac_operations {
34026 void (*write_vfta)(struct e1000_hw *, u32, u32);
34027 s32 (*read_mac_addr)(struct e1000_hw *);
34028 };
34029 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
34030
34031 /*
34032 * When to use various PHY register access functions:
34033 @@ -818,6 +819,7 @@ struct e1000_phy_operations {
34034 void (*power_up)(struct e1000_hw *);
34035 void (*power_down)(struct e1000_hw *);
34036 };
34037 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
34038
34039 /* Function pointers for the NVM. */
34040 struct e1000_nvm_operations {
34041 @@ -829,9 +831,10 @@ struct e1000_nvm_operations {
34042 s32 (*validate)(struct e1000_hw *);
34043 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
34044 };
34045 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
34046
34047 struct e1000_mac_info {
34048 - struct e1000_mac_operations ops;
34049 + e1000_mac_operations_no_const ops;
34050 u8 addr[ETH_ALEN];
34051 u8 perm_addr[ETH_ALEN];
34052
34053 @@ -872,7 +875,7 @@ struct e1000_mac_info {
34054 };
34055
34056 struct e1000_phy_info {
34057 - struct e1000_phy_operations ops;
34058 + e1000_phy_operations_no_const ops;
34059
34060 enum e1000_phy_type type;
34061
34062 @@ -906,7 +909,7 @@ struct e1000_phy_info {
34063 };
34064
34065 struct e1000_nvm_info {
34066 - struct e1000_nvm_operations ops;
34067 + e1000_nvm_operations_no_const ops;
34068
34069 enum e1000_nvm_type type;
34070 enum e1000_nvm_override override;
34071 diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
34072 index f67cbd3..cef9e3d 100644
34073 --- a/drivers/net/ethernet/intel/igb/e1000_hw.h
34074 +++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
34075 @@ -314,6 +314,7 @@ struct e1000_mac_operations {
34076 s32 (*read_mac_addr)(struct e1000_hw *);
34077 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
34078 };
34079 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
34080
34081 struct e1000_phy_operations {
34082 s32 (*acquire)(struct e1000_hw *);
34083 @@ -330,6 +331,7 @@ struct e1000_phy_operations {
34084 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
34085 s32 (*write_reg)(struct e1000_hw *, u32, u16);
34086 };
34087 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
34088
34089 struct e1000_nvm_operations {
34090 s32 (*acquire)(struct e1000_hw *);
34091 @@ -339,6 +341,7 @@ struct e1000_nvm_operations {
34092 s32 (*update)(struct e1000_hw *);
34093 s32 (*validate)(struct e1000_hw *);
34094 };
34095 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
34096
34097 struct e1000_info {
34098 s32 (*get_invariants)(struct e1000_hw *);
34099 @@ -350,7 +353,7 @@ struct e1000_info {
34100 extern const struct e1000_info e1000_82575_info;
34101
34102 struct e1000_mac_info {
34103 - struct e1000_mac_operations ops;
34104 + e1000_mac_operations_no_const ops;
34105
34106 u8 addr[6];
34107 u8 perm_addr[6];
34108 @@ -388,7 +391,7 @@ struct e1000_mac_info {
34109 };
34110
34111 struct e1000_phy_info {
34112 - struct e1000_phy_operations ops;
34113 + e1000_phy_operations_no_const ops;
34114
34115 enum e1000_phy_type type;
34116
34117 @@ -423,7 +426,7 @@ struct e1000_phy_info {
34118 };
34119
34120 struct e1000_nvm_info {
34121 - struct e1000_nvm_operations ops;
34122 + e1000_nvm_operations_no_const ops;
34123 enum e1000_nvm_type type;
34124 enum e1000_nvm_override override;
34125
34126 @@ -468,6 +471,7 @@ struct e1000_mbx_operations {
34127 s32 (*check_for_ack)(struct e1000_hw *, u16);
34128 s32 (*check_for_rst)(struct e1000_hw *, u16);
34129 };
34130 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
34131
34132 struct e1000_mbx_stats {
34133 u32 msgs_tx;
34134 @@ -479,7 +483,7 @@ struct e1000_mbx_stats {
34135 };
34136
34137 struct e1000_mbx_info {
34138 - struct e1000_mbx_operations ops;
34139 + e1000_mbx_operations_no_const ops;
34140 struct e1000_mbx_stats stats;
34141 u32 timeout;
34142 u32 usec_delay;
34143 diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h
34144 index 57db3c6..aa825fc 100644
34145 --- a/drivers/net/ethernet/intel/igbvf/vf.h
34146 +++ b/drivers/net/ethernet/intel/igbvf/vf.h
34147 @@ -189,9 +189,10 @@ struct e1000_mac_operations {
34148 s32 (*read_mac_addr)(struct e1000_hw *);
34149 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
34150 };
34151 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
34152
34153 struct e1000_mac_info {
34154 - struct e1000_mac_operations ops;
34155 + e1000_mac_operations_no_const ops;
34156 u8 addr[6];
34157 u8 perm_addr[6];
34158
34159 @@ -213,6 +214,7 @@ struct e1000_mbx_operations {
34160 s32 (*check_for_ack)(struct e1000_hw *);
34161 s32 (*check_for_rst)(struct e1000_hw *);
34162 };
34163 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
34164
34165 struct e1000_mbx_stats {
34166 u32 msgs_tx;
34167 @@ -224,7 +226,7 @@ struct e1000_mbx_stats {
34168 };
34169
34170 struct e1000_mbx_info {
34171 - struct e1000_mbx_operations ops;
34172 + e1000_mbx_operations_no_const ops;
34173 struct e1000_mbx_stats stats;
34174 u32 timeout;
34175 u32 usec_delay;
34176 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
34177 index 9b95bef..7e254ee 100644
34178 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
34179 +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
34180 @@ -2708,6 +2708,7 @@ struct ixgbe_eeprom_operations {
34181 s32 (*update_checksum)(struct ixgbe_hw *);
34182 u16 (*calc_checksum)(struct ixgbe_hw *);
34183 };
34184 +typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
34185
34186 struct ixgbe_mac_operations {
34187 s32 (*init_hw)(struct ixgbe_hw *);
34188 @@ -2769,6 +2770,7 @@ struct ixgbe_mac_operations {
34189 /* Manageability interface */
34190 s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
34191 };
34192 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
34193
34194 struct ixgbe_phy_operations {
34195 s32 (*identify)(struct ixgbe_hw *);
34196 @@ -2788,9 +2790,10 @@ struct ixgbe_phy_operations {
34197 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
34198 s32 (*check_overtemp)(struct ixgbe_hw *);
34199 };
34200 +typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
34201
34202 struct ixgbe_eeprom_info {
34203 - struct ixgbe_eeprom_operations ops;
34204 + ixgbe_eeprom_operations_no_const ops;
34205 enum ixgbe_eeprom_type type;
34206 u32 semaphore_delay;
34207 u16 word_size;
34208 @@ -2800,7 +2803,7 @@ struct ixgbe_eeprom_info {
34209
34210 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
34211 struct ixgbe_mac_info {
34212 - struct ixgbe_mac_operations ops;
34213 + ixgbe_mac_operations_no_const ops;
34214 enum ixgbe_mac_type type;
34215 u8 addr[ETH_ALEN];
34216 u8 perm_addr[ETH_ALEN];
34217 @@ -2828,7 +2831,7 @@ struct ixgbe_mac_info {
34218 };
34219
34220 struct ixgbe_phy_info {
34221 - struct ixgbe_phy_operations ops;
34222 + ixgbe_phy_operations_no_const ops;
34223 struct mdio_if_info mdio;
34224 enum ixgbe_phy_type type;
34225 u32 id;
34226 @@ -2856,6 +2859,7 @@ struct ixgbe_mbx_operations {
34227 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
34228 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
34229 };
34230 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
34231
34232 struct ixgbe_mbx_stats {
34233 u32 msgs_tx;
34234 @@ -2867,7 +2871,7 @@ struct ixgbe_mbx_stats {
34235 };
34236
34237 struct ixgbe_mbx_info {
34238 - struct ixgbe_mbx_operations ops;
34239 + ixgbe_mbx_operations_no_const ops;
34240 struct ixgbe_mbx_stats stats;
34241 u32 timeout;
34242 u32 usec_delay;
34243 diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
34244 index 25c951d..cc7cf33 100644
34245 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h
34246 +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
34247 @@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
34248 s32 (*clear_vfta)(struct ixgbe_hw *);
34249 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
34250 };
34251 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
34252
34253 enum ixgbe_mac_type {
34254 ixgbe_mac_unknown = 0,
34255 @@ -79,7 +80,7 @@ enum ixgbe_mac_type {
34256 };
34257
34258 struct ixgbe_mac_info {
34259 - struct ixgbe_mac_operations ops;
34260 + ixgbe_mac_operations_no_const ops;
34261 u8 addr[6];
34262 u8 perm_addr[6];
34263
34264 @@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
34265 s32 (*check_for_ack)(struct ixgbe_hw *);
34266 s32 (*check_for_rst)(struct ixgbe_hw *);
34267 };
34268 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
34269
34270 struct ixgbe_mbx_stats {
34271 u32 msgs_tx;
34272 @@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
34273 };
34274
34275 struct ixgbe_mbx_info {
34276 - struct ixgbe_mbx_operations ops;
34277 + ixgbe_mbx_operations_no_const ops;
34278 struct ixgbe_mbx_stats stats;
34279 u32 timeout;
34280 u32 udelay;
34281 diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
34282 index d498f04..1b49bed 100644
34283 --- a/drivers/net/ethernet/mellanox/mlx4/main.c
34284 +++ b/drivers/net/ethernet/mellanox/mlx4/main.c
34285 @@ -41,6 +41,7 @@
34286 #include <linux/slab.h>
34287 #include <linux/io-mapping.h>
34288 #include <linux/delay.h>
34289 +#include <linux/sched.h>
34290
34291 #include <linux/mlx4/device.h>
34292 #include <linux/mlx4/doorbell.h>
34293 diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
34294 index 5046a64..71ca936 100644
34295 --- a/drivers/net/ethernet/neterion/vxge/vxge-config.h
34296 +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h
34297 @@ -514,7 +514,7 @@ struct vxge_hw_uld_cbs {
34298 void (*link_down)(struct __vxge_hw_device *devh);
34299 void (*crit_err)(struct __vxge_hw_device *devh,
34300 enum vxge_hw_event type, u64 ext_data);
34301 -};
34302 +} __no_const;
34303
34304 /*
34305 * struct __vxge_hw_blockpool_entry - Block private data structure
34306 diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
34307 index 4a518a3..936b334 100644
34308 --- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
34309 +++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
34310 @@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
34311 struct vxge_hw_mempool_dma *dma_object,
34312 u32 index,
34313 u32 is_last);
34314 -};
34315 +} __no_const;
34316
34317 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
34318 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
34319 diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
34320 index bbacb37..d60887d 100644
34321 --- a/drivers/net/ethernet/realtek/r8169.c
34322 +++ b/drivers/net/ethernet/realtek/r8169.c
34323 @@ -695,17 +695,17 @@ struct rtl8169_private {
34324 struct mdio_ops {
34325 void (*write)(void __iomem *, int, int);
34326 int (*read)(void __iomem *, int);
34327 - } mdio_ops;
34328 + } __no_const mdio_ops;
34329
34330 struct pll_power_ops {
34331 void (*down)(struct rtl8169_private *);
34332 void (*up)(struct rtl8169_private *);
34333 - } pll_power_ops;
34334 + } __no_const pll_power_ops;
34335
34336 struct jumbo_ops {
34337 void (*enable)(struct rtl8169_private *);
34338 void (*disable)(struct rtl8169_private *);
34339 - } jumbo_ops;
34340 + } __no_const jumbo_ops;
34341
34342 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
34343 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
34344 diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
34345 index 5b118cd..858b523 100644
34346 --- a/drivers/net/ethernet/sis/sis190.c
34347 +++ b/drivers/net/ethernet/sis/sis190.c
34348 @@ -1622,7 +1622,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
34349 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
34350 struct net_device *dev)
34351 {
34352 - static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
34353 + static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
34354 struct sis190_private *tp = netdev_priv(dev);
34355 struct pci_dev *isa_bridge;
34356 u8 reg, tmp8;
34357 diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
34358 index c07cfe9..81cbf7e 100644
34359 --- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
34360 +++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
34361 @@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
34362
34363 writel(value, ioaddr + MMC_CNTRL);
34364
34365 - pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
34366 - MMC_CNTRL, value);
34367 +// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
34368 +// MMC_CNTRL, value);
34369 }
34370
34371 /* To mask all all interrupts.*/
34372 diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
34373 index dec5836..6d4db7d 100644
34374 --- a/drivers/net/hyperv/hyperv_net.h
34375 +++ b/drivers/net/hyperv/hyperv_net.h
34376 @@ -97,7 +97,7 @@ struct rndis_device {
34377
34378 enum rndis_device_state state;
34379 bool link_state;
34380 - atomic_t new_req_id;
34381 + atomic_unchecked_t new_req_id;
34382
34383 spinlock_t request_lock;
34384 struct list_head req_list;
34385 diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
34386 index 133b7fb..d58c559 100644
34387 --- a/drivers/net/hyperv/rndis_filter.c
34388 +++ b/drivers/net/hyperv/rndis_filter.c
34389 @@ -96,7 +96,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
34390 * template
34391 */
34392 set = &rndis_msg->msg.set_req;
34393 - set->req_id = atomic_inc_return(&dev->new_req_id);
34394 + set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
34395
34396 /* Add to the request list */
34397 spin_lock_irqsave(&dev->request_lock, flags);
34398 @@ -627,7 +627,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
34399
34400 /* Setup the rndis set */
34401 halt = &request->request_msg.msg.halt_req;
34402 - halt->req_id = atomic_inc_return(&dev->new_req_id);
34403 + halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
34404
34405 /* Ignore return since this msg is optional. */
34406 rndis_filter_send_request(dev, request);
34407 diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
34408 index 486b404..0d6677d 100644
34409 --- a/drivers/net/ppp/ppp_generic.c
34410 +++ b/drivers/net/ppp/ppp_generic.c
34411 @@ -987,7 +987,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
34412 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
34413 struct ppp_stats stats;
34414 struct ppp_comp_stats cstats;
34415 - char *vers;
34416
34417 switch (cmd) {
34418 case SIOCGPPPSTATS:
34419 @@ -1009,8 +1008,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
34420 break;
34421
34422 case SIOCGPPPVER:
34423 - vers = PPP_VERSION;
34424 - if (copy_to_user(addr, vers, strlen(vers) + 1))
34425 + if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
34426 break;
34427 err = 0;
34428 break;
34429 diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
34430 index 515f122..41dd273 100644
34431 --- a/drivers/net/tokenring/abyss.c
34432 +++ b/drivers/net/tokenring/abyss.c
34433 @@ -451,10 +451,12 @@ static struct pci_driver abyss_driver = {
34434
34435 static int __init abyss_init (void)
34436 {
34437 - abyss_netdev_ops = tms380tr_netdev_ops;
34438 + pax_open_kernel();
34439 + memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34440
34441 - abyss_netdev_ops.ndo_open = abyss_open;
34442 - abyss_netdev_ops.ndo_stop = abyss_close;
34443 + *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
34444 + *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
34445 + pax_close_kernel();
34446
34447 return pci_register_driver(&abyss_driver);
34448 }
34449 diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
34450 index 6153cfd..cf69c1c 100644
34451 --- a/drivers/net/tokenring/madgemc.c
34452 +++ b/drivers/net/tokenring/madgemc.c
34453 @@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver = {
34454
34455 static int __init madgemc_init (void)
34456 {
34457 - madgemc_netdev_ops = tms380tr_netdev_ops;
34458 - madgemc_netdev_ops.ndo_open = madgemc_open;
34459 - madgemc_netdev_ops.ndo_stop = madgemc_close;
34460 + pax_open_kernel();
34461 + memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34462 + *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
34463 + *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
34464 + pax_close_kernel();
34465
34466 return mca_register_driver (&madgemc_driver);
34467 }
34468 diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
34469 index 8d362e6..f91cc52 100644
34470 --- a/drivers/net/tokenring/proteon.c
34471 +++ b/drivers/net/tokenring/proteon.c
34472 @@ -353,9 +353,11 @@ static int __init proteon_init(void)
34473 struct platform_device *pdev;
34474 int i, num = 0, err = 0;
34475
34476 - proteon_netdev_ops = tms380tr_netdev_ops;
34477 - proteon_netdev_ops.ndo_open = proteon_open;
34478 - proteon_netdev_ops.ndo_stop = tms380tr_close;
34479 + pax_open_kernel();
34480 + memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34481 + *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
34482 + *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
34483 + pax_close_kernel();
34484
34485 err = platform_driver_register(&proteon_driver);
34486 if (err)
34487 diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
34488 index 46db5c5..37c1536 100644
34489 --- a/drivers/net/tokenring/skisa.c
34490 +++ b/drivers/net/tokenring/skisa.c
34491 @@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
34492 struct platform_device *pdev;
34493 int i, num = 0, err = 0;
34494
34495 - sk_isa_netdev_ops = tms380tr_netdev_ops;
34496 - sk_isa_netdev_ops.ndo_open = sk_isa_open;
34497 - sk_isa_netdev_ops.ndo_stop = tms380tr_close;
34498 + pax_open_kernel();
34499 + memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34500 + *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
34501 + *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
34502 + pax_close_kernel();
34503
34504 err = platform_driver_register(&sk_isa_driver);
34505 if (err)
34506 diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
34507 index e1324b4..e1b0041 100644
34508 --- a/drivers/net/usb/hso.c
34509 +++ b/drivers/net/usb/hso.c
34510 @@ -71,7 +71,7 @@
34511 #include <asm/byteorder.h>
34512 #include <linux/serial_core.h>
34513 #include <linux/serial.h>
34514 -
34515 +#include <asm/local.h>
34516
34517 #define MOD_AUTHOR "Option Wireless"
34518 #define MOD_DESCRIPTION "USB High Speed Option driver"
34519 @@ -257,7 +257,7 @@ struct hso_serial {
34520
34521 /* from usb_serial_port */
34522 struct tty_struct *tty;
34523 - int open_count;
34524 + local_t open_count;
34525 spinlock_t serial_lock;
34526
34527 int (*write_data) (struct hso_serial *serial);
34528 @@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
34529 struct urb *urb;
34530
34531 urb = serial->rx_urb[0];
34532 - if (serial->open_count > 0) {
34533 + if (local_read(&serial->open_count) > 0) {
34534 count = put_rxbuf_data(urb, serial);
34535 if (count == -1)
34536 return;
34537 @@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
34538 DUMP1(urb->transfer_buffer, urb->actual_length);
34539
34540 /* Anyone listening? */
34541 - if (serial->open_count == 0)
34542 + if (local_read(&serial->open_count) == 0)
34543 return;
34544
34545 if (status == 0) {
34546 @@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
34547 spin_unlock_irq(&serial->serial_lock);
34548
34549 /* check for port already opened, if not set the termios */
34550 - serial->open_count++;
34551 - if (serial->open_count == 1) {
34552 + if (local_inc_return(&serial->open_count) == 1) {
34553 serial->rx_state = RX_IDLE;
34554 /* Force default termio settings */
34555 _hso_serial_set_termios(tty, NULL);
34556 @@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
34557 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
34558 if (result) {
34559 hso_stop_serial_device(serial->parent);
34560 - serial->open_count--;
34561 + local_dec(&serial->open_count);
34562 kref_put(&serial->parent->ref, hso_serial_ref_free);
34563 }
34564 } else {
34565 @@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
34566
34567 /* reset the rts and dtr */
34568 /* do the actual close */
34569 - serial->open_count--;
34570 + local_dec(&serial->open_count);
34571
34572 - if (serial->open_count <= 0) {
34573 - serial->open_count = 0;
34574 + if (local_read(&serial->open_count) <= 0) {
34575 + local_set(&serial->open_count, 0);
34576 spin_lock_irq(&serial->serial_lock);
34577 if (serial->tty == tty) {
34578 serial->tty->driver_data = NULL;
34579 @@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
34580
34581 /* the actual setup */
34582 spin_lock_irqsave(&serial->serial_lock, flags);
34583 - if (serial->open_count)
34584 + if (local_read(&serial->open_count))
34585 _hso_serial_set_termios(tty, old);
34586 else
34587 tty->termios = old;
34588 @@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *urb)
34589 D1("Pending read interrupt on port %d\n", i);
34590 spin_lock(&serial->serial_lock);
34591 if (serial->rx_state == RX_IDLE &&
34592 - serial->open_count > 0) {
34593 + local_read(&serial->open_count) > 0) {
34594 /* Setup and send a ctrl req read on
34595 * port i */
34596 if (!serial->rx_urb_filled[0]) {
34597 @@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interface *iface)
34598 /* Start all serial ports */
34599 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
34600 if (serial_table[i] && (serial_table[i]->interface == iface)) {
34601 - if (dev2ser(serial_table[i])->open_count) {
34602 + if (local_read(&dev2ser(serial_table[i])->open_count)) {
34603 result =
34604 hso_start_serial_device(serial_table[i], GFP_NOIO);
34605 hso_kick_transmit(dev2ser(serial_table[i]));
34606 diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
34607 index efc0111..79c8f5b 100644
34608 --- a/drivers/net/wireless/ath/ath.h
34609 +++ b/drivers/net/wireless/ath/ath.h
34610 @@ -119,6 +119,7 @@ struct ath_ops {
34611 void (*write_flush) (void *);
34612 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
34613 };
34614 +typedef struct ath_ops __no_const ath_ops_no_const;
34615
34616 struct ath_common;
34617 struct ath_bus_ops;
34618 diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
34619 index 7b6417b..ab5db98 100644
34620 --- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
34621 +++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
34622 @@ -183,8 +183,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
34623 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
34624 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
34625
34626 - ACCESS_ONCE(ads->ds_link) = i->link;
34627 - ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
34628 + ACCESS_ONCE_RW(ads->ds_link) = i->link;
34629 + ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
34630
34631 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
34632 ctl6 = SM(i->keytype, AR_EncrType);
34633 @@ -198,26 +198,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
34634
34635 if ((i->is_first || i->is_last) &&
34636 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
34637 - ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
34638 + ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
34639 | set11nTries(i->rates, 1)
34640 | set11nTries(i->rates, 2)
34641 | set11nTries(i->rates, 3)
34642 | (i->dur_update ? AR_DurUpdateEna : 0)
34643 | SM(0, AR_BurstDur);
34644
34645 - ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
34646 + ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
34647 | set11nRate(i->rates, 1)
34648 | set11nRate(i->rates, 2)
34649 | set11nRate(i->rates, 3);
34650 } else {
34651 - ACCESS_ONCE(ads->ds_ctl2) = 0;
34652 - ACCESS_ONCE(ads->ds_ctl3) = 0;
34653 + ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
34654 + ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
34655 }
34656
34657 if (!i->is_first) {
34658 - ACCESS_ONCE(ads->ds_ctl0) = 0;
34659 - ACCESS_ONCE(ads->ds_ctl1) = ctl1;
34660 - ACCESS_ONCE(ads->ds_ctl6) = ctl6;
34661 + ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
34662 + ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
34663 + ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
34664 return;
34665 }
34666
34667 @@ -242,7 +242,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
34668 break;
34669 }
34670
34671 - ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
34672 + ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
34673 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
34674 | SM(i->txpower, AR_XmitPower)
34675 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
34676 @@ -252,19 +252,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
34677 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
34678 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
34679
34680 - ACCESS_ONCE(ads->ds_ctl1) = ctl1;
34681 - ACCESS_ONCE(ads->ds_ctl6) = ctl6;
34682 + ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
34683 + ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
34684
34685 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
34686 return;
34687
34688 - ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
34689 + ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
34690 | set11nPktDurRTSCTS(i->rates, 1);
34691
34692 - ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
34693 + ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
34694 | set11nPktDurRTSCTS(i->rates, 3);
34695
34696 - ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
34697 + ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
34698 | set11nRateFlags(i->rates, 1)
34699 | set11nRateFlags(i->rates, 2)
34700 | set11nRateFlags(i->rates, 3)
34701 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
34702 index 09b8c9d..905339e 100644
34703 --- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
34704 +++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
34705 @@ -35,47 +35,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
34706 (i->qcu << AR_TxQcuNum_S) | 0x17;
34707
34708 checksum += val;
34709 - ACCESS_ONCE(ads->info) = val;
34710 + ACCESS_ONCE_RW(ads->info) = val;
34711
34712 checksum += i->link;
34713 - ACCESS_ONCE(ads->link) = i->link;
34714 + ACCESS_ONCE_RW(ads->link) = i->link;
34715
34716 checksum += i->buf_addr[0];
34717 - ACCESS_ONCE(ads->data0) = i->buf_addr[0];
34718 + ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
34719 checksum += i->buf_addr[1];
34720 - ACCESS_ONCE(ads->data1) = i->buf_addr[1];
34721 + ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
34722 checksum += i->buf_addr[2];
34723 - ACCESS_ONCE(ads->data2) = i->buf_addr[2];
34724 + ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
34725 checksum += i->buf_addr[3];
34726 - ACCESS_ONCE(ads->data3) = i->buf_addr[3];
34727 + ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
34728
34729 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
34730 - ACCESS_ONCE(ads->ctl3) = val;
34731 + ACCESS_ONCE_RW(ads->ctl3) = val;
34732 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
34733 - ACCESS_ONCE(ads->ctl5) = val;
34734 + ACCESS_ONCE_RW(ads->ctl5) = val;
34735 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
34736 - ACCESS_ONCE(ads->ctl7) = val;
34737 + ACCESS_ONCE_RW(ads->ctl7) = val;
34738 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
34739 - ACCESS_ONCE(ads->ctl9) = val;
34740 + ACCESS_ONCE_RW(ads->ctl9) = val;
34741
34742 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
34743 - ACCESS_ONCE(ads->ctl10) = checksum;
34744 + ACCESS_ONCE_RW(ads->ctl10) = checksum;
34745
34746 if (i->is_first || i->is_last) {
34747 - ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
34748 + ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
34749 | set11nTries(i->rates, 1)
34750 | set11nTries(i->rates, 2)
34751 | set11nTries(i->rates, 3)
34752 | (i->dur_update ? AR_DurUpdateEna : 0)
34753 | SM(0, AR_BurstDur);
34754
34755 - ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
34756 + ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
34757 | set11nRate(i->rates, 1)
34758 | set11nRate(i->rates, 2)
34759 | set11nRate(i->rates, 3);
34760 } else {
34761 - ACCESS_ONCE(ads->ctl13) = 0;
34762 - ACCESS_ONCE(ads->ctl14) = 0;
34763 + ACCESS_ONCE_RW(ads->ctl13) = 0;
34764 + ACCESS_ONCE_RW(ads->ctl14) = 0;
34765 }
34766
34767 ads->ctl20 = 0;
34768 @@ -84,17 +84,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
34769
34770 ctl17 = SM(i->keytype, AR_EncrType);
34771 if (!i->is_first) {
34772 - ACCESS_ONCE(ads->ctl11) = 0;
34773 - ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
34774 - ACCESS_ONCE(ads->ctl15) = 0;
34775 - ACCESS_ONCE(ads->ctl16) = 0;
34776 - ACCESS_ONCE(ads->ctl17) = ctl17;
34777 - ACCESS_ONCE(ads->ctl18) = 0;
34778 - ACCESS_ONCE(ads->ctl19) = 0;
34779 + ACCESS_ONCE_RW(ads->ctl11) = 0;
34780 + ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
34781 + ACCESS_ONCE_RW(ads->ctl15) = 0;
34782 + ACCESS_ONCE_RW(ads->ctl16) = 0;
34783 + ACCESS_ONCE_RW(ads->ctl17) = ctl17;
34784 + ACCESS_ONCE_RW(ads->ctl18) = 0;
34785 + ACCESS_ONCE_RW(ads->ctl19) = 0;
34786 return;
34787 }
34788
34789 - ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
34790 + ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
34791 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
34792 | SM(i->txpower, AR_XmitPower)
34793 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
34794 @@ -130,22 +130,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
34795 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
34796 ctl12 |= SM(val, AR_PAPRDChainMask);
34797
34798 - ACCESS_ONCE(ads->ctl12) = ctl12;
34799 - ACCESS_ONCE(ads->ctl17) = ctl17;
34800 + ACCESS_ONCE_RW(ads->ctl12) = ctl12;
34801 + ACCESS_ONCE_RW(ads->ctl17) = ctl17;
34802
34803 - ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
34804 + ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
34805 | set11nPktDurRTSCTS(i->rates, 1);
34806
34807 - ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
34808 + ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
34809 | set11nPktDurRTSCTS(i->rates, 3);
34810
34811 - ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
34812 + ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
34813 | set11nRateFlags(i->rates, 1)
34814 | set11nRateFlags(i->rates, 2)
34815 | set11nRateFlags(i->rates, 3)
34816 | SM(i->rtscts_rate, AR_RTSCTSRate);
34817
34818 - ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
34819 + ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
34820 }
34821
34822 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
34823 diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
34824 index c8261d4..8d88929 100644
34825 --- a/drivers/net/wireless/ath/ath9k/hw.h
34826 +++ b/drivers/net/wireless/ath/ath9k/hw.h
34827 @@ -773,7 +773,7 @@ struct ath_hw_private_ops {
34828
34829 /* ANI */
34830 void (*ani_cache_ini_regs)(struct ath_hw *ah);
34831 -};
34832 +} __no_const;
34833
34834 /**
34835 * struct ath_hw_ops - callbacks used by hardware code and driver code
34836 @@ -803,7 +803,7 @@ struct ath_hw_ops {
34837 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
34838 struct ath_hw_antcomb_conf *antconf);
34839
34840 -};
34841 +} __no_const;
34842
34843 struct ath_nf_limits {
34844 s16 max;
34845 @@ -823,7 +823,7 @@ enum ath_cal_list {
34846 #define AH_FASTCC 0x4
34847
34848 struct ath_hw {
34849 - struct ath_ops reg_ops;
34850 + ath_ops_no_const reg_ops;
34851
34852 struct ieee80211_hw *hw;
34853 struct ath_common common;
34854 diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
34855 index af00e2c..ab04d34 100644
34856 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
34857 +++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
34858 @@ -545,7 +545,7 @@ struct phy_func_ptr {
34859 void (*carrsuppr)(struct brcms_phy *);
34860 s32 (*rxsigpwr)(struct brcms_phy *, s32);
34861 void (*detach)(struct brcms_phy *);
34862 -};
34863 +} __no_const;
34864
34865 struct brcms_phy {
34866 struct brcms_phy_pub pubpi_ro;
34867 diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
34868 index a7dfba8..e28eacd 100644
34869 --- a/drivers/net/wireless/iwlegacy/3945-mac.c
34870 +++ b/drivers/net/wireless/iwlegacy/3945-mac.c
34871 @@ -3647,7 +3647,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
34872 */
34873 if (il3945_mod_params.disable_hw_scan) {
34874 D_INFO("Disabling hw_scan\n");
34875 - il3945_hw_ops.hw_scan = NULL;
34876 + pax_open_kernel();
34877 + *(void **)&il3945_hw_ops.hw_scan = NULL;
34878 + pax_close_kernel();
34879 }
34880
34881 D_INFO("*** LOAD DRIVER ***\n");
34882 diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
34883 index f8fc239..8cade22 100644
34884 --- a/drivers/net/wireless/iwlwifi/iwl-debug.h
34885 +++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
34886 @@ -86,8 +86,8 @@ do { \
34887 } while (0)
34888
34889 #else
34890 -#define IWL_DEBUG(m, level, fmt, args...)
34891 -#define IWL_DEBUG_LIMIT(m, level, fmt, args...)
34892 +#define IWL_DEBUG(m, level, fmt, args...) do {} while (0)
34893 +#define IWL_DEBUG_LIMIT(m, level, fmt, args...) do {} while (0)
34894 #define iwl_print_hex_dump(m, level, p, len)
34895 #define IWL_DEBUG_QUIET_RFKILL(p, fmt, args...) \
34896 do { \
34897 diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
34898 index 4b9e730..7603659 100644
34899 --- a/drivers/net/wireless/mac80211_hwsim.c
34900 +++ b/drivers/net/wireless/mac80211_hwsim.c
34901 @@ -1677,9 +1677,11 @@ static int __init init_mac80211_hwsim(void)
34902 return -EINVAL;
34903
34904 if (fake_hw_scan) {
34905 - mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
34906 - mac80211_hwsim_ops.sw_scan_start = NULL;
34907 - mac80211_hwsim_ops.sw_scan_complete = NULL;
34908 + pax_open_kernel();
34909 + *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
34910 + *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
34911 + *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
34912 + pax_close_kernel();
34913 }
34914
34915 spin_lock_init(&hwsim_radio_lock);
34916 diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
34917 index 3186aa4..b35b09f 100644
34918 --- a/drivers/net/wireless/mwifiex/main.h
34919 +++ b/drivers/net/wireless/mwifiex/main.h
34920 @@ -536,7 +536,7 @@ struct mwifiex_if_ops {
34921 void (*cleanup_mpa_buf) (struct mwifiex_adapter *);
34922 int (*cmdrsp_complete) (struct mwifiex_adapter *, struct sk_buff *);
34923 int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *);
34924 -};
34925 +} __no_const;
34926
34927 struct mwifiex_adapter {
34928 u8 iface_type;
34929 diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
34930 index a330c69..a81540f 100644
34931 --- a/drivers/net/wireless/rndis_wlan.c
34932 +++ b/drivers/net/wireless/rndis_wlan.c
34933 @@ -1278,7 +1278,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
34934
34935 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
34936
34937 - if (rts_threshold < 0 || rts_threshold > 2347)
34938 + if (rts_threshold > 2347)
34939 rts_threshold = 2347;
34940
34941 tmp = cpu_to_le32(rts_threshold);
34942 diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/wl1251/wl1251.h
34943 index a77f1bb..c608b2b 100644
34944 --- a/drivers/net/wireless/wl1251/wl1251.h
34945 +++ b/drivers/net/wireless/wl1251/wl1251.h
34946 @@ -266,7 +266,7 @@ struct wl1251_if_operations {
34947 void (*reset)(struct wl1251 *wl);
34948 void (*enable_irq)(struct wl1251 *wl);
34949 void (*disable_irq)(struct wl1251 *wl);
34950 -};
34951 +} __no_const;
34952
34953 struct wl1251 {
34954 struct ieee80211_hw *hw;
34955 diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
34956 index f34b5b2..b5abb9f 100644
34957 --- a/drivers/oprofile/buffer_sync.c
34958 +++ b/drivers/oprofile/buffer_sync.c
34959 @@ -343,7 +343,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
34960 if (cookie == NO_COOKIE)
34961 offset = pc;
34962 if (cookie == INVALID_COOKIE) {
34963 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
34964 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
34965 offset = pc;
34966 }
34967 if (cookie != last_cookie) {
34968 @@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
34969 /* add userspace sample */
34970
34971 if (!mm) {
34972 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
34973 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
34974 return 0;
34975 }
34976
34977 cookie = lookup_dcookie(mm, s->eip, &offset);
34978
34979 if (cookie == INVALID_COOKIE) {
34980 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
34981 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
34982 return 0;
34983 }
34984
34985 @@ -563,7 +563,7 @@ void sync_buffer(int cpu)
34986 /* ignore backtraces if failed to add a sample */
34987 if (state == sb_bt_start) {
34988 state = sb_bt_ignore;
34989 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
34990 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
34991 }
34992 }
34993 release_mm(mm);
34994 diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
34995 index c0cc4e7..44d4e54 100644
34996 --- a/drivers/oprofile/event_buffer.c
34997 +++ b/drivers/oprofile/event_buffer.c
34998 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
34999 }
35000
35001 if (buffer_pos == buffer_size) {
35002 - atomic_inc(&oprofile_stats.event_lost_overflow);
35003 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
35004 return;
35005 }
35006
35007 diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
35008 index ed2c3ec..deda85a 100644
35009 --- a/drivers/oprofile/oprof.c
35010 +++ b/drivers/oprofile/oprof.c
35011 @@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
35012 if (oprofile_ops.switch_events())
35013 return;
35014
35015 - atomic_inc(&oprofile_stats.multiplex_counter);
35016 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
35017 start_switch_worker();
35018 }
35019
35020 diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
35021 index 917d28e..d62d981 100644
35022 --- a/drivers/oprofile/oprofile_stats.c
35023 +++ b/drivers/oprofile/oprofile_stats.c
35024 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
35025 cpu_buf->sample_invalid_eip = 0;
35026 }
35027
35028 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
35029 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
35030 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
35031 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
35032 - atomic_set(&oprofile_stats.multiplex_counter, 0);
35033 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
35034 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
35035 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
35036 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
35037 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
35038 }
35039
35040
35041 diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
35042 index 38b6fc0..b5cbfce 100644
35043 --- a/drivers/oprofile/oprofile_stats.h
35044 +++ b/drivers/oprofile/oprofile_stats.h
35045 @@ -13,11 +13,11 @@
35046 #include <linux/atomic.h>
35047
35048 struct oprofile_stat_struct {
35049 - atomic_t sample_lost_no_mm;
35050 - atomic_t sample_lost_no_mapping;
35051 - atomic_t bt_lost_no_mapping;
35052 - atomic_t event_lost_overflow;
35053 - atomic_t multiplex_counter;
35054 + atomic_unchecked_t sample_lost_no_mm;
35055 + atomic_unchecked_t sample_lost_no_mapping;
35056 + atomic_unchecked_t bt_lost_no_mapping;
35057 + atomic_unchecked_t event_lost_overflow;
35058 + atomic_unchecked_t multiplex_counter;
35059 };
35060
35061 extern struct oprofile_stat_struct oprofile_stats;
35062 diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
35063 index 2f0aa0f..90fab02 100644
35064 --- a/drivers/oprofile/oprofilefs.c
35065 +++ b/drivers/oprofile/oprofilefs.c
35066 @@ -193,7 +193,7 @@ static const struct file_operations atomic_ro_fops = {
35067
35068
35069 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
35070 - char const *name, atomic_t *val)
35071 + char const *name, atomic_unchecked_t *val)
35072 {
35073 return __oprofilefs_create_file(sb, root, name,
35074 &atomic_ro_fops, 0444, val);
35075 diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
35076 index 3f56bc0..707d642 100644
35077 --- a/drivers/parport/procfs.c
35078 +++ b/drivers/parport/procfs.c
35079 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
35080
35081 *ppos += len;
35082
35083 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
35084 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
35085 }
35086
35087 #ifdef CONFIG_PARPORT_1284
35088 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
35089
35090 *ppos += len;
35091
35092 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
35093 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
35094 }
35095 #endif /* IEEE1284.3 support. */
35096
35097 diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
35098 index 9fff878..ad0ad53 100644
35099 --- a/drivers/pci/hotplug/cpci_hotplug.h
35100 +++ b/drivers/pci/hotplug/cpci_hotplug.h
35101 @@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
35102 int (*hardware_test) (struct slot* slot, u32 value);
35103 u8 (*get_power) (struct slot* slot);
35104 int (*set_power) (struct slot* slot, int value);
35105 -};
35106 +} __no_const;
35107
35108 struct cpci_hp_controller {
35109 unsigned int irq;
35110 diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
35111 index 76ba8a1..20ca857 100644
35112 --- a/drivers/pci/hotplug/cpqphp_nvram.c
35113 +++ b/drivers/pci/hotplug/cpqphp_nvram.c
35114 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
35115
35116 void compaq_nvram_init (void __iomem *rom_start)
35117 {
35118 +
35119 +#ifndef CONFIG_PAX_KERNEXEC
35120 if (rom_start) {
35121 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
35122 }
35123 +#endif
35124 +
35125 dbg("int15 entry = %p\n", compaq_int15_entry_point);
35126
35127 /* initialize our int15 lock */
35128 diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
35129 index 24f049e..051f66e 100644
35130 --- a/drivers/pci/pcie/aspm.c
35131 +++ b/drivers/pci/pcie/aspm.c
35132 @@ -27,9 +27,9 @@
35133 #define MODULE_PARAM_PREFIX "pcie_aspm."
35134
35135 /* Note: those are not register definitions */
35136 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
35137 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
35138 -#define ASPM_STATE_L1 (4) /* L1 state */
35139 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
35140 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
35141 +#define ASPM_STATE_L1 (4U) /* L1 state */
35142 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
35143 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
35144
35145 diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
35146 index 71eac9c..2de27ef 100644
35147 --- a/drivers/pci/probe.c
35148 +++ b/drivers/pci/probe.c
35149 @@ -136,7 +136,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
35150 u32 l, sz, mask;
35151 u16 orig_cmd;
35152
35153 - mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
35154 + mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
35155
35156 if (!dev->mmio_always_on) {
35157 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
35158 diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
35159 index 27911b5..5b6db88 100644
35160 --- a/drivers/pci/proc.c
35161 +++ b/drivers/pci/proc.c
35162 @@ -476,7 +476,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
35163 static int __init pci_proc_init(void)
35164 {
35165 struct pci_dev *dev = NULL;
35166 +
35167 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
35168 +#ifdef CONFIG_GRKERNSEC_PROC_USER
35169 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
35170 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
35171 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
35172 +#endif
35173 +#else
35174 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
35175 +#endif
35176 proc_create("devices", 0, proc_bus_pci_dir,
35177 &proc_bus_pci_dev_operations);
35178 proc_initialized = 1;
35179 diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
35180 index ea0c607..58c4628 100644
35181 --- a/drivers/platform/x86/thinkpad_acpi.c
35182 +++ b/drivers/platform/x86/thinkpad_acpi.c
35183 @@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
35184 return 0;
35185 }
35186
35187 -void static hotkey_mask_warn_incomplete_mask(void)
35188 +static void hotkey_mask_warn_incomplete_mask(void)
35189 {
35190 /* log only what the user can fix... */
35191 const u32 wantedmask = hotkey_driver_mask &
35192 @@ -2325,11 +2325,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
35193 }
35194 }
35195
35196 -static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35197 - struct tp_nvram_state *newn,
35198 - const u32 event_mask)
35199 -{
35200 -
35201 #define TPACPI_COMPARE_KEY(__scancode, __member) \
35202 do { \
35203 if ((event_mask & (1 << __scancode)) && \
35204 @@ -2343,36 +2338,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35205 tpacpi_hotkey_send_key(__scancode); \
35206 } while (0)
35207
35208 - void issue_volchange(const unsigned int oldvol,
35209 - const unsigned int newvol)
35210 - {
35211 - unsigned int i = oldvol;
35212 +static void issue_volchange(const unsigned int oldvol,
35213 + const unsigned int newvol,
35214 + const u32 event_mask)
35215 +{
35216 + unsigned int i = oldvol;
35217
35218 - while (i > newvol) {
35219 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
35220 - i--;
35221 - }
35222 - while (i < newvol) {
35223 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
35224 - i++;
35225 - }
35226 + while (i > newvol) {
35227 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
35228 + i--;
35229 }
35230 + while (i < newvol) {
35231 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
35232 + i++;
35233 + }
35234 +}
35235
35236 - void issue_brightnesschange(const unsigned int oldbrt,
35237 - const unsigned int newbrt)
35238 - {
35239 - unsigned int i = oldbrt;
35240 +static void issue_brightnesschange(const unsigned int oldbrt,
35241 + const unsigned int newbrt,
35242 + const u32 event_mask)
35243 +{
35244 + unsigned int i = oldbrt;
35245
35246 - while (i > newbrt) {
35247 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
35248 - i--;
35249 - }
35250 - while (i < newbrt) {
35251 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
35252 - i++;
35253 - }
35254 + while (i > newbrt) {
35255 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
35256 + i--;
35257 + }
35258 + while (i < newbrt) {
35259 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
35260 + i++;
35261 }
35262 +}
35263
35264 +static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35265 + struct tp_nvram_state *newn,
35266 + const u32 event_mask)
35267 +{
35268 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
35269 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
35270 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
35271 @@ -2406,7 +2407,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35272 oldn->volume_level != newn->volume_level) {
35273 /* recently muted, or repeated mute keypress, or
35274 * multiple presses ending in mute */
35275 - issue_volchange(oldn->volume_level, newn->volume_level);
35276 + issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
35277 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
35278 }
35279 } else {
35280 @@ -2416,7 +2417,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35281 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
35282 }
35283 if (oldn->volume_level != newn->volume_level) {
35284 - issue_volchange(oldn->volume_level, newn->volume_level);
35285 + issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
35286 } else if (oldn->volume_toggle != newn->volume_toggle) {
35287 /* repeated vol up/down keypress at end of scale ? */
35288 if (newn->volume_level == 0)
35289 @@ -2429,7 +2430,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35290 /* handle brightness */
35291 if (oldn->brightness_level != newn->brightness_level) {
35292 issue_brightnesschange(oldn->brightness_level,
35293 - newn->brightness_level);
35294 + newn->brightness_level,
35295 + event_mask);
35296 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
35297 /* repeated key presses that didn't change state */
35298 if (newn->brightness_level == 0)
35299 @@ -2438,10 +2440,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35300 && !tp_features.bright_unkfw)
35301 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
35302 }
35303 +}
35304
35305 #undef TPACPI_COMPARE_KEY
35306 #undef TPACPI_MAY_SEND_KEY
35307 -}
35308
35309 /*
35310 * Polling driver
35311 diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
35312 index b859d16..5cc6b1a 100644
35313 --- a/drivers/pnp/pnpbios/bioscalls.c
35314 +++ b/drivers/pnp/pnpbios/bioscalls.c
35315 @@ -59,7 +59,7 @@ do { \
35316 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
35317 } while(0)
35318
35319 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
35320 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
35321 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
35322
35323 /*
35324 @@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
35325
35326 cpu = get_cpu();
35327 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
35328 +
35329 + pax_open_kernel();
35330 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
35331 + pax_close_kernel();
35332
35333 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
35334 spin_lock_irqsave(&pnp_bios_lock, flags);
35335 @@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
35336 :"memory");
35337 spin_unlock_irqrestore(&pnp_bios_lock, flags);
35338
35339 + pax_open_kernel();
35340 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
35341 + pax_close_kernel();
35342 +
35343 put_cpu();
35344
35345 /* If we get here and this is set then the PnP BIOS faulted on us. */
35346 @@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
35347 return status;
35348 }
35349
35350 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
35351 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
35352 {
35353 int i;
35354
35355 @@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
35356 pnp_bios_callpoint.offset = header->fields.pm16offset;
35357 pnp_bios_callpoint.segment = PNP_CS16;
35358
35359 + pax_open_kernel();
35360 +
35361 for_each_possible_cpu(i) {
35362 struct desc_struct *gdt = get_cpu_gdt_table(i);
35363 if (!gdt)
35364 @@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
35365 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
35366 (unsigned long)__va(header->fields.pm16dseg));
35367 }
35368 +
35369 + pax_close_kernel();
35370 }
35371 diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
35372 index b0ecacb..7c9da2e 100644
35373 --- a/drivers/pnp/resource.c
35374 +++ b/drivers/pnp/resource.c
35375 @@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
35376 return 1;
35377
35378 /* check if the resource is valid */
35379 - if (*irq < 0 || *irq > 15)
35380 + if (*irq > 15)
35381 return 0;
35382
35383 /* check if the resource is reserved */
35384 @@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
35385 return 1;
35386
35387 /* check if the resource is valid */
35388 - if (*dma < 0 || *dma == 4 || *dma > 7)
35389 + if (*dma == 4 || *dma > 7)
35390 return 0;
35391
35392 /* check if the resource is reserved */
35393 diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
35394 index 1ed6ea0..77c0bd2 100644
35395 --- a/drivers/power/bq27x00_battery.c
35396 +++ b/drivers/power/bq27x00_battery.c
35397 @@ -72,7 +72,7 @@
35398 struct bq27x00_device_info;
35399 struct bq27x00_access_methods {
35400 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
35401 -};
35402 +} __no_const;
35403
35404 enum bq27x00_chip { BQ27000, BQ27500 };
35405
35406 diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
35407 index a838e66..a9e1665 100644
35408 --- a/drivers/regulator/max8660.c
35409 +++ b/drivers/regulator/max8660.c
35410 @@ -383,8 +383,10 @@ static int __devinit max8660_probe(struct i2c_client *client,
35411 max8660->shadow_regs[MAX8660_OVER1] = 5;
35412 } else {
35413 /* Otherwise devices can be toggled via software */
35414 - max8660_dcdc_ops.enable = max8660_dcdc_enable;
35415 - max8660_dcdc_ops.disable = max8660_dcdc_disable;
35416 + pax_open_kernel();
35417 + *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
35418 + *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
35419 + pax_close_kernel();
35420 }
35421
35422 /*
35423 diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
35424 index e8cfc99..072aee2 100644
35425 --- a/drivers/regulator/mc13892-regulator.c
35426 +++ b/drivers/regulator/mc13892-regulator.c
35427 @@ -574,10 +574,12 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
35428 }
35429 mc13xxx_unlock(mc13892);
35430
35431 - mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
35432 + pax_open_kernel();
35433 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
35434 = mc13892_vcam_set_mode;
35435 - mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
35436 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
35437 = mc13892_vcam_get_mode;
35438 + pax_close_kernel();
35439
35440 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
35441 ARRAY_SIZE(mc13892_regulators));
35442 diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
35443 index cace6d3..f623fda 100644
35444 --- a/drivers/rtc/rtc-dev.c
35445 +++ b/drivers/rtc/rtc-dev.c
35446 @@ -14,6 +14,7 @@
35447 #include <linux/module.h>
35448 #include <linux/rtc.h>
35449 #include <linux/sched.h>
35450 +#include <linux/grsecurity.h>
35451 #include "rtc-core.h"
35452
35453 static dev_t rtc_devt;
35454 @@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *file,
35455 if (copy_from_user(&tm, uarg, sizeof(tm)))
35456 return -EFAULT;
35457
35458 + gr_log_timechange();
35459 +
35460 return rtc_set_time(rtc, &tm);
35461
35462 case RTC_PIE_ON:
35463 diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
35464 index ffb5878..e6d785c 100644
35465 --- a/drivers/scsi/aacraid/aacraid.h
35466 +++ b/drivers/scsi/aacraid/aacraid.h
35467 @@ -492,7 +492,7 @@ struct adapter_ops
35468 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
35469 /* Administrative operations */
35470 int (*adapter_comm)(struct aac_dev * dev, int comm);
35471 -};
35472 +} __no_const;
35473
35474 /*
35475 * Define which interrupt handler needs to be installed
35476 diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
35477 index 705e13e..91c873c 100644
35478 --- a/drivers/scsi/aacraid/linit.c
35479 +++ b/drivers/scsi/aacraid/linit.c
35480 @@ -93,7 +93,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
35481 #elif defined(__devinitconst)
35482 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
35483 #else
35484 -static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
35485 +static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
35486 #endif
35487 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
35488 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
35489 diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
35490 index d5ff142..49c0ebb 100644
35491 --- a/drivers/scsi/aic94xx/aic94xx_init.c
35492 +++ b/drivers/scsi/aic94xx/aic94xx_init.c
35493 @@ -1012,7 +1012,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
35494 .lldd_control_phy = asd_control_phy,
35495 };
35496
35497 -static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
35498 +static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
35499 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
35500 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
35501 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
35502 diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
35503 index a796de9..1ef20e1 100644
35504 --- a/drivers/scsi/bfa/bfa.h
35505 +++ b/drivers/scsi/bfa/bfa.h
35506 @@ -196,7 +196,7 @@ struct bfa_hwif_s {
35507 u32 *end);
35508 int cpe_vec_q0;
35509 int rme_vec_q0;
35510 -};
35511 +} __no_const;
35512 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
35513
35514 struct bfa_faa_cbfn_s {
35515 diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
35516 index f0f80e2..8ec946b 100644
35517 --- a/drivers/scsi/bfa/bfa_fcpim.c
35518 +++ b/drivers/scsi/bfa/bfa_fcpim.c
35519 @@ -3715,7 +3715,7 @@ bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
35520
35521 bfa_iotag_attach(fcp);
35522
35523 - fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
35524 + fcp->itn_arr = (bfa_itn_s_no_const *) bfa_mem_kva_curp(fcp);
35525 bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
35526 (fcp->num_itns * sizeof(struct bfa_itn_s));
35527 memset(fcp->itn_arr, 0,
35528 @@ -3773,7 +3773,7 @@ bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
35529 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
35530 {
35531 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
35532 - struct bfa_itn_s *itn;
35533 + bfa_itn_s_no_const *itn;
35534
35535 itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
35536 itn->isr = isr;
35537 diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
35538 index 36f26da..38a34a8 100644
35539 --- a/drivers/scsi/bfa/bfa_fcpim.h
35540 +++ b/drivers/scsi/bfa/bfa_fcpim.h
35541 @@ -37,6 +37,7 @@ struct bfa_iotag_s {
35542 struct bfa_itn_s {
35543 bfa_isr_func_t isr;
35544 };
35545 +typedef struct bfa_itn_s __no_const bfa_itn_s_no_const;
35546
35547 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
35548 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
35549 @@ -147,7 +148,7 @@ struct bfa_fcp_mod_s {
35550 struct list_head iotag_tio_free_q; /* free IO resources */
35551 struct list_head iotag_unused_q; /* unused IO resources*/
35552 struct bfa_iotag_s *iotag_arr;
35553 - struct bfa_itn_s *itn_arr;
35554 + bfa_itn_s_no_const *itn_arr;
35555 int num_ioim_reqs;
35556 int num_fwtio_reqs;
35557 int num_itns;
35558 diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
35559 index 546d46b..642fa5b 100644
35560 --- a/drivers/scsi/bfa/bfa_ioc.h
35561 +++ b/drivers/scsi/bfa/bfa_ioc.h
35562 @@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
35563 bfa_ioc_disable_cbfn_t disable_cbfn;
35564 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
35565 bfa_ioc_reset_cbfn_t reset_cbfn;
35566 -};
35567 +} __no_const;
35568
35569 /*
35570 * IOC event notification mechanism.
35571 @@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
35572 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
35573 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
35574 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
35575 -};
35576 +} __no_const;
35577
35578 /*
35579 * Queue element to wait for room in request queue. FIFO order is
35580 diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
35581 index 351dc0b..951dc32 100644
35582 --- a/drivers/scsi/hosts.c
35583 +++ b/drivers/scsi/hosts.c
35584 @@ -42,7 +42,7 @@
35585 #include "scsi_logging.h"
35586
35587
35588 -static atomic_t scsi_host_next_hn; /* host_no for next new host */
35589 +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
35590
35591
35592 static void scsi_host_cls_release(struct device *dev)
35593 @@ -357,7 +357,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
35594 * subtract one because we increment first then return, but we need to
35595 * know what the next host number was before increment
35596 */
35597 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
35598 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
35599 shost->dma_channel = 0xff;
35600
35601 /* These three are default values which can be overridden */
35602 diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
35603 index b96962c..0c82ec2 100644
35604 --- a/drivers/scsi/hpsa.c
35605 +++ b/drivers/scsi/hpsa.c
35606 @@ -507,7 +507,7 @@ static inline u32 next_command(struct ctlr_info *h)
35607 u32 a;
35608
35609 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
35610 - return h->access.command_completed(h);
35611 + return h->access->command_completed(h);
35612
35613 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
35614 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
35615 @@ -2991,7 +2991,7 @@ static void start_io(struct ctlr_info *h)
35616 while (!list_empty(&h->reqQ)) {
35617 c = list_entry(h->reqQ.next, struct CommandList, list);
35618 /* can't do anything if fifo is full */
35619 - if ((h->access.fifo_full(h))) {
35620 + if ((h->access->fifo_full(h))) {
35621 dev_warn(&h->pdev->dev, "fifo full\n");
35622 break;
35623 }
35624 @@ -3001,7 +3001,7 @@ static void start_io(struct ctlr_info *h)
35625 h->Qdepth--;
35626
35627 /* Tell the controller execute command */
35628 - h->access.submit_command(h, c);
35629 + h->access->submit_command(h, c);
35630
35631 /* Put job onto the completed Q */
35632 addQ(&h->cmpQ, c);
35633 @@ -3010,17 +3010,17 @@ static void start_io(struct ctlr_info *h)
35634
35635 static inline unsigned long get_next_completion(struct ctlr_info *h)
35636 {
35637 - return h->access.command_completed(h);
35638 + return h->access->command_completed(h);
35639 }
35640
35641 static inline bool interrupt_pending(struct ctlr_info *h)
35642 {
35643 - return h->access.intr_pending(h);
35644 + return h->access->intr_pending(h);
35645 }
35646
35647 static inline long interrupt_not_for_us(struct ctlr_info *h)
35648 {
35649 - return (h->access.intr_pending(h) == 0) ||
35650 + return (h->access->intr_pending(h) == 0) ||
35651 (h->interrupts_enabled == 0);
35652 }
35653
35654 @@ -3919,7 +3919,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
35655 if (prod_index < 0)
35656 return -ENODEV;
35657 h->product_name = products[prod_index].product_name;
35658 - h->access = *(products[prod_index].access);
35659 + h->access = products[prod_index].access;
35660
35661 if (hpsa_board_disabled(h->pdev)) {
35662 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
35663 @@ -4164,7 +4164,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
35664
35665 assert_spin_locked(&lockup_detector_lock);
35666 remove_ctlr_from_lockup_detector_list(h);
35667 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
35668 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
35669 spin_lock_irqsave(&h->lock, flags);
35670 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
35671 spin_unlock_irqrestore(&h->lock, flags);
35672 @@ -4344,7 +4344,7 @@ reinit_after_soft_reset:
35673 }
35674
35675 /* make sure the board interrupts are off */
35676 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
35677 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
35678
35679 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
35680 goto clean2;
35681 @@ -4378,7 +4378,7 @@ reinit_after_soft_reset:
35682 * fake ones to scoop up any residual completions.
35683 */
35684 spin_lock_irqsave(&h->lock, flags);
35685 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
35686 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
35687 spin_unlock_irqrestore(&h->lock, flags);
35688 free_irq(h->intr[h->intr_mode], h);
35689 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
35690 @@ -4397,9 +4397,9 @@ reinit_after_soft_reset:
35691 dev_info(&h->pdev->dev, "Board READY.\n");
35692 dev_info(&h->pdev->dev,
35693 "Waiting for stale completions to drain.\n");
35694 - h->access.set_intr_mask(h, HPSA_INTR_ON);
35695 + h->access->set_intr_mask(h, HPSA_INTR_ON);
35696 msleep(10000);
35697 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
35698 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
35699
35700 rc = controller_reset_failed(h->cfgtable);
35701 if (rc)
35702 @@ -4420,7 +4420,7 @@ reinit_after_soft_reset:
35703 }
35704
35705 /* Turn the interrupts on so we can service requests */
35706 - h->access.set_intr_mask(h, HPSA_INTR_ON);
35707 + h->access->set_intr_mask(h, HPSA_INTR_ON);
35708
35709 hpsa_hba_inquiry(h);
35710 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
35711 @@ -4472,7 +4472,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
35712 * To write all data in the battery backed cache to disks
35713 */
35714 hpsa_flush_cache(h);
35715 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
35716 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
35717 free_irq(h->intr[h->intr_mode], h);
35718 #ifdef CONFIG_PCI_MSI
35719 if (h->msix_vector)
35720 @@ -4636,7 +4636,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
35721 return;
35722 }
35723 /* Change the access methods to the performant access methods */
35724 - h->access = SA5_performant_access;
35725 + h->access = &SA5_performant_access;
35726 h->transMethod = CFGTBL_Trans_Performant;
35727 }
35728
35729 diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
35730 index 91edafb..a9b88ec 100644
35731 --- a/drivers/scsi/hpsa.h
35732 +++ b/drivers/scsi/hpsa.h
35733 @@ -73,7 +73,7 @@ struct ctlr_info {
35734 unsigned int msix_vector;
35735 unsigned int msi_vector;
35736 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
35737 - struct access_method access;
35738 + struct access_method *access;
35739
35740 /* queue and queue Info */
35741 struct list_head reqQ;
35742 diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
35743 index f2df059..a3a9930 100644
35744 --- a/drivers/scsi/ips.h
35745 +++ b/drivers/scsi/ips.h
35746 @@ -1027,7 +1027,7 @@ typedef struct {
35747 int (*intr)(struct ips_ha *);
35748 void (*enableint)(struct ips_ha *);
35749 uint32_t (*statupd)(struct ips_ha *);
35750 -} ips_hw_func_t;
35751 +} __no_const ips_hw_func_t;
35752
35753 typedef struct ips_ha {
35754 uint8_t ha_id[IPS_MAX_CHANNELS+1];
35755 diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
35756 index 4d70d96..84d0573 100644
35757 --- a/drivers/scsi/libfc/fc_exch.c
35758 +++ b/drivers/scsi/libfc/fc_exch.c
35759 @@ -105,12 +105,12 @@ struct fc_exch_mgr {
35760 * all together if not used XXX
35761 */
35762 struct {
35763 - atomic_t no_free_exch;
35764 - atomic_t no_free_exch_xid;
35765 - atomic_t xid_not_found;
35766 - atomic_t xid_busy;
35767 - atomic_t seq_not_found;
35768 - atomic_t non_bls_resp;
35769 + atomic_unchecked_t no_free_exch;
35770 + atomic_unchecked_t no_free_exch_xid;
35771 + atomic_unchecked_t xid_not_found;
35772 + atomic_unchecked_t xid_busy;
35773 + atomic_unchecked_t seq_not_found;
35774 + atomic_unchecked_t non_bls_resp;
35775 } stats;
35776 };
35777
35778 @@ -719,7 +719,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
35779 /* allocate memory for exchange */
35780 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
35781 if (!ep) {
35782 - atomic_inc(&mp->stats.no_free_exch);
35783 + atomic_inc_unchecked(&mp->stats.no_free_exch);
35784 goto out;
35785 }
35786 memset(ep, 0, sizeof(*ep));
35787 @@ -780,7 +780,7 @@ out:
35788 return ep;
35789 err:
35790 spin_unlock_bh(&pool->lock);
35791 - atomic_inc(&mp->stats.no_free_exch_xid);
35792 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
35793 mempool_free(ep, mp->ep_pool);
35794 return NULL;
35795 }
35796 @@ -923,7 +923,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
35797 xid = ntohs(fh->fh_ox_id); /* we originated exch */
35798 ep = fc_exch_find(mp, xid);
35799 if (!ep) {
35800 - atomic_inc(&mp->stats.xid_not_found);
35801 + atomic_inc_unchecked(&mp->stats.xid_not_found);
35802 reject = FC_RJT_OX_ID;
35803 goto out;
35804 }
35805 @@ -953,7 +953,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
35806 ep = fc_exch_find(mp, xid);
35807 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
35808 if (ep) {
35809 - atomic_inc(&mp->stats.xid_busy);
35810 + atomic_inc_unchecked(&mp->stats.xid_busy);
35811 reject = FC_RJT_RX_ID;
35812 goto rel;
35813 }
35814 @@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
35815 }
35816 xid = ep->xid; /* get our XID */
35817 } else if (!ep) {
35818 - atomic_inc(&mp->stats.xid_not_found);
35819 + atomic_inc_unchecked(&mp->stats.xid_not_found);
35820 reject = FC_RJT_RX_ID; /* XID not found */
35821 goto out;
35822 }
35823 @@ -981,7 +981,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
35824 } else {
35825 sp = &ep->seq;
35826 if (sp->id != fh->fh_seq_id) {
35827 - atomic_inc(&mp->stats.seq_not_found);
35828 + atomic_inc_unchecked(&mp->stats.seq_not_found);
35829 if (f_ctl & FC_FC_END_SEQ) {
35830 /*
35831 * Update sequence_id based on incoming last
35832 @@ -1431,22 +1431,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
35833
35834 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
35835 if (!ep) {
35836 - atomic_inc(&mp->stats.xid_not_found);
35837 + atomic_inc_unchecked(&mp->stats.xid_not_found);
35838 goto out;
35839 }
35840 if (ep->esb_stat & ESB_ST_COMPLETE) {
35841 - atomic_inc(&mp->stats.xid_not_found);
35842 + atomic_inc_unchecked(&mp->stats.xid_not_found);
35843 goto rel;
35844 }
35845 if (ep->rxid == FC_XID_UNKNOWN)
35846 ep->rxid = ntohs(fh->fh_rx_id);
35847 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
35848 - atomic_inc(&mp->stats.xid_not_found);
35849 + atomic_inc_unchecked(&mp->stats.xid_not_found);
35850 goto rel;
35851 }
35852 if (ep->did != ntoh24(fh->fh_s_id) &&
35853 ep->did != FC_FID_FLOGI) {
35854 - atomic_inc(&mp->stats.xid_not_found);
35855 + atomic_inc_unchecked(&mp->stats.xid_not_found);
35856 goto rel;
35857 }
35858 sof = fr_sof(fp);
35859 @@ -1455,7 +1455,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
35860 sp->ssb_stat |= SSB_ST_RESP;
35861 sp->id = fh->fh_seq_id;
35862 } else if (sp->id != fh->fh_seq_id) {
35863 - atomic_inc(&mp->stats.seq_not_found);
35864 + atomic_inc_unchecked(&mp->stats.seq_not_found);
35865 goto rel;
35866 }
35867
35868 @@ -1519,9 +1519,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
35869 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
35870
35871 if (!sp)
35872 - atomic_inc(&mp->stats.xid_not_found);
35873 + atomic_inc_unchecked(&mp->stats.xid_not_found);
35874 else
35875 - atomic_inc(&mp->stats.non_bls_resp);
35876 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
35877
35878 fc_frame_free(fp);
35879 }
35880 diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
35881 index db9238f..4378ed2 100644
35882 --- a/drivers/scsi/libsas/sas_ata.c
35883 +++ b/drivers/scsi/libsas/sas_ata.c
35884 @@ -368,7 +368,7 @@ static struct ata_port_operations sas_sata_ops = {
35885 .postreset = ata_std_postreset,
35886 .error_handler = ata_std_error_handler,
35887 .post_internal_cmd = sas_ata_post_internal,
35888 - .qc_defer = ata_std_qc_defer,
35889 + .qc_defer = ata_std_qc_defer,
35890 .qc_prep = ata_noop_qc_prep,
35891 .qc_issue = sas_ata_qc_issue,
35892 .qc_fill_rtf = sas_ata_qc_fill_rtf,
35893 diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
35894 index 825f930..ce42672 100644
35895 --- a/drivers/scsi/lpfc/lpfc.h
35896 +++ b/drivers/scsi/lpfc/lpfc.h
35897 @@ -413,7 +413,7 @@ struct lpfc_vport {
35898 struct dentry *debug_nodelist;
35899 struct dentry *vport_debugfs_root;
35900 struct lpfc_debugfs_trc *disc_trc;
35901 - atomic_t disc_trc_cnt;
35902 + atomic_unchecked_t disc_trc_cnt;
35903 #endif
35904 uint8_t stat_data_enabled;
35905 uint8_t stat_data_blocked;
35906 @@ -821,8 +821,8 @@ struct lpfc_hba {
35907 struct timer_list fabric_block_timer;
35908 unsigned long bit_flags;
35909 #define FABRIC_COMANDS_BLOCKED 0
35910 - atomic_t num_rsrc_err;
35911 - atomic_t num_cmd_success;
35912 + atomic_unchecked_t num_rsrc_err;
35913 + atomic_unchecked_t num_cmd_success;
35914 unsigned long last_rsrc_error_time;
35915 unsigned long last_ramp_down_time;
35916 unsigned long last_ramp_up_time;
35917 @@ -852,7 +852,7 @@ struct lpfc_hba {
35918
35919 struct dentry *debug_slow_ring_trc;
35920 struct lpfc_debugfs_trc *slow_ring_trc;
35921 - atomic_t slow_ring_trc_cnt;
35922 + atomic_unchecked_t slow_ring_trc_cnt;
35923 /* iDiag debugfs sub-directory */
35924 struct dentry *idiag_root;
35925 struct dentry *idiag_pci_cfg;
35926 diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
35927 index 3587a3f..d45b81b 100644
35928 --- a/drivers/scsi/lpfc/lpfc_debugfs.c
35929 +++ b/drivers/scsi/lpfc/lpfc_debugfs.c
35930 @@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
35931
35932 #include <linux/debugfs.h>
35933
35934 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
35935 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
35936 static unsigned long lpfc_debugfs_start_time = 0L;
35937
35938 /* iDiag */
35939 @@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
35940 lpfc_debugfs_enable = 0;
35941
35942 len = 0;
35943 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
35944 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
35945 (lpfc_debugfs_max_disc_trc - 1);
35946 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
35947 dtp = vport->disc_trc + i;
35948 @@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
35949 lpfc_debugfs_enable = 0;
35950
35951 len = 0;
35952 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
35953 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
35954 (lpfc_debugfs_max_slow_ring_trc - 1);
35955 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
35956 dtp = phba->slow_ring_trc + i;
35957 @@ -636,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
35958 !vport || !vport->disc_trc)
35959 return;
35960
35961 - index = atomic_inc_return(&vport->disc_trc_cnt) &
35962 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
35963 (lpfc_debugfs_max_disc_trc - 1);
35964 dtp = vport->disc_trc + index;
35965 dtp->fmt = fmt;
35966 dtp->data1 = data1;
35967 dtp->data2 = data2;
35968 dtp->data3 = data3;
35969 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
35970 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
35971 dtp->jif = jiffies;
35972 #endif
35973 return;
35974 @@ -674,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
35975 !phba || !phba->slow_ring_trc)
35976 return;
35977
35978 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
35979 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
35980 (lpfc_debugfs_max_slow_ring_trc - 1);
35981 dtp = phba->slow_ring_trc + index;
35982 dtp->fmt = fmt;
35983 dtp->data1 = data1;
35984 dtp->data2 = data2;
35985 dtp->data3 = data3;
35986 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
35987 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
35988 dtp->jif = jiffies;
35989 #endif
35990 return;
35991 @@ -4040,7 +4040,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
35992 "slow_ring buffer\n");
35993 goto debug_failed;
35994 }
35995 - atomic_set(&phba->slow_ring_trc_cnt, 0);
35996 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
35997 memset(phba->slow_ring_trc, 0,
35998 (sizeof(struct lpfc_debugfs_trc) *
35999 lpfc_debugfs_max_slow_ring_trc));
36000 @@ -4086,7 +4086,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
36001 "buffer\n");
36002 goto debug_failed;
36003 }
36004 - atomic_set(&vport->disc_trc_cnt, 0);
36005 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
36006
36007 snprintf(name, sizeof(name), "discovery_trace");
36008 vport->debug_disc_trc =
36009 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
36010 index dfea2da..8e17227 100644
36011 --- a/drivers/scsi/lpfc/lpfc_init.c
36012 +++ b/drivers/scsi/lpfc/lpfc_init.c
36013 @@ -10145,8 +10145,10 @@ lpfc_init(void)
36014 printk(LPFC_COPYRIGHT "\n");
36015
36016 if (lpfc_enable_npiv) {
36017 - lpfc_transport_functions.vport_create = lpfc_vport_create;
36018 - lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36019 + pax_open_kernel();
36020 + *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
36021 + *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36022 + pax_close_kernel();
36023 }
36024 lpfc_transport_template =
36025 fc_attach_transport(&lpfc_transport_functions);
36026 diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
36027 index c60f5d0..751535c 100644
36028 --- a/drivers/scsi/lpfc/lpfc_scsi.c
36029 +++ b/drivers/scsi/lpfc/lpfc_scsi.c
36030 @@ -305,7 +305,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
36031 uint32_t evt_posted;
36032
36033 spin_lock_irqsave(&phba->hbalock, flags);
36034 - atomic_inc(&phba->num_rsrc_err);
36035 + atomic_inc_unchecked(&phba->num_rsrc_err);
36036 phba->last_rsrc_error_time = jiffies;
36037
36038 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
36039 @@ -346,7 +346,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
36040 unsigned long flags;
36041 struct lpfc_hba *phba = vport->phba;
36042 uint32_t evt_posted;
36043 - atomic_inc(&phba->num_cmd_success);
36044 + atomic_inc_unchecked(&phba->num_cmd_success);
36045
36046 if (vport->cfg_lun_queue_depth <= queue_depth)
36047 return;
36048 @@ -390,8 +390,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
36049 unsigned long num_rsrc_err, num_cmd_success;
36050 int i;
36051
36052 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
36053 - num_cmd_success = atomic_read(&phba->num_cmd_success);
36054 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
36055 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
36056
36057 vports = lpfc_create_vport_work_array(phba);
36058 if (vports != NULL)
36059 @@ -411,8 +411,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
36060 }
36061 }
36062 lpfc_destroy_vport_work_array(phba, vports);
36063 - atomic_set(&phba->num_rsrc_err, 0);
36064 - atomic_set(&phba->num_cmd_success, 0);
36065 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
36066 + atomic_set_unchecked(&phba->num_cmd_success, 0);
36067 }
36068
36069 /**
36070 @@ -446,8 +446,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
36071 }
36072 }
36073 lpfc_destroy_vport_work_array(phba, vports);
36074 - atomic_set(&phba->num_rsrc_err, 0);
36075 - atomic_set(&phba->num_cmd_success, 0);
36076 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
36077 + atomic_set_unchecked(&phba->num_cmd_success, 0);
36078 }
36079
36080 /**
36081 diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
36082 index ea8a0b4..812a124 100644
36083 --- a/drivers/scsi/pmcraid.c
36084 +++ b/drivers/scsi/pmcraid.c
36085 @@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
36086 res->scsi_dev = scsi_dev;
36087 scsi_dev->hostdata = res;
36088 res->change_detected = 0;
36089 - atomic_set(&res->read_failures, 0);
36090 - atomic_set(&res->write_failures, 0);
36091 + atomic_set_unchecked(&res->read_failures, 0);
36092 + atomic_set_unchecked(&res->write_failures, 0);
36093 rc = 0;
36094 }
36095 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
36096 @@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
36097
36098 /* If this was a SCSI read/write command keep count of errors */
36099 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
36100 - atomic_inc(&res->read_failures);
36101 + atomic_inc_unchecked(&res->read_failures);
36102 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
36103 - atomic_inc(&res->write_failures);
36104 + atomic_inc_unchecked(&res->write_failures);
36105
36106 if (!RES_IS_GSCSI(res->cfg_entry) &&
36107 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
36108 @@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
36109 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
36110 * hrrq_id assigned here in queuecommand
36111 */
36112 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
36113 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
36114 pinstance->num_hrrq;
36115 cmd->cmd_done = pmcraid_io_done;
36116
36117 @@ -3859,7 +3859,7 @@ static long pmcraid_ioctl_passthrough(
36118 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
36119 * hrrq_id assigned here in queuecommand
36120 */
36121 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
36122 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
36123 pinstance->num_hrrq;
36124
36125 if (request_size) {
36126 @@ -4497,7 +4497,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
36127
36128 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
36129 /* add resources only after host is added into system */
36130 - if (!atomic_read(&pinstance->expose_resources))
36131 + if (!atomic_read_unchecked(&pinstance->expose_resources))
36132 return;
36133
36134 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
36135 @@ -5331,8 +5331,8 @@ static int __devinit pmcraid_init_instance(
36136 init_waitqueue_head(&pinstance->reset_wait_q);
36137
36138 atomic_set(&pinstance->outstanding_cmds, 0);
36139 - atomic_set(&pinstance->last_message_id, 0);
36140 - atomic_set(&pinstance->expose_resources, 0);
36141 + atomic_set_unchecked(&pinstance->last_message_id, 0);
36142 + atomic_set_unchecked(&pinstance->expose_resources, 0);
36143
36144 INIT_LIST_HEAD(&pinstance->free_res_q);
36145 INIT_LIST_HEAD(&pinstance->used_res_q);
36146 @@ -6047,7 +6047,7 @@ static int __devinit pmcraid_probe(
36147 /* Schedule worker thread to handle CCN and take care of adding and
36148 * removing devices to OS
36149 */
36150 - atomic_set(&pinstance->expose_resources, 1);
36151 + atomic_set_unchecked(&pinstance->expose_resources, 1);
36152 schedule_work(&pinstance->worker_q);
36153 return rc;
36154
36155 diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
36156 index ca496c7..9c791d5 100644
36157 --- a/drivers/scsi/pmcraid.h
36158 +++ b/drivers/scsi/pmcraid.h
36159 @@ -748,7 +748,7 @@ struct pmcraid_instance {
36160 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
36161
36162 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
36163 - atomic_t last_message_id;
36164 + atomic_unchecked_t last_message_id;
36165
36166 /* configuration table */
36167 struct pmcraid_config_table *cfg_table;
36168 @@ -777,7 +777,7 @@ struct pmcraid_instance {
36169 atomic_t outstanding_cmds;
36170
36171 /* should add/delete resources to mid-layer now ?*/
36172 - atomic_t expose_resources;
36173 + atomic_unchecked_t expose_resources;
36174
36175
36176
36177 @@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
36178 struct pmcraid_config_table_entry_ext cfg_entry_ext;
36179 };
36180 struct scsi_device *scsi_dev; /* Link scsi_device structure */
36181 - atomic_t read_failures; /* count of failed READ commands */
36182 - atomic_t write_failures; /* count of failed WRITE commands */
36183 + atomic_unchecked_t read_failures; /* count of failed READ commands */
36184 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
36185
36186 /* To indicate add/delete/modify during CCN */
36187 u8 change_detected;
36188 diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
36189 index af1003f..be55a75 100644
36190 --- a/drivers/scsi/qla2xxx/qla_def.h
36191 +++ b/drivers/scsi/qla2xxx/qla_def.h
36192 @@ -2247,7 +2247,7 @@ struct isp_operations {
36193 int (*start_scsi) (srb_t *);
36194 int (*abort_isp) (struct scsi_qla_host *);
36195 int (*iospace_config)(struct qla_hw_data*);
36196 -};
36197 +} __no_const;
36198
36199 /* MSI-X Support *************************************************************/
36200
36201 diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
36202 index bfe6854..ceac088 100644
36203 --- a/drivers/scsi/qla4xxx/ql4_def.h
36204 +++ b/drivers/scsi/qla4xxx/ql4_def.h
36205 @@ -261,7 +261,7 @@ struct ddb_entry {
36206 * (4000 only) */
36207 atomic_t relogin_timer; /* Max Time to wait for
36208 * relogin to complete */
36209 - atomic_t relogin_retry_count; /* Num of times relogin has been
36210 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
36211 * retried */
36212 uint32_t default_time2wait; /* Default Min time between
36213 * relogins (+aens) */
36214 diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
36215 index ce6d3b7..73fac54 100644
36216 --- a/drivers/scsi/qla4xxx/ql4_os.c
36217 +++ b/drivers/scsi/qla4xxx/ql4_os.c
36218 @@ -2178,12 +2178,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
36219 */
36220 if (!iscsi_is_session_online(cls_sess)) {
36221 /* Reset retry relogin timer */
36222 - atomic_inc(&ddb_entry->relogin_retry_count);
36223 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
36224 DEBUG2(ql4_printk(KERN_INFO, ha,
36225 "%s: index[%d] relogin timed out-retrying"
36226 " relogin (%d), retry (%d)\n", __func__,
36227 ddb_entry->fw_ddb_index,
36228 - atomic_read(&ddb_entry->relogin_retry_count),
36229 + atomic_read_unchecked(&ddb_entry->relogin_retry_count),
36230 ddb_entry->default_time2wait + 4));
36231 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
36232 atomic_set(&ddb_entry->retry_relogin_timer,
36233 @@ -3953,7 +3953,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
36234
36235 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
36236 atomic_set(&ddb_entry->relogin_timer, 0);
36237 - atomic_set(&ddb_entry->relogin_retry_count, 0);
36238 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
36239 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
36240 ddb_entry->default_relogin_timeout =
36241 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
36242 diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
36243 index 2aeb2e9..46e3925 100644
36244 --- a/drivers/scsi/scsi.c
36245 +++ b/drivers/scsi/scsi.c
36246 @@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
36247 unsigned long timeout;
36248 int rtn = 0;
36249
36250 - atomic_inc(&cmd->device->iorequest_cnt);
36251 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
36252
36253 /* check if the device is still usable */
36254 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
36255 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
36256 index b2c95db..227d74e 100644
36257 --- a/drivers/scsi/scsi_lib.c
36258 +++ b/drivers/scsi/scsi_lib.c
36259 @@ -1411,7 +1411,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
36260 shost = sdev->host;
36261 scsi_init_cmd_errh(cmd);
36262 cmd->result = DID_NO_CONNECT << 16;
36263 - atomic_inc(&cmd->device->iorequest_cnt);
36264 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
36265
36266 /*
36267 * SCSI request completion path will do scsi_device_unbusy(),
36268 @@ -1437,9 +1437,9 @@ static void scsi_softirq_done(struct request *rq)
36269
36270 INIT_LIST_HEAD(&cmd->eh_entry);
36271
36272 - atomic_inc(&cmd->device->iodone_cnt);
36273 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
36274 if (cmd->result)
36275 - atomic_inc(&cmd->device->ioerr_cnt);
36276 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
36277
36278 disposition = scsi_decide_disposition(cmd);
36279 if (disposition != SUCCESS &&
36280 diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
36281 index 04c2a27..9d8bd66 100644
36282 --- a/drivers/scsi/scsi_sysfs.c
36283 +++ b/drivers/scsi/scsi_sysfs.c
36284 @@ -660,7 +660,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
36285 char *buf) \
36286 { \
36287 struct scsi_device *sdev = to_scsi_device(dev); \
36288 - unsigned long long count = atomic_read(&sdev->field); \
36289 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
36290 return snprintf(buf, 20, "0x%llx\n", count); \
36291 } \
36292 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
36293 diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
36294 index 84a1fdf..693b0d6 100644
36295 --- a/drivers/scsi/scsi_tgt_lib.c
36296 +++ b/drivers/scsi/scsi_tgt_lib.c
36297 @@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
36298 int err;
36299
36300 dprintk("%lx %u\n", uaddr, len);
36301 - err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
36302 + err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
36303 if (err) {
36304 /*
36305 * TODO: need to fixup sg_tablesize, max_segment_size,
36306 diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
36307 index f59d4a0..1d89407 100644
36308 --- a/drivers/scsi/scsi_transport_fc.c
36309 +++ b/drivers/scsi/scsi_transport_fc.c
36310 @@ -484,7 +484,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
36311 * Netlink Infrastructure
36312 */
36313
36314 -static atomic_t fc_event_seq;
36315 +static atomic_unchecked_t fc_event_seq;
36316
36317 /**
36318 * fc_get_event_number - Obtain the next sequential FC event number
36319 @@ -497,7 +497,7 @@ static atomic_t fc_event_seq;
36320 u32
36321 fc_get_event_number(void)
36322 {
36323 - return atomic_add_return(1, &fc_event_seq);
36324 + return atomic_add_return_unchecked(1, &fc_event_seq);
36325 }
36326 EXPORT_SYMBOL(fc_get_event_number);
36327
36328 @@ -645,7 +645,7 @@ static __init int fc_transport_init(void)
36329 {
36330 int error;
36331
36332 - atomic_set(&fc_event_seq, 0);
36333 + atomic_set_unchecked(&fc_event_seq, 0);
36334
36335 error = transport_class_register(&fc_host_class);
36336 if (error)
36337 @@ -835,7 +835,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
36338 char *cp;
36339
36340 *val = simple_strtoul(buf, &cp, 0);
36341 - if ((*cp && (*cp != '\n')) || (*val < 0))
36342 + if (*cp && (*cp != '\n'))
36343 return -EINVAL;
36344 /*
36345 * Check for overflow; dev_loss_tmo is u32
36346 diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
36347 index cfd4914..ddd7129 100644
36348 --- a/drivers/scsi/scsi_transport_iscsi.c
36349 +++ b/drivers/scsi/scsi_transport_iscsi.c
36350 @@ -79,7 +79,7 @@ struct iscsi_internal {
36351 struct transport_container session_cont;
36352 };
36353
36354 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
36355 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
36356 static struct workqueue_struct *iscsi_eh_timer_workq;
36357
36358 static DEFINE_IDA(iscsi_sess_ida);
36359 @@ -1063,7 +1063,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
36360 int err;
36361
36362 ihost = shost->shost_data;
36363 - session->sid = atomic_add_return(1, &iscsi_session_nr);
36364 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
36365
36366 if (target_id == ISCSI_MAX_TARGET) {
36367 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
36368 @@ -2680,7 +2680,7 @@ static __init int iscsi_transport_init(void)
36369 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
36370 ISCSI_TRANSPORT_VERSION);
36371
36372 - atomic_set(&iscsi_session_nr, 0);
36373 + atomic_set_unchecked(&iscsi_session_nr, 0);
36374
36375 err = class_register(&iscsi_transport_class);
36376 if (err)
36377 diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
36378 index 21a045e..ec89e03 100644
36379 --- a/drivers/scsi/scsi_transport_srp.c
36380 +++ b/drivers/scsi/scsi_transport_srp.c
36381 @@ -33,7 +33,7 @@
36382 #include "scsi_transport_srp_internal.h"
36383
36384 struct srp_host_attrs {
36385 - atomic_t next_port_id;
36386 + atomic_unchecked_t next_port_id;
36387 };
36388 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
36389
36390 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
36391 struct Scsi_Host *shost = dev_to_shost(dev);
36392 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
36393
36394 - atomic_set(&srp_host->next_port_id, 0);
36395 + atomic_set_unchecked(&srp_host->next_port_id, 0);
36396 return 0;
36397 }
36398
36399 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
36400 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
36401 rport->roles = ids->roles;
36402
36403 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
36404 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
36405 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
36406
36407 transport_setup_device(&rport->dev);
36408 diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
36409 index eacd46b..e3f4d62 100644
36410 --- a/drivers/scsi/sg.c
36411 +++ b/drivers/scsi/sg.c
36412 @@ -1077,7 +1077,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
36413 sdp->disk->disk_name,
36414 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
36415 NULL,
36416 - (char *)arg);
36417 + (char __user *)arg);
36418 case BLKTRACESTART:
36419 return blk_trace_startstop(sdp->device->request_queue, 1);
36420 case BLKTRACESTOP:
36421 @@ -2312,7 +2312,7 @@ struct sg_proc_leaf {
36422 const struct file_operations * fops;
36423 };
36424
36425 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
36426 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
36427 {"allow_dio", &adio_fops},
36428 {"debug", &debug_fops},
36429 {"def_reserved_size", &dressz_fops},
36430 @@ -2332,7 +2332,7 @@ sg_proc_init(void)
36431 if (!sg_proc_sgp)
36432 return 1;
36433 for (k = 0; k < num_leaves; ++k) {
36434 - struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
36435 + const struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
36436 umode_t mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
36437 proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops);
36438 }
36439 diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
36440 index f64250e..1ee3049 100644
36441 --- a/drivers/spi/spi-dw-pci.c
36442 +++ b/drivers/spi/spi-dw-pci.c
36443 @@ -149,7 +149,7 @@ static int spi_resume(struct pci_dev *pdev)
36444 #define spi_resume NULL
36445 #endif
36446
36447 -static const struct pci_device_id pci_ids[] __devinitdata = {
36448 +static const struct pci_device_id pci_ids[] __devinitconst = {
36449 /* Intel MID platform SPI controller 0 */
36450 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
36451 {},
36452 diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
36453 index b2ccdea..84cde75 100644
36454 --- a/drivers/spi/spi.c
36455 +++ b/drivers/spi/spi.c
36456 @@ -1024,7 +1024,7 @@ int spi_bus_unlock(struct spi_master *master)
36457 EXPORT_SYMBOL_GPL(spi_bus_unlock);
36458
36459 /* portable code must never pass more than 32 bytes */
36460 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
36461 +#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
36462
36463 static u8 *buf;
36464
36465 diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
36466 index 400df8c..065d4f4 100644
36467 --- a/drivers/staging/octeon/ethernet-rx.c
36468 +++ b/drivers/staging/octeon/ethernet-rx.c
36469 @@ -420,11 +420,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
36470 /* Increment RX stats for virtual ports */
36471 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
36472 #ifdef CONFIG_64BIT
36473 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
36474 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
36475 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
36476 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
36477 #else
36478 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
36479 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
36480 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
36481 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
36482 #endif
36483 }
36484 netif_receive_skb(skb);
36485 @@ -436,9 +436,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
36486 dev->name);
36487 */
36488 #ifdef CONFIG_64BIT
36489 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
36490 + atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
36491 #else
36492 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
36493 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
36494 #endif
36495 dev_kfree_skb_irq(skb);
36496 }
36497 diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
36498 index 9112cd8..92f8d51 100644
36499 --- a/drivers/staging/octeon/ethernet.c
36500 +++ b/drivers/staging/octeon/ethernet.c
36501 @@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
36502 * since the RX tasklet also increments it.
36503 */
36504 #ifdef CONFIG_64BIT
36505 - atomic64_add(rx_status.dropped_packets,
36506 - (atomic64_t *)&priv->stats.rx_dropped);
36507 + atomic64_add_unchecked(rx_status.dropped_packets,
36508 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
36509 #else
36510 - atomic_add(rx_status.dropped_packets,
36511 - (atomic_t *)&priv->stats.rx_dropped);
36512 + atomic_add_unchecked(rx_status.dropped_packets,
36513 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
36514 #endif
36515 }
36516
36517 diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
36518 index 86308a0..feaa925 100644
36519 --- a/drivers/staging/rtl8712/rtl871x_io.h
36520 +++ b/drivers/staging/rtl8712/rtl871x_io.h
36521 @@ -108,7 +108,7 @@ struct _io_ops {
36522 u8 *pmem);
36523 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
36524 u8 *pmem);
36525 -};
36526 +} __no_const;
36527
36528 struct io_req {
36529 struct list_head list;
36530 diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
36531 index c7b5e8b..783d6cb 100644
36532 --- a/drivers/staging/sbe-2t3e3/netdev.c
36533 +++ b/drivers/staging/sbe-2t3e3/netdev.c
36534 @@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
36535 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
36536
36537 if (rlen)
36538 - if (copy_to_user(data, &resp, rlen))
36539 + if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
36540 return -EFAULT;
36541
36542 return 0;
36543 diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
36544 index 42cdafe..2769103 100644
36545 --- a/drivers/staging/speakup/speakup_soft.c
36546 +++ b/drivers/staging/speakup/speakup_soft.c
36547 @@ -241,11 +241,11 @@ static ssize_t softsynth_read(struct file *fp, char *buf, size_t count,
36548 break;
36549 } else if (!initialized) {
36550 if (*init) {
36551 - ch = *init;
36552 init++;
36553 } else {
36554 initialized = 1;
36555 }
36556 + ch = *init;
36557 } else {
36558 ch = synth_buffer_getc();
36559 }
36560 diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
36561 index b8f8c48..1fc5025 100644
36562 --- a/drivers/staging/usbip/usbip_common.h
36563 +++ b/drivers/staging/usbip/usbip_common.h
36564 @@ -289,7 +289,7 @@ struct usbip_device {
36565 void (*shutdown)(struct usbip_device *);
36566 void (*reset)(struct usbip_device *);
36567 void (*unusable)(struct usbip_device *);
36568 - } eh_ops;
36569 + } __no_const eh_ops;
36570 };
36571
36572 /* usbip_common.c */
36573 diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
36574 index 88b3298..3783eee 100644
36575 --- a/drivers/staging/usbip/vhci.h
36576 +++ b/drivers/staging/usbip/vhci.h
36577 @@ -88,7 +88,7 @@ struct vhci_hcd {
36578 unsigned resuming:1;
36579 unsigned long re_timeout;
36580
36581 - atomic_t seqnum;
36582 + atomic_unchecked_t seqnum;
36583
36584 /*
36585 * NOTE:
36586 diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
36587 index 2ee97e2..0420b86 100644
36588 --- a/drivers/staging/usbip/vhci_hcd.c
36589 +++ b/drivers/staging/usbip/vhci_hcd.c
36590 @@ -527,7 +527,7 @@ static void vhci_tx_urb(struct urb *urb)
36591 return;
36592 }
36593
36594 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
36595 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
36596 if (priv->seqnum == 0xffff)
36597 dev_info(&urb->dev->dev, "seqnum max\n");
36598
36599 @@ -779,7 +779,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
36600 return -ENOMEM;
36601 }
36602
36603 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
36604 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
36605 if (unlink->seqnum == 0xffff)
36606 pr_info("seqnum max\n");
36607
36608 @@ -969,7 +969,7 @@ static int vhci_start(struct usb_hcd *hcd)
36609 vdev->rhport = rhport;
36610 }
36611
36612 - atomic_set(&vhci->seqnum, 0);
36613 + atomic_set_unchecked(&vhci->seqnum, 0);
36614 spin_lock_init(&vhci->lock);
36615
36616 hcd->power_budget = 0; /* no limit */
36617 diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
36618 index 3f511b4..d3dbc1e 100644
36619 --- a/drivers/staging/usbip/vhci_rx.c
36620 +++ b/drivers/staging/usbip/vhci_rx.c
36621 @@ -77,7 +77,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
36622 if (!urb) {
36623 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
36624 pr_info("max seqnum %d\n",
36625 - atomic_read(&the_controller->seqnum));
36626 + atomic_read_unchecked(&the_controller->seqnum));
36627 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
36628 return;
36629 }
36630 diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
36631 index 7735027..30eed13 100644
36632 --- a/drivers/staging/vt6655/hostap.c
36633 +++ b/drivers/staging/vt6655/hostap.c
36634 @@ -79,14 +79,13 @@ static int msglevel =MSG_LEVEL_INFO;
36635 *
36636 */
36637
36638 +static net_device_ops_no_const apdev_netdev_ops;
36639 +
36640 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
36641 {
36642 PSDevice apdev_priv;
36643 struct net_device *dev = pDevice->dev;
36644 int ret;
36645 - const struct net_device_ops apdev_netdev_ops = {
36646 - .ndo_start_xmit = pDevice->tx_80211,
36647 - };
36648
36649 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
36650
36651 @@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
36652 *apdev_priv = *pDevice;
36653 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
36654
36655 + /* only half broken now */
36656 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
36657 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
36658
36659 pDevice->apdev->type = ARPHRD_IEEE80211;
36660 diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
36661 index 51b5adf..098e320 100644
36662 --- a/drivers/staging/vt6656/hostap.c
36663 +++ b/drivers/staging/vt6656/hostap.c
36664 @@ -80,14 +80,13 @@ static int msglevel =MSG_LEVEL_INFO;
36665 *
36666 */
36667
36668 +static net_device_ops_no_const apdev_netdev_ops;
36669 +
36670 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
36671 {
36672 PSDevice apdev_priv;
36673 struct net_device *dev = pDevice->dev;
36674 int ret;
36675 - const struct net_device_ops apdev_netdev_ops = {
36676 - .ndo_start_xmit = pDevice->tx_80211,
36677 - };
36678
36679 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
36680
36681 @@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
36682 *apdev_priv = *pDevice;
36683 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
36684
36685 + /* only half broken now */
36686 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
36687 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
36688
36689 pDevice->apdev->type = ARPHRD_IEEE80211;
36690 diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
36691 index 7843dfd..3db105f 100644
36692 --- a/drivers/staging/wlan-ng/hfa384x_usb.c
36693 +++ b/drivers/staging/wlan-ng/hfa384x_usb.c
36694 @@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
36695
36696 struct usbctlx_completor {
36697 int (*complete) (struct usbctlx_completor *);
36698 -};
36699 +} __no_const;
36700
36701 static int
36702 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
36703 diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
36704 index 1ca66ea..76f1343 100644
36705 --- a/drivers/staging/zcache/tmem.c
36706 +++ b/drivers/staging/zcache/tmem.c
36707 @@ -39,7 +39,7 @@
36708 * A tmem host implementation must use this function to register callbacks
36709 * for memory allocation.
36710 */
36711 -static struct tmem_hostops tmem_hostops;
36712 +static tmem_hostops_no_const tmem_hostops;
36713
36714 static void tmem_objnode_tree_init(void);
36715
36716 @@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
36717 * A tmem host implementation must use this function to register
36718 * callbacks for a page-accessible memory (PAM) implementation
36719 */
36720 -static struct tmem_pamops tmem_pamops;
36721 +static tmem_pamops_no_const tmem_pamops;
36722
36723 void tmem_register_pamops(struct tmem_pamops *m)
36724 {
36725 diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
36726 index ed147c4..94fc3c6 100644
36727 --- a/drivers/staging/zcache/tmem.h
36728 +++ b/drivers/staging/zcache/tmem.h
36729 @@ -180,6 +180,7 @@ struct tmem_pamops {
36730 void (*new_obj)(struct tmem_obj *);
36731 int (*replace_in_obj)(void *, struct tmem_obj *);
36732 };
36733 +typedef struct tmem_pamops __no_const tmem_pamops_no_const;
36734 extern void tmem_register_pamops(struct tmem_pamops *m);
36735
36736 /* memory allocation methods provided by the host implementation */
36737 @@ -189,6 +190,7 @@ struct tmem_hostops {
36738 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
36739 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
36740 };
36741 +typedef struct tmem_hostops __no_const tmem_hostops_no_const;
36742 extern void tmem_register_hostops(struct tmem_hostops *m);
36743
36744 /* core tmem accessor functions */
36745 diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
36746 index 501b27c..39dc3d3 100644
36747 --- a/drivers/target/iscsi/iscsi_target.c
36748 +++ b/drivers/target/iscsi/iscsi_target.c
36749 @@ -1363,7 +1363,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
36750 * outstanding_r2ts reaches zero, go ahead and send the delayed
36751 * TASK_ABORTED status.
36752 */
36753 - if (atomic_read(&se_cmd->t_transport_aborted) != 0) {
36754 + if (atomic_read_unchecked(&se_cmd->t_transport_aborted) != 0) {
36755 if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
36756 if (--cmd->outstanding_r2ts < 1) {
36757 iscsit_stop_dataout_timer(cmd);
36758 diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
36759 index dcb0618..97e3d85 100644
36760 --- a/drivers/target/target_core_tmr.c
36761 +++ b/drivers/target/target_core_tmr.c
36762 @@ -260,7 +260,7 @@ static void core_tmr_drain_task_list(
36763 cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
36764 cmd->t_task_list_num,
36765 atomic_read(&cmd->t_task_cdbs_left),
36766 - atomic_read(&cmd->t_task_cdbs_sent),
36767 + atomic_read_unchecked(&cmd->t_task_cdbs_sent),
36768 atomic_read(&cmd->t_transport_active),
36769 atomic_read(&cmd->t_transport_stop),
36770 atomic_read(&cmd->t_transport_sent));
36771 @@ -291,7 +291,7 @@ static void core_tmr_drain_task_list(
36772 pr_debug("LUN_RESET: got t_transport_active = 1 for"
36773 " task: %p, t_fe_count: %d dev: %p\n", task,
36774 fe_count, dev);
36775 - atomic_set(&cmd->t_transport_aborted, 1);
36776 + atomic_set_unchecked(&cmd->t_transport_aborted, 1);
36777 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
36778
36779 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
36780 @@ -299,7 +299,7 @@ static void core_tmr_drain_task_list(
36781 }
36782 pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p,"
36783 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
36784 - atomic_set(&cmd->t_transport_aborted, 1);
36785 + atomic_set_unchecked(&cmd->t_transport_aborted, 1);
36786 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
36787
36788 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
36789 diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
36790 index cd5cd95..5249d30 100644
36791 --- a/drivers/target/target_core_transport.c
36792 +++ b/drivers/target/target_core_transport.c
36793 @@ -1330,7 +1330,7 @@ struct se_device *transport_add_device_to_core_hba(
36794 spin_lock_init(&dev->se_port_lock);
36795 spin_lock_init(&dev->se_tmr_lock);
36796 spin_lock_init(&dev->qf_cmd_lock);
36797 - atomic_set(&dev->dev_ordered_id, 0);
36798 + atomic_set_unchecked(&dev->dev_ordered_id, 0);
36799
36800 se_dev_set_default_attribs(dev, dev_limits);
36801
36802 @@ -1517,7 +1517,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
36803 * Used to determine when ORDERED commands should go from
36804 * Dormant to Active status.
36805 */
36806 - cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
36807 + cmd->se_ordered_id = atomic_inc_return_unchecked(&cmd->se_dev->dev_ordered_id);
36808 smp_mb__after_atomic_inc();
36809 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
36810 cmd->se_ordered_id, cmd->sam_task_attr,
36811 @@ -1862,7 +1862,7 @@ static void transport_generic_request_failure(struct se_cmd *cmd)
36812 " t_transport_active: %d t_transport_stop: %d"
36813 " t_transport_sent: %d\n", cmd->t_task_list_num,
36814 atomic_read(&cmd->t_task_cdbs_left),
36815 - atomic_read(&cmd->t_task_cdbs_sent),
36816 + atomic_read_unchecked(&cmd->t_task_cdbs_sent),
36817 atomic_read(&cmd->t_task_cdbs_ex_left),
36818 atomic_read(&cmd->t_transport_active),
36819 atomic_read(&cmd->t_transport_stop),
36820 @@ -2121,9 +2121,9 @@ check_depth:
36821 cmd = task->task_se_cmd;
36822 spin_lock_irqsave(&cmd->t_state_lock, flags);
36823 task->task_flags |= (TF_ACTIVE | TF_SENT);
36824 - atomic_inc(&cmd->t_task_cdbs_sent);
36825 + atomic_inc_unchecked(&cmd->t_task_cdbs_sent);
36826
36827 - if (atomic_read(&cmd->t_task_cdbs_sent) ==
36828 + if (atomic_read_unchecked(&cmd->t_task_cdbs_sent) ==
36829 cmd->t_task_list_num)
36830 atomic_set(&cmd->t_transport_sent, 1);
36831
36832 @@ -4348,7 +4348,7 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
36833 atomic_set(&cmd->transport_lun_stop, 0);
36834 }
36835 if (!atomic_read(&cmd->t_transport_active) ||
36836 - atomic_read(&cmd->t_transport_aborted)) {
36837 + atomic_read_unchecked(&cmd->t_transport_aborted)) {
36838 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
36839 return false;
36840 }
36841 @@ -4597,7 +4597,7 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
36842 {
36843 int ret = 0;
36844
36845 - if (atomic_read(&cmd->t_transport_aborted) != 0) {
36846 + if (atomic_read_unchecked(&cmd->t_transport_aborted) != 0) {
36847 if (!send_status ||
36848 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
36849 return 1;
36850 @@ -4634,7 +4634,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
36851 */
36852 if (cmd->data_direction == DMA_TO_DEVICE) {
36853 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
36854 - atomic_inc(&cmd->t_transport_aborted);
36855 + atomic_inc_unchecked(&cmd->t_transport_aborted);
36856 smp_mb__after_atomic_inc();
36857 }
36858 }
36859 diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
36860 index b9040be..e3f5aab 100644
36861 --- a/drivers/tty/hvc/hvcs.c
36862 +++ b/drivers/tty/hvc/hvcs.c
36863 @@ -83,6 +83,7 @@
36864 #include <asm/hvcserver.h>
36865 #include <asm/uaccess.h>
36866 #include <asm/vio.h>
36867 +#include <asm/local.h>
36868
36869 /*
36870 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
36871 @@ -270,7 +271,7 @@ struct hvcs_struct {
36872 unsigned int index;
36873
36874 struct tty_struct *tty;
36875 - int open_count;
36876 + local_t open_count;
36877
36878 /*
36879 * Used to tell the driver kernel_thread what operations need to take
36880 @@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
36881
36882 spin_lock_irqsave(&hvcsd->lock, flags);
36883
36884 - if (hvcsd->open_count > 0) {
36885 + if (local_read(&hvcsd->open_count) > 0) {
36886 spin_unlock_irqrestore(&hvcsd->lock, flags);
36887 printk(KERN_INFO "HVCS: vterm state unchanged. "
36888 "The hvcs device node is still in use.\n");
36889 @@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
36890 if ((retval = hvcs_partner_connect(hvcsd)))
36891 goto error_release;
36892
36893 - hvcsd->open_count = 1;
36894 + local_set(&hvcsd->open_count, 1);
36895 hvcsd->tty = tty;
36896 tty->driver_data = hvcsd;
36897
36898 @@ -1179,7 +1180,7 @@ fast_open:
36899
36900 spin_lock_irqsave(&hvcsd->lock, flags);
36901 kref_get(&hvcsd->kref);
36902 - hvcsd->open_count++;
36903 + local_inc(&hvcsd->open_count);
36904 hvcsd->todo_mask |= HVCS_SCHED_READ;
36905 spin_unlock_irqrestore(&hvcsd->lock, flags);
36906
36907 @@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
36908 hvcsd = tty->driver_data;
36909
36910 spin_lock_irqsave(&hvcsd->lock, flags);
36911 - if (--hvcsd->open_count == 0) {
36912 + if (local_dec_and_test(&hvcsd->open_count)) {
36913
36914 vio_disable_interrupts(hvcsd->vdev);
36915
36916 @@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
36917 free_irq(irq, hvcsd);
36918 kref_put(&hvcsd->kref, destroy_hvcs_struct);
36919 return;
36920 - } else if (hvcsd->open_count < 0) {
36921 + } else if (local_read(&hvcsd->open_count) < 0) {
36922 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
36923 " is missmanaged.\n",
36924 - hvcsd->vdev->unit_address, hvcsd->open_count);
36925 + hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
36926 }
36927
36928 spin_unlock_irqrestore(&hvcsd->lock, flags);
36929 @@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struct * tty)
36930
36931 spin_lock_irqsave(&hvcsd->lock, flags);
36932 /* Preserve this so that we know how many kref refs to put */
36933 - temp_open_count = hvcsd->open_count;
36934 + temp_open_count = local_read(&hvcsd->open_count);
36935
36936 /*
36937 * Don't kref put inside the spinlock because the destruction
36938 @@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struct * tty)
36939 hvcsd->tty->driver_data = NULL;
36940 hvcsd->tty = NULL;
36941
36942 - hvcsd->open_count = 0;
36943 + local_set(&hvcsd->open_count, 0);
36944
36945 /* This will drop any buffered data on the floor which is OK in a hangup
36946 * scenario. */
36947 @@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct *tty,
36948 * the middle of a write operation? This is a crummy place to do this
36949 * but we want to keep it all in the spinlock.
36950 */
36951 - if (hvcsd->open_count <= 0) {
36952 + if (local_read(&hvcsd->open_count) <= 0) {
36953 spin_unlock_irqrestore(&hvcsd->lock, flags);
36954 return -ENODEV;
36955 }
36956 @@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_struct *tty)
36957 {
36958 struct hvcs_struct *hvcsd = tty->driver_data;
36959
36960 - if (!hvcsd || hvcsd->open_count <= 0)
36961 + if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
36962 return 0;
36963
36964 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
36965 diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
36966 index ef92869..f4ebd88 100644
36967 --- a/drivers/tty/ipwireless/tty.c
36968 +++ b/drivers/tty/ipwireless/tty.c
36969 @@ -29,6 +29,7 @@
36970 #include <linux/tty_driver.h>
36971 #include <linux/tty_flip.h>
36972 #include <linux/uaccess.h>
36973 +#include <asm/local.h>
36974
36975 #include "tty.h"
36976 #include "network.h"
36977 @@ -51,7 +52,7 @@ struct ipw_tty {
36978 int tty_type;
36979 struct ipw_network *network;
36980 struct tty_struct *linux_tty;
36981 - int open_count;
36982 + local_t open_count;
36983 unsigned int control_lines;
36984 struct mutex ipw_tty_mutex;
36985 int tx_bytes_queued;
36986 @@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
36987 mutex_unlock(&tty->ipw_tty_mutex);
36988 return -ENODEV;
36989 }
36990 - if (tty->open_count == 0)
36991 + if (local_read(&tty->open_count) == 0)
36992 tty->tx_bytes_queued = 0;
36993
36994 - tty->open_count++;
36995 + local_inc(&tty->open_count);
36996
36997 tty->linux_tty = linux_tty;
36998 linux_tty->driver_data = tty;
36999 @@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
37000
37001 static void do_ipw_close(struct ipw_tty *tty)
37002 {
37003 - tty->open_count--;
37004 -
37005 - if (tty->open_count == 0) {
37006 + if (local_dec_return(&tty->open_count) == 0) {
37007 struct tty_struct *linux_tty = tty->linux_tty;
37008
37009 if (linux_tty != NULL) {
37010 @@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
37011 return;
37012
37013 mutex_lock(&tty->ipw_tty_mutex);
37014 - if (tty->open_count == 0) {
37015 + if (local_read(&tty->open_count) == 0) {
37016 mutex_unlock(&tty->ipw_tty_mutex);
37017 return;
37018 }
37019 @@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
37020 return;
37021 }
37022
37023 - if (!tty->open_count) {
37024 + if (!local_read(&tty->open_count)) {
37025 mutex_unlock(&tty->ipw_tty_mutex);
37026 return;
37027 }
37028 @@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *linux_tty,
37029 return -ENODEV;
37030
37031 mutex_lock(&tty->ipw_tty_mutex);
37032 - if (!tty->open_count) {
37033 + if (!local_read(&tty->open_count)) {
37034 mutex_unlock(&tty->ipw_tty_mutex);
37035 return -EINVAL;
37036 }
37037 @@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
37038 if (!tty)
37039 return -ENODEV;
37040
37041 - if (!tty->open_count)
37042 + if (!local_read(&tty->open_count))
37043 return -EINVAL;
37044
37045 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
37046 @@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
37047 if (!tty)
37048 return 0;
37049
37050 - if (!tty->open_count)
37051 + if (!local_read(&tty->open_count))
37052 return 0;
37053
37054 return tty->tx_bytes_queued;
37055 @@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
37056 if (!tty)
37057 return -ENODEV;
37058
37059 - if (!tty->open_count)
37060 + if (!local_read(&tty->open_count))
37061 return -EINVAL;
37062
37063 return get_control_lines(tty);
37064 @@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
37065 if (!tty)
37066 return -ENODEV;
37067
37068 - if (!tty->open_count)
37069 + if (!local_read(&tty->open_count))
37070 return -EINVAL;
37071
37072 return set_control_lines(tty, set, clear);
37073 @@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
37074 if (!tty)
37075 return -ENODEV;
37076
37077 - if (!tty->open_count)
37078 + if (!local_read(&tty->open_count))
37079 return -EINVAL;
37080
37081 /* FIXME: Exactly how is the tty object locked here .. */
37082 @@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
37083 against a parallel ioctl etc */
37084 mutex_lock(&ttyj->ipw_tty_mutex);
37085 }
37086 - while (ttyj->open_count)
37087 + while (local_read(&ttyj->open_count))
37088 do_ipw_close(ttyj);
37089 ipwireless_disassociate_network_ttys(network,
37090 ttyj->channel_idx);
37091 diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
37092 index fc7bbba..9527e93 100644
37093 --- a/drivers/tty/n_gsm.c
37094 +++ b/drivers/tty/n_gsm.c
37095 @@ -1629,7 +1629,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
37096 kref_init(&dlci->ref);
37097 mutex_init(&dlci->mutex);
37098 dlci->fifo = &dlci->_fifo;
37099 - if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
37100 + if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
37101 kfree(dlci);
37102 return NULL;
37103 }
37104 diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
37105 index d2256d0..97476fa 100644
37106 --- a/drivers/tty/n_tty.c
37107 +++ b/drivers/tty/n_tty.c
37108 @@ -2123,6 +2123,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
37109 {
37110 *ops = tty_ldisc_N_TTY;
37111 ops->owner = NULL;
37112 - ops->refcount = ops->flags = 0;
37113 + atomic_set(&ops->refcount, 0);
37114 + ops->flags = 0;
37115 }
37116 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
37117 diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
37118 index d8653ab..f8afd9d 100644
37119 --- a/drivers/tty/pty.c
37120 +++ b/drivers/tty/pty.c
37121 @@ -765,8 +765,10 @@ static void __init unix98_pty_init(void)
37122 register_sysctl_table(pty_root_table);
37123
37124 /* Now create the /dev/ptmx special device */
37125 + pax_open_kernel();
37126 tty_default_fops(&ptmx_fops);
37127 - ptmx_fops.open = ptmx_open;
37128 + *(void **)&ptmx_fops.open = ptmx_open;
37129 + pax_close_kernel();
37130
37131 cdev_init(&ptmx_cdev, &ptmx_fops);
37132 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
37133 diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
37134 index 2b42a01..32a2ed3 100644
37135 --- a/drivers/tty/serial/kgdboc.c
37136 +++ b/drivers/tty/serial/kgdboc.c
37137 @@ -24,8 +24,9 @@
37138 #define MAX_CONFIG_LEN 40
37139
37140 static struct kgdb_io kgdboc_io_ops;
37141 +static struct kgdb_io kgdboc_io_ops_console;
37142
37143 -/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
37144 +/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
37145 static int configured = -1;
37146
37147 static char config[MAX_CONFIG_LEN];
37148 @@ -148,6 +149,8 @@ static void cleanup_kgdboc(void)
37149 kgdboc_unregister_kbd();
37150 if (configured == 1)
37151 kgdb_unregister_io_module(&kgdboc_io_ops);
37152 + else if (configured == 2)
37153 + kgdb_unregister_io_module(&kgdboc_io_ops_console);
37154 }
37155
37156 static int configure_kgdboc(void)
37157 @@ -157,13 +160,13 @@ static int configure_kgdboc(void)
37158 int err;
37159 char *cptr = config;
37160 struct console *cons;
37161 + int is_console = 0;
37162
37163 err = kgdboc_option_setup(config);
37164 if (err || !strlen(config) || isspace(config[0]))
37165 goto noconfig;
37166
37167 err = -ENODEV;
37168 - kgdboc_io_ops.is_console = 0;
37169 kgdb_tty_driver = NULL;
37170
37171 kgdboc_use_kms = 0;
37172 @@ -184,7 +187,7 @@ static int configure_kgdboc(void)
37173 int idx;
37174 if (cons->device && cons->device(cons, &idx) == p &&
37175 idx == tty_line) {
37176 - kgdboc_io_ops.is_console = 1;
37177 + is_console = 1;
37178 break;
37179 }
37180 cons = cons->next;
37181 @@ -194,12 +197,16 @@ static int configure_kgdboc(void)
37182 kgdb_tty_line = tty_line;
37183
37184 do_register:
37185 - err = kgdb_register_io_module(&kgdboc_io_ops);
37186 + if (is_console) {
37187 + err = kgdb_register_io_module(&kgdboc_io_ops_console);
37188 + configured = 2;
37189 + } else {
37190 + err = kgdb_register_io_module(&kgdboc_io_ops);
37191 + configured = 1;
37192 + }
37193 if (err)
37194 goto noconfig;
37195
37196 - configured = 1;
37197 -
37198 return 0;
37199
37200 noconfig:
37201 @@ -213,7 +220,7 @@ noconfig:
37202 static int __init init_kgdboc(void)
37203 {
37204 /* Already configured? */
37205 - if (configured == 1)
37206 + if (configured >= 1)
37207 return 0;
37208
37209 return configure_kgdboc();
37210 @@ -262,7 +269,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
37211 if (config[len - 1] == '\n')
37212 config[len - 1] = '\0';
37213
37214 - if (configured == 1)
37215 + if (configured >= 1)
37216 cleanup_kgdboc();
37217
37218 /* Go and configure with the new params. */
37219 @@ -302,6 +309,15 @@ static struct kgdb_io kgdboc_io_ops = {
37220 .post_exception = kgdboc_post_exp_handler,
37221 };
37222
37223 +static struct kgdb_io kgdboc_io_ops_console = {
37224 + .name = "kgdboc",
37225 + .read_char = kgdboc_get_char,
37226 + .write_char = kgdboc_put_char,
37227 + .pre_exception = kgdboc_pre_exp_handler,
37228 + .post_exception = kgdboc_post_exp_handler,
37229 + .is_console = 1
37230 +};
37231 +
37232 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
37233 /* This is only available if kgdboc is a built in for early debugging */
37234 static int __init kgdboc_early_init(char *opt)
37235 diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
37236 index e41b9bb..84002fb 100644
37237 --- a/drivers/tty/tty_io.c
37238 +++ b/drivers/tty/tty_io.c
37239 @@ -3291,7 +3291,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
37240
37241 void tty_default_fops(struct file_operations *fops)
37242 {
37243 - *fops = tty_fops;
37244 + memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
37245 }
37246
37247 /*
37248 diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
37249 index 24b95db..9c078d0 100644
37250 --- a/drivers/tty/tty_ldisc.c
37251 +++ b/drivers/tty/tty_ldisc.c
37252 @@ -57,7 +57,7 @@ static void put_ldisc(struct tty_ldisc *ld)
37253 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
37254 struct tty_ldisc_ops *ldo = ld->ops;
37255
37256 - ldo->refcount--;
37257 + atomic_dec(&ldo->refcount);
37258 module_put(ldo->owner);
37259 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
37260
37261 @@ -92,7 +92,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
37262 spin_lock_irqsave(&tty_ldisc_lock, flags);
37263 tty_ldiscs[disc] = new_ldisc;
37264 new_ldisc->num = disc;
37265 - new_ldisc->refcount = 0;
37266 + atomic_set(&new_ldisc->refcount, 0);
37267 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
37268
37269 return ret;
37270 @@ -120,7 +120,7 @@ int tty_unregister_ldisc(int disc)
37271 return -EINVAL;
37272
37273 spin_lock_irqsave(&tty_ldisc_lock, flags);
37274 - if (tty_ldiscs[disc]->refcount)
37275 + if (atomic_read(&tty_ldiscs[disc]->refcount))
37276 ret = -EBUSY;
37277 else
37278 tty_ldiscs[disc] = NULL;
37279 @@ -141,7 +141,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
37280 if (ldops) {
37281 ret = ERR_PTR(-EAGAIN);
37282 if (try_module_get(ldops->owner)) {
37283 - ldops->refcount++;
37284 + atomic_inc(&ldops->refcount);
37285 ret = ldops;
37286 }
37287 }
37288 @@ -154,7 +154,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
37289 unsigned long flags;
37290
37291 spin_lock_irqsave(&tty_ldisc_lock, flags);
37292 - ldops->refcount--;
37293 + atomic_dec(&ldops->refcount);
37294 module_put(ldops->owner);
37295 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
37296 }
37297 diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
37298 index a605549..6bd3c96 100644
37299 --- a/drivers/tty/vt/keyboard.c
37300 +++ b/drivers/tty/vt/keyboard.c
37301 @@ -657,6 +657,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
37302 kbd->kbdmode == VC_OFF) &&
37303 value != KVAL(K_SAK))
37304 return; /* SAK is allowed even in raw mode */
37305 +
37306 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
37307 + {
37308 + void *func = fn_handler[value];
37309 + if (func == fn_show_state || func == fn_show_ptregs ||
37310 + func == fn_show_mem)
37311 + return;
37312 + }
37313 +#endif
37314 +
37315 fn_handler[value](vc);
37316 }
37317
37318 diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
37319 index 65447c5..0526f0a 100644
37320 --- a/drivers/tty/vt/vt_ioctl.c
37321 +++ b/drivers/tty/vt/vt_ioctl.c
37322 @@ -207,9 +207,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
37323 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
37324 return -EFAULT;
37325
37326 - if (!capable(CAP_SYS_TTY_CONFIG))
37327 - perm = 0;
37328 -
37329 switch (cmd) {
37330 case KDGKBENT:
37331 key_map = key_maps[s];
37332 @@ -221,6 +218,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
37333 val = (i ? K_HOLE : K_NOSUCHMAP);
37334 return put_user(val, &user_kbe->kb_value);
37335 case KDSKBENT:
37336 + if (!capable(CAP_SYS_TTY_CONFIG))
37337 + perm = 0;
37338 +
37339 if (!perm)
37340 return -EPERM;
37341 if (!i && v == K_NOSUCHMAP) {
37342 @@ -322,9 +322,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
37343 int i, j, k;
37344 int ret;
37345
37346 - if (!capable(CAP_SYS_TTY_CONFIG))
37347 - perm = 0;
37348 -
37349 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
37350 if (!kbs) {
37351 ret = -ENOMEM;
37352 @@ -358,6 +355,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
37353 kfree(kbs);
37354 return ((p && *p) ? -EOVERFLOW : 0);
37355 case KDSKBSENT:
37356 + if (!capable(CAP_SYS_TTY_CONFIG))
37357 + perm = 0;
37358 +
37359 if (!perm) {
37360 ret = -EPERM;
37361 goto reterr;
37362 diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
37363 index a783d53..cb30d94 100644
37364 --- a/drivers/uio/uio.c
37365 +++ b/drivers/uio/uio.c
37366 @@ -25,6 +25,7 @@
37367 #include <linux/kobject.h>
37368 #include <linux/cdev.h>
37369 #include <linux/uio_driver.h>
37370 +#include <asm/local.h>
37371
37372 #define UIO_MAX_DEVICES (1U << MINORBITS)
37373
37374 @@ -32,10 +33,10 @@ struct uio_device {
37375 struct module *owner;
37376 struct device *dev;
37377 int minor;
37378 - atomic_t event;
37379 + atomic_unchecked_t event;
37380 struct fasync_struct *async_queue;
37381 wait_queue_head_t wait;
37382 - int vma_count;
37383 + local_t vma_count;
37384 struct uio_info *info;
37385 struct kobject *map_dir;
37386 struct kobject *portio_dir;
37387 @@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
37388 struct device_attribute *attr, char *buf)
37389 {
37390 struct uio_device *idev = dev_get_drvdata(dev);
37391 - return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
37392 + return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
37393 }
37394
37395 static struct device_attribute uio_class_attributes[] = {
37396 @@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *info)
37397 {
37398 struct uio_device *idev = info->uio_dev;
37399
37400 - atomic_inc(&idev->event);
37401 + atomic_inc_unchecked(&idev->event);
37402 wake_up_interruptible(&idev->wait);
37403 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
37404 }
37405 @@ -461,7 +462,7 @@ static int uio_open(struct inode *inode, struct file *filep)
37406 }
37407
37408 listener->dev = idev;
37409 - listener->event_count = atomic_read(&idev->event);
37410 + listener->event_count = atomic_read_unchecked(&idev->event);
37411 filep->private_data = listener;
37412
37413 if (idev->info->open) {
37414 @@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
37415 return -EIO;
37416
37417 poll_wait(filep, &idev->wait, wait);
37418 - if (listener->event_count != atomic_read(&idev->event))
37419 + if (listener->event_count != atomic_read_unchecked(&idev->event))
37420 return POLLIN | POLLRDNORM;
37421 return 0;
37422 }
37423 @@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
37424 do {
37425 set_current_state(TASK_INTERRUPTIBLE);
37426
37427 - event_count = atomic_read(&idev->event);
37428 + event_count = atomic_read_unchecked(&idev->event);
37429 if (event_count != listener->event_count) {
37430 if (copy_to_user(buf, &event_count, count))
37431 retval = -EFAULT;
37432 @@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
37433 static void uio_vma_open(struct vm_area_struct *vma)
37434 {
37435 struct uio_device *idev = vma->vm_private_data;
37436 - idev->vma_count++;
37437 + local_inc(&idev->vma_count);
37438 }
37439
37440 static void uio_vma_close(struct vm_area_struct *vma)
37441 {
37442 struct uio_device *idev = vma->vm_private_data;
37443 - idev->vma_count--;
37444 + local_dec(&idev->vma_count);
37445 }
37446
37447 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
37448 @@ -821,7 +822,7 @@ int __uio_register_device(struct module *owner,
37449 idev->owner = owner;
37450 idev->info = info;
37451 init_waitqueue_head(&idev->wait);
37452 - atomic_set(&idev->event, 0);
37453 + atomic_set_unchecked(&idev->event, 0);
37454
37455 ret = uio_get_minor(idev);
37456 if (ret)
37457 diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
37458 index 98b89fe..aff824e 100644
37459 --- a/drivers/usb/atm/cxacru.c
37460 +++ b/drivers/usb/atm/cxacru.c
37461 @@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
37462 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
37463 if (ret < 2)
37464 return -EINVAL;
37465 - if (index < 0 || index > 0x7f)
37466 + if (index > 0x7f)
37467 return -EINVAL;
37468 pos += tmp;
37469
37470 diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
37471 index d3448ca..d2864ca 100644
37472 --- a/drivers/usb/atm/usbatm.c
37473 +++ b/drivers/usb/atm/usbatm.c
37474 @@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
37475 if (printk_ratelimit())
37476 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
37477 __func__, vpi, vci);
37478 - atomic_inc(&vcc->stats->rx_err);
37479 + atomic_inc_unchecked(&vcc->stats->rx_err);
37480 return;
37481 }
37482
37483 @@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
37484 if (length > ATM_MAX_AAL5_PDU) {
37485 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
37486 __func__, length, vcc);
37487 - atomic_inc(&vcc->stats->rx_err);
37488 + atomic_inc_unchecked(&vcc->stats->rx_err);
37489 goto out;
37490 }
37491
37492 @@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
37493 if (sarb->len < pdu_length) {
37494 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
37495 __func__, pdu_length, sarb->len, vcc);
37496 - atomic_inc(&vcc->stats->rx_err);
37497 + atomic_inc_unchecked(&vcc->stats->rx_err);
37498 goto out;
37499 }
37500
37501 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
37502 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
37503 __func__, vcc);
37504 - atomic_inc(&vcc->stats->rx_err);
37505 + atomic_inc_unchecked(&vcc->stats->rx_err);
37506 goto out;
37507 }
37508
37509 @@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
37510 if (printk_ratelimit())
37511 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
37512 __func__, length);
37513 - atomic_inc(&vcc->stats->rx_drop);
37514 + atomic_inc_unchecked(&vcc->stats->rx_drop);
37515 goto out;
37516 }
37517
37518 @@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
37519
37520 vcc->push(vcc, skb);
37521
37522 - atomic_inc(&vcc->stats->rx);
37523 + atomic_inc_unchecked(&vcc->stats->rx);
37524 out:
37525 skb_trim(sarb, 0);
37526 }
37527 @@ -615,7 +615,7 @@ static void usbatm_tx_process(unsigned long data)
37528 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
37529
37530 usbatm_pop(vcc, skb);
37531 - atomic_inc(&vcc->stats->tx);
37532 + atomic_inc_unchecked(&vcc->stats->tx);
37533
37534 skb = skb_dequeue(&instance->sndqueue);
37535 }
37536 @@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
37537 if (!left--)
37538 return sprintf(page,
37539 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
37540 - atomic_read(&atm_dev->stats.aal5.tx),
37541 - atomic_read(&atm_dev->stats.aal5.tx_err),
37542 - atomic_read(&atm_dev->stats.aal5.rx),
37543 - atomic_read(&atm_dev->stats.aal5.rx_err),
37544 - atomic_read(&atm_dev->stats.aal5.rx_drop));
37545 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
37546 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
37547 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
37548 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
37549 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
37550
37551 if (!left--) {
37552 if (instance->disconnected)
37553 diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
37554 index d956965..4179a77 100644
37555 --- a/drivers/usb/core/devices.c
37556 +++ b/drivers/usb/core/devices.c
37557 @@ -126,7 +126,7 @@ static const char format_endpt[] =
37558 * time it gets called.
37559 */
37560 static struct device_connect_event {
37561 - atomic_t count;
37562 + atomic_unchecked_t count;
37563 wait_queue_head_t wait;
37564 } device_event = {
37565 .count = ATOMIC_INIT(1),
37566 @@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
37567
37568 void usbfs_conn_disc_event(void)
37569 {
37570 - atomic_add(2, &device_event.count);
37571 + atomic_add_unchecked(2, &device_event.count);
37572 wake_up(&device_event.wait);
37573 }
37574
37575 @@ -648,7 +648,7 @@ static unsigned int usb_device_poll(struct file *file,
37576
37577 poll_wait(file, &device_event.wait, wait);
37578
37579 - event_count = atomic_read(&device_event.count);
37580 + event_count = atomic_read_unchecked(&device_event.count);
37581 if (file->f_version != event_count) {
37582 file->f_version = event_count;
37583 return POLLIN | POLLRDNORM;
37584 diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
37585 index b3bdfed..a9460e0 100644
37586 --- a/drivers/usb/core/message.c
37587 +++ b/drivers/usb/core/message.c
37588 @@ -869,8 +869,8 @@ char *usb_cache_string(struct usb_device *udev, int index)
37589 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
37590 if (buf) {
37591 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
37592 - if (len > 0) {
37593 - smallbuf = kmalloc(++len, GFP_NOIO);
37594 + if (len++ > 0) {
37595 + smallbuf = kmalloc(len, GFP_NOIO);
37596 if (!smallbuf)
37597 return buf;
37598 memcpy(smallbuf, buf, len);
37599 diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
37600 index 1fc8f12..20647c1 100644
37601 --- a/drivers/usb/early/ehci-dbgp.c
37602 +++ b/drivers/usb/early/ehci-dbgp.c
37603 @@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
37604
37605 #ifdef CONFIG_KGDB
37606 static struct kgdb_io kgdbdbgp_io_ops;
37607 -#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
37608 +static struct kgdb_io kgdbdbgp_io_ops_console;
37609 +#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
37610 #else
37611 #define dbgp_kgdb_mode (0)
37612 #endif
37613 @@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
37614 .write_char = kgdbdbgp_write_char,
37615 };
37616
37617 +static struct kgdb_io kgdbdbgp_io_ops_console = {
37618 + .name = "kgdbdbgp",
37619 + .read_char = kgdbdbgp_read_char,
37620 + .write_char = kgdbdbgp_write_char,
37621 + .is_console = 1
37622 +};
37623 +
37624 static int kgdbdbgp_wait_time;
37625
37626 static int __init kgdbdbgp_parse_config(char *str)
37627 @@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(char *str)
37628 ptr++;
37629 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
37630 }
37631 - kgdb_register_io_module(&kgdbdbgp_io_ops);
37632 - kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
37633 + if (early_dbgp_console.index != -1)
37634 + kgdb_register_io_module(&kgdbdbgp_io_ops_console);
37635 + else
37636 + kgdb_register_io_module(&kgdbdbgp_io_ops);
37637
37638 return 0;
37639 }
37640 diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
37641 index d6bea3e..60b250e 100644
37642 --- a/drivers/usb/wusbcore/wa-hc.h
37643 +++ b/drivers/usb/wusbcore/wa-hc.h
37644 @@ -192,7 +192,7 @@ struct wahc {
37645 struct list_head xfer_delayed_list;
37646 spinlock_t xfer_list_lock;
37647 struct work_struct xfer_work;
37648 - atomic_t xfer_id_count;
37649 + atomic_unchecked_t xfer_id_count;
37650 };
37651
37652
37653 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
37654 INIT_LIST_HEAD(&wa->xfer_delayed_list);
37655 spin_lock_init(&wa->xfer_list_lock);
37656 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
37657 - atomic_set(&wa->xfer_id_count, 1);
37658 + atomic_set_unchecked(&wa->xfer_id_count, 1);
37659 }
37660
37661 /**
37662 diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
37663 index 57c01ab..8a05959 100644
37664 --- a/drivers/usb/wusbcore/wa-xfer.c
37665 +++ b/drivers/usb/wusbcore/wa-xfer.c
37666 @@ -296,7 +296,7 @@ out:
37667 */
37668 static void wa_xfer_id_init(struct wa_xfer *xfer)
37669 {
37670 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
37671 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
37672 }
37673
37674 /*
37675 diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
37676 index c14c42b..f955cc2 100644
37677 --- a/drivers/vhost/vhost.c
37678 +++ b/drivers/vhost/vhost.c
37679 @@ -629,7 +629,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
37680 return 0;
37681 }
37682
37683 -static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
37684 +static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
37685 {
37686 struct file *eventfp, *filep = NULL,
37687 *pollstart = NULL, *pollstop = NULL;
37688 diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
37689 index b0b2ac3..89a4399 100644
37690 --- a/drivers/video/aty/aty128fb.c
37691 +++ b/drivers/video/aty/aty128fb.c
37692 @@ -148,7 +148,7 @@ enum {
37693 };
37694
37695 /* Must match above enum */
37696 -static const char *r128_family[] __devinitdata = {
37697 +static const char *r128_family[] __devinitconst = {
37698 "AGP",
37699 "PCI",
37700 "PRO AGP",
37701 diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
37702 index 5c3960d..15cf8fc 100644
37703 --- a/drivers/video/fbcmap.c
37704 +++ b/drivers/video/fbcmap.c
37705 @@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
37706 rc = -ENODEV;
37707 goto out;
37708 }
37709 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
37710 - !info->fbops->fb_setcmap)) {
37711 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
37712 rc = -EINVAL;
37713 goto out1;
37714 }
37715 diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
37716 index ac9141b..9f07583 100644
37717 --- a/drivers/video/fbmem.c
37718 +++ b/drivers/video/fbmem.c
37719 @@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
37720 image->dx += image->width + 8;
37721 }
37722 } else if (rotate == FB_ROTATE_UD) {
37723 - for (x = 0; x < num && image->dx >= 0; x++) {
37724 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
37725 info->fbops->fb_imageblit(info, image);
37726 image->dx -= image->width + 8;
37727 }
37728 @@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
37729 image->dy += image->height + 8;
37730 }
37731 } else if (rotate == FB_ROTATE_CCW) {
37732 - for (x = 0; x < num && image->dy >= 0; x++) {
37733 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
37734 info->fbops->fb_imageblit(info, image);
37735 image->dy -= image->height + 8;
37736 }
37737 @@ -1157,7 +1157,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
37738 return -EFAULT;
37739 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
37740 return -EINVAL;
37741 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
37742 + if (con2fb.framebuffer >= FB_MAX)
37743 return -EINVAL;
37744 if (!registered_fb[con2fb.framebuffer])
37745 request_module("fb%d", con2fb.framebuffer);
37746 diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
37747 index 5a5d092..265c5ed 100644
37748 --- a/drivers/video/geode/gx1fb_core.c
37749 +++ b/drivers/video/geode/gx1fb_core.c
37750 @@ -29,7 +29,7 @@ static int crt_option = 1;
37751 static char panel_option[32] = "";
37752
37753 /* Modes relevant to the GX1 (taken from modedb.c) */
37754 -static const struct fb_videomode __devinitdata gx1_modedb[] = {
37755 +static const struct fb_videomode __devinitconst gx1_modedb[] = {
37756 /* 640x480-60 VESA */
37757 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
37758 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
37759 diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
37760 index 0fad23f..0e9afa4 100644
37761 --- a/drivers/video/gxt4500.c
37762 +++ b/drivers/video/gxt4500.c
37763 @@ -156,7 +156,7 @@ struct gxt4500_par {
37764 static char *mode_option;
37765
37766 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
37767 -static const struct fb_videomode defaultmode __devinitdata = {
37768 +static const struct fb_videomode defaultmode __devinitconst = {
37769 .refresh = 60,
37770 .xres = 1280,
37771 .yres = 1024,
37772 @@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
37773 return 0;
37774 }
37775
37776 -static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
37777 +static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
37778 .id = "IBM GXT4500P",
37779 .type = FB_TYPE_PACKED_PIXELS,
37780 .visual = FB_VISUAL_PSEUDOCOLOR,
37781 diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
37782 index 7672d2e..b56437f 100644
37783 --- a/drivers/video/i810/i810_accel.c
37784 +++ b/drivers/video/i810/i810_accel.c
37785 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
37786 }
37787 }
37788 printk("ringbuffer lockup!!!\n");
37789 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
37790 i810_report_error(mmio);
37791 par->dev_flags |= LOCKUP;
37792 info->pixmap.scan_align = 1;
37793 diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
37794 index b83f361..2b05a91 100644
37795 --- a/drivers/video/i810/i810_main.c
37796 +++ b/drivers/video/i810/i810_main.c
37797 @@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
37798 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
37799
37800 /* PCI */
37801 -static const char *i810_pci_list[] __devinitdata = {
37802 +static const char *i810_pci_list[] __devinitconst = {
37803 "Intel(R) 810 Framebuffer Device" ,
37804 "Intel(R) 810-DC100 Framebuffer Device" ,
37805 "Intel(R) 810E Framebuffer Device" ,
37806 diff --git a/drivers/video/jz4740_fb.c b/drivers/video/jz4740_fb.c
37807 index de36693..3c63fc2 100644
37808 --- a/drivers/video/jz4740_fb.c
37809 +++ b/drivers/video/jz4740_fb.c
37810 @@ -136,7 +136,7 @@ struct jzfb {
37811 uint32_t pseudo_palette[16];
37812 };
37813
37814 -static const struct fb_fix_screeninfo jzfb_fix __devinitdata = {
37815 +static const struct fb_fix_screeninfo jzfb_fix __devinitconst = {
37816 .id = "JZ4740 FB",
37817 .type = FB_TYPE_PACKED_PIXELS,
37818 .visual = FB_VISUAL_TRUECOLOR,
37819 diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
37820 index 3c14e43..eafa544 100644
37821 --- a/drivers/video/logo/logo_linux_clut224.ppm
37822 +++ b/drivers/video/logo/logo_linux_clut224.ppm
37823 @@ -1,1604 +1,1123 @@
37824 P3
37825 -# Standard 224-color Linux logo
37826 80 80
37827 255
37828 - 0 0 0 0 0 0 0 0 0 0 0 0
37829 - 0 0 0 0 0 0 0 0 0 0 0 0
37830 - 0 0 0 0 0 0 0 0 0 0 0 0
37831 - 0 0 0 0 0 0 0 0 0 0 0 0
37832 - 0 0 0 0 0 0 0 0 0 0 0 0
37833 - 0 0 0 0 0 0 0 0 0 0 0 0
37834 - 0 0 0 0 0 0 0 0 0 0 0 0
37835 - 0 0 0 0 0 0 0 0 0 0 0 0
37836 - 0 0 0 0 0 0 0 0 0 0 0 0
37837 - 6 6 6 6 6 6 10 10 10 10 10 10
37838 - 10 10 10 6 6 6 6 6 6 6 6 6
37839 - 0 0 0 0 0 0 0 0 0 0 0 0
37840 - 0 0 0 0 0 0 0 0 0 0 0 0
37841 - 0 0 0 0 0 0 0 0 0 0 0 0
37842 - 0 0 0 0 0 0 0 0 0 0 0 0
37843 - 0 0 0 0 0 0 0 0 0 0 0 0
37844 - 0 0 0 0 0 0 0 0 0 0 0 0
37845 - 0 0 0 0 0 0 0 0 0 0 0 0
37846 - 0 0 0 0 0 0 0 0 0 0 0 0
37847 - 0 0 0 0 0 0 0 0 0 0 0 0
37848 - 0 0 0 0 0 0 0 0 0 0 0 0
37849 - 0 0 0 0 0 0 0 0 0 0 0 0
37850 - 0 0 0 0 0 0 0 0 0 0 0 0
37851 - 0 0 0 0 0 0 0 0 0 0 0 0
37852 - 0 0 0 0 0 0 0 0 0 0 0 0
37853 - 0 0 0 0 0 0 0 0 0 0 0 0
37854 - 0 0 0 0 0 0 0 0 0 0 0 0
37855 - 0 0 0 0 0 0 0 0 0 0 0 0
37856 - 0 0 0 6 6 6 10 10 10 14 14 14
37857 - 22 22 22 26 26 26 30 30 30 34 34 34
37858 - 30 30 30 30 30 30 26 26 26 18 18 18
37859 - 14 14 14 10 10 10 6 6 6 0 0 0
37860 - 0 0 0 0 0 0 0 0 0 0 0 0
37861 - 0 0 0 0 0 0 0 0 0 0 0 0
37862 - 0 0 0 0 0 0 0 0 0 0 0 0
37863 - 0 0 0 0 0 0 0 0 0 0 0 0
37864 - 0 0 0 0 0 0 0 0 0 0 0 0
37865 - 0 0 0 0 0 0 0 0 0 0 0 0
37866 - 0 0 0 0 0 0 0 0 0 0 0 0
37867 - 0 0 0 0 0 0 0 0 0 0 0 0
37868 - 0 0 0 0 0 0 0 0 0 0 0 0
37869 - 0 0 0 0 0 1 0 0 1 0 0 0
37870 - 0 0 0 0 0 0 0 0 0 0 0 0
37871 - 0 0 0 0 0 0 0 0 0 0 0 0
37872 - 0 0 0 0 0 0 0 0 0 0 0 0
37873 - 0 0 0 0 0 0 0 0 0 0 0 0
37874 - 0 0 0 0 0 0 0 0 0 0 0 0
37875 - 0 0 0 0 0 0 0 0 0 0 0 0
37876 - 6 6 6 14 14 14 26 26 26 42 42 42
37877 - 54 54 54 66 66 66 78 78 78 78 78 78
37878 - 78 78 78 74 74 74 66 66 66 54 54 54
37879 - 42 42 42 26 26 26 18 18 18 10 10 10
37880 - 6 6 6 0 0 0 0 0 0 0 0 0
37881 - 0 0 0 0 0 0 0 0 0 0 0 0
37882 - 0 0 0 0 0 0 0 0 0 0 0 0
37883 - 0 0 0 0 0 0 0 0 0 0 0 0
37884 - 0 0 0 0 0 0 0 0 0 0 0 0
37885 - 0 0 0 0 0 0 0 0 0 0 0 0
37886 - 0 0 0 0 0 0 0 0 0 0 0 0
37887 - 0 0 0 0 0 0 0 0 0 0 0 0
37888 - 0 0 0 0 0 0 0 0 0 0 0 0
37889 - 0 0 1 0 0 0 0 0 0 0 0 0
37890 - 0 0 0 0 0 0 0 0 0 0 0 0
37891 - 0 0 0 0 0 0 0 0 0 0 0 0
37892 - 0 0 0 0 0 0 0 0 0 0 0 0
37893 - 0 0 0 0 0 0 0 0 0 0 0 0
37894 - 0 0 0 0 0 0 0 0 0 0 0 0
37895 - 0 0 0 0 0 0 0 0 0 10 10 10
37896 - 22 22 22 42 42 42 66 66 66 86 86 86
37897 - 66 66 66 38 38 38 38 38 38 22 22 22
37898 - 26 26 26 34 34 34 54 54 54 66 66 66
37899 - 86 86 86 70 70 70 46 46 46 26 26 26
37900 - 14 14 14 6 6 6 0 0 0 0 0 0
37901 - 0 0 0 0 0 0 0 0 0 0 0 0
37902 - 0 0 0 0 0 0 0 0 0 0 0 0
37903 - 0 0 0 0 0 0 0 0 0 0 0 0
37904 - 0 0 0 0 0 0 0 0 0 0 0 0
37905 - 0 0 0 0 0 0 0 0 0 0 0 0
37906 - 0 0 0 0 0 0 0 0 0 0 0 0
37907 - 0 0 0 0 0 0 0 0 0 0 0 0
37908 - 0 0 0 0 0 0 0 0 0 0 0 0
37909 - 0 0 1 0 0 1 0 0 1 0 0 0
37910 - 0 0 0 0 0 0 0 0 0 0 0 0
37911 - 0 0 0 0 0 0 0 0 0 0 0 0
37912 - 0 0 0 0 0 0 0 0 0 0 0 0
37913 - 0 0 0 0 0 0 0 0 0 0 0 0
37914 - 0 0 0 0 0 0 0 0 0 0 0 0
37915 - 0 0 0 0 0 0 10 10 10 26 26 26
37916 - 50 50 50 82 82 82 58 58 58 6 6 6
37917 - 2 2 6 2 2 6 2 2 6 2 2 6
37918 - 2 2 6 2 2 6 2 2 6 2 2 6
37919 - 6 6 6 54 54 54 86 86 86 66 66 66
37920 - 38 38 38 18 18 18 6 6 6 0 0 0
37921 - 0 0 0 0 0 0 0 0 0 0 0 0
37922 - 0 0 0 0 0 0 0 0 0 0 0 0
37923 - 0 0 0 0 0 0 0 0 0 0 0 0
37924 - 0 0 0 0 0 0 0 0 0 0 0 0
37925 - 0 0 0 0 0 0 0 0 0 0 0 0
37926 - 0 0 0 0 0 0 0 0 0 0 0 0
37927 - 0 0 0 0 0 0 0 0 0 0 0 0
37928 - 0 0 0 0 0 0 0 0 0 0 0 0
37929 - 0 0 0 0 0 0 0 0 0 0 0 0
37930 - 0 0 0 0 0 0 0 0 0 0 0 0
37931 - 0 0 0 0 0 0 0 0 0 0 0 0
37932 - 0 0 0 0 0 0 0 0 0 0 0 0
37933 - 0 0 0 0 0 0 0 0 0 0 0 0
37934 - 0 0 0 0 0 0 0 0 0 0 0 0
37935 - 0 0 0 6 6 6 22 22 22 50 50 50
37936 - 78 78 78 34 34 34 2 2 6 2 2 6
37937 - 2 2 6 2 2 6 2 2 6 2 2 6
37938 - 2 2 6 2 2 6 2 2 6 2 2 6
37939 - 2 2 6 2 2 6 6 6 6 70 70 70
37940 - 78 78 78 46 46 46 22 22 22 6 6 6
37941 - 0 0 0 0 0 0 0 0 0 0 0 0
37942 - 0 0 0 0 0 0 0 0 0 0 0 0
37943 - 0 0 0 0 0 0 0 0 0 0 0 0
37944 - 0 0 0 0 0 0 0 0 0 0 0 0
37945 - 0 0 0 0 0 0 0 0 0 0 0 0
37946 - 0 0 0 0 0 0 0 0 0 0 0 0
37947 - 0 0 0 0 0 0 0 0 0 0 0 0
37948 - 0 0 0 0 0 0 0 0 0 0 0 0
37949 - 0 0 1 0 0 1 0 0 1 0 0 0
37950 - 0 0 0 0 0 0 0 0 0 0 0 0
37951 - 0 0 0 0 0 0 0 0 0 0 0 0
37952 - 0 0 0 0 0 0 0 0 0 0 0 0
37953 - 0 0 0 0 0 0 0 0 0 0 0 0
37954 - 0 0 0 0 0 0 0 0 0 0 0 0
37955 - 6 6 6 18 18 18 42 42 42 82 82 82
37956 - 26 26 26 2 2 6 2 2 6 2 2 6
37957 - 2 2 6 2 2 6 2 2 6 2 2 6
37958 - 2 2 6 2 2 6 2 2 6 14 14 14
37959 - 46 46 46 34 34 34 6 6 6 2 2 6
37960 - 42 42 42 78 78 78 42 42 42 18 18 18
37961 - 6 6 6 0 0 0 0 0 0 0 0 0
37962 - 0 0 0 0 0 0 0 0 0 0 0 0
37963 - 0 0 0 0 0 0 0 0 0 0 0 0
37964 - 0 0 0 0 0 0 0 0 0 0 0 0
37965 - 0 0 0 0 0 0 0 0 0 0 0 0
37966 - 0 0 0 0 0 0 0 0 0 0 0 0
37967 - 0 0 0 0 0 0 0 0 0 0 0 0
37968 - 0 0 0 0 0 0 0 0 0 0 0 0
37969 - 0 0 1 0 0 0 0 0 1 0 0 0
37970 - 0 0 0 0 0 0 0 0 0 0 0 0
37971 - 0 0 0 0 0 0 0 0 0 0 0 0
37972 - 0 0 0 0 0 0 0 0 0 0 0 0
37973 - 0 0 0 0 0 0 0 0 0 0 0 0
37974 - 0 0 0 0 0 0 0 0 0 0 0 0
37975 - 10 10 10 30 30 30 66 66 66 58 58 58
37976 - 2 2 6 2 2 6 2 2 6 2 2 6
37977 - 2 2 6 2 2 6 2 2 6 2 2 6
37978 - 2 2 6 2 2 6 2 2 6 26 26 26
37979 - 86 86 86 101 101 101 46 46 46 10 10 10
37980 - 2 2 6 58 58 58 70 70 70 34 34 34
37981 - 10 10 10 0 0 0 0 0 0 0 0 0
37982 - 0 0 0 0 0 0 0 0 0 0 0 0
37983 - 0 0 0 0 0 0 0 0 0 0 0 0
37984 - 0 0 0 0 0 0 0 0 0 0 0 0
37985 - 0 0 0 0 0 0 0 0 0 0 0 0
37986 - 0 0 0 0 0 0 0 0 0 0 0 0
37987 - 0 0 0 0 0 0 0 0 0 0 0 0
37988 - 0 0 0 0 0 0 0 0 0 0 0 0
37989 - 0 0 1 0 0 1 0 0 1 0 0 0
37990 - 0 0 0 0 0 0 0 0 0 0 0 0
37991 - 0 0 0 0 0 0 0 0 0 0 0 0
37992 - 0 0 0 0 0 0 0 0 0 0 0 0
37993 - 0 0 0 0 0 0 0 0 0 0 0 0
37994 - 0 0 0 0 0 0 0 0 0 0 0 0
37995 - 14 14 14 42 42 42 86 86 86 10 10 10
37996 - 2 2 6 2 2 6 2 2 6 2 2 6
37997 - 2 2 6 2 2 6 2 2 6 2 2 6
37998 - 2 2 6 2 2 6 2 2 6 30 30 30
37999 - 94 94 94 94 94 94 58 58 58 26 26 26
38000 - 2 2 6 6 6 6 78 78 78 54 54 54
38001 - 22 22 22 6 6 6 0 0 0 0 0 0
38002 - 0 0 0 0 0 0 0 0 0 0 0 0
38003 - 0 0 0 0 0 0 0 0 0 0 0 0
38004 - 0 0 0 0 0 0 0 0 0 0 0 0
38005 - 0 0 0 0 0 0 0 0 0 0 0 0
38006 - 0 0 0 0 0 0 0 0 0 0 0 0
38007 - 0 0 0 0 0 0 0 0 0 0 0 0
38008 - 0 0 0 0 0 0 0 0 0 0 0 0
38009 - 0 0 0 0 0 0 0 0 0 0 0 0
38010 - 0 0 0 0 0 0 0 0 0 0 0 0
38011 - 0 0 0 0 0 0 0 0 0 0 0 0
38012 - 0 0 0 0 0 0 0 0 0 0 0 0
38013 - 0 0 0 0 0 0 0 0 0 0 0 0
38014 - 0 0 0 0 0 0 0 0 0 6 6 6
38015 - 22 22 22 62 62 62 62 62 62 2 2 6
38016 - 2 2 6 2 2 6 2 2 6 2 2 6
38017 - 2 2 6 2 2 6 2 2 6 2 2 6
38018 - 2 2 6 2 2 6 2 2 6 26 26 26
38019 - 54 54 54 38 38 38 18 18 18 10 10 10
38020 - 2 2 6 2 2 6 34 34 34 82 82 82
38021 - 38 38 38 14 14 14 0 0 0 0 0 0
38022 - 0 0 0 0 0 0 0 0 0 0 0 0
38023 - 0 0 0 0 0 0 0 0 0 0 0 0
38024 - 0 0 0 0 0 0 0 0 0 0 0 0
38025 - 0 0 0 0 0 0 0 0 0 0 0 0
38026 - 0 0 0 0 0 0 0 0 0 0 0 0
38027 - 0 0 0 0 0 0 0 0 0 0 0 0
38028 - 0 0 0 0 0 0 0 0 0 0 0 0
38029 - 0 0 0 0 0 1 0 0 1 0 0 0
38030 - 0 0 0 0 0 0 0 0 0 0 0 0
38031 - 0 0 0 0 0 0 0 0 0 0 0 0
38032 - 0 0 0 0 0 0 0 0 0 0 0 0
38033 - 0 0 0 0 0 0 0 0 0 0 0 0
38034 - 0 0 0 0 0 0 0 0 0 6 6 6
38035 - 30 30 30 78 78 78 30 30 30 2 2 6
38036 - 2 2 6 2 2 6 2 2 6 2 2 6
38037 - 2 2 6 2 2 6 2 2 6 2 2 6
38038 - 2 2 6 2 2 6 2 2 6 10 10 10
38039 - 10 10 10 2 2 6 2 2 6 2 2 6
38040 - 2 2 6 2 2 6 2 2 6 78 78 78
38041 - 50 50 50 18 18 18 6 6 6 0 0 0
38042 - 0 0 0 0 0 0 0 0 0 0 0 0
38043 - 0 0 0 0 0 0 0 0 0 0 0 0
38044 - 0 0 0 0 0 0 0 0 0 0 0 0
38045 - 0 0 0 0 0 0 0 0 0 0 0 0
38046 - 0 0 0 0 0 0 0 0 0 0 0 0
38047 - 0 0 0 0 0 0 0 0 0 0 0 0
38048 - 0 0 0 0 0 0 0 0 0 0 0 0
38049 - 0 0 1 0 0 0 0 0 0 0 0 0
38050 - 0 0 0 0 0 0 0 0 0 0 0 0
38051 - 0 0 0 0 0 0 0 0 0 0 0 0
38052 - 0 0 0 0 0 0 0 0 0 0 0 0
38053 - 0 0 0 0 0 0 0 0 0 0 0 0
38054 - 0 0 0 0 0 0 0 0 0 10 10 10
38055 - 38 38 38 86 86 86 14 14 14 2 2 6
38056 - 2 2 6 2 2 6 2 2 6 2 2 6
38057 - 2 2 6 2 2 6 2 2 6 2 2 6
38058 - 2 2 6 2 2 6 2 2 6 2 2 6
38059 - 2 2 6 2 2 6 2 2 6 2 2 6
38060 - 2 2 6 2 2 6 2 2 6 54 54 54
38061 - 66 66 66 26 26 26 6 6 6 0 0 0
38062 - 0 0 0 0 0 0 0 0 0 0 0 0
38063 - 0 0 0 0 0 0 0 0 0 0 0 0
38064 - 0 0 0 0 0 0 0 0 0 0 0 0
38065 - 0 0 0 0 0 0 0 0 0 0 0 0
38066 - 0 0 0 0 0 0 0 0 0 0 0 0
38067 - 0 0 0 0 0 0 0 0 0 0 0 0
38068 - 0 0 0 0 0 0 0 0 0 0 0 0
38069 - 0 0 0 0 0 1 0 0 1 0 0 0
38070 - 0 0 0 0 0 0 0 0 0 0 0 0
38071 - 0 0 0 0 0 0 0 0 0 0 0 0
38072 - 0 0 0 0 0 0 0 0 0 0 0 0
38073 - 0 0 0 0 0 0 0 0 0 0 0 0
38074 - 0 0 0 0 0 0 0 0 0 14 14 14
38075 - 42 42 42 82 82 82 2 2 6 2 2 6
38076 - 2 2 6 6 6 6 10 10 10 2 2 6
38077 - 2 2 6 2 2 6 2 2 6 2 2 6
38078 - 2 2 6 2 2 6 2 2 6 6 6 6
38079 - 14 14 14 10 10 10 2 2 6 2 2 6
38080 - 2 2 6 2 2 6 2 2 6 18 18 18
38081 - 82 82 82 34 34 34 10 10 10 0 0 0
38082 - 0 0 0 0 0 0 0 0 0 0 0 0
38083 - 0 0 0 0 0 0 0 0 0 0 0 0
38084 - 0 0 0 0 0 0 0 0 0 0 0 0
38085 - 0 0 0 0 0 0 0 0 0 0 0 0
38086 - 0 0 0 0 0 0 0 0 0 0 0 0
38087 - 0 0 0 0 0 0 0 0 0 0 0 0
38088 - 0 0 0 0 0 0 0 0 0 0 0 0
38089 - 0 0 1 0 0 0 0 0 0 0 0 0
38090 - 0 0 0 0 0 0 0 0 0 0 0 0
38091 - 0 0 0 0 0 0 0 0 0 0 0 0
38092 - 0 0 0 0 0 0 0 0 0 0 0 0
38093 - 0 0 0 0 0 0 0 0 0 0 0 0
38094 - 0 0 0 0 0 0 0 0 0 14 14 14
38095 - 46 46 46 86 86 86 2 2 6 2 2 6
38096 - 6 6 6 6 6 6 22 22 22 34 34 34
38097 - 6 6 6 2 2 6 2 2 6 2 2 6
38098 - 2 2 6 2 2 6 18 18 18 34 34 34
38099 - 10 10 10 50 50 50 22 22 22 2 2 6
38100 - 2 2 6 2 2 6 2 2 6 10 10 10
38101 - 86 86 86 42 42 42 14 14 14 0 0 0
38102 - 0 0 0 0 0 0 0 0 0 0 0 0
38103 - 0 0 0 0 0 0 0 0 0 0 0 0
38104 - 0 0 0 0 0 0 0 0 0 0 0 0
38105 - 0 0 0 0 0 0 0 0 0 0 0 0
38106 - 0 0 0 0 0 0 0 0 0 0 0 0
38107 - 0 0 0 0 0 0 0 0 0 0 0 0
38108 - 0 0 0 0 0 0 0 0 0 0 0 0
38109 - 0 0 1 0 0 1 0 0 1 0 0 0
38110 - 0 0 0 0 0 0 0 0 0 0 0 0
38111 - 0 0 0 0 0 0 0 0 0 0 0 0
38112 - 0 0 0 0 0 0 0 0 0 0 0 0
38113 - 0 0 0 0 0 0 0 0 0 0 0 0
38114 - 0 0 0 0 0 0 0 0 0 14 14 14
38115 - 46 46 46 86 86 86 2 2 6 2 2 6
38116 - 38 38 38 116 116 116 94 94 94 22 22 22
38117 - 22 22 22 2 2 6 2 2 6 2 2 6
38118 - 14 14 14 86 86 86 138 138 138 162 162 162
38119 -154 154 154 38 38 38 26 26 26 6 6 6
38120 - 2 2 6 2 2 6 2 2 6 2 2 6
38121 - 86 86 86 46 46 46 14 14 14 0 0 0
38122 - 0 0 0 0 0 0 0 0 0 0 0 0
38123 - 0 0 0 0 0 0 0 0 0 0 0 0
38124 - 0 0 0 0 0 0 0 0 0 0 0 0
38125 - 0 0 0 0 0 0 0 0 0 0 0 0
38126 - 0 0 0 0 0 0 0 0 0 0 0 0
38127 - 0 0 0 0 0 0 0 0 0 0 0 0
38128 - 0 0 0 0 0 0 0 0 0 0 0 0
38129 - 0 0 0 0 0 0 0 0 0 0 0 0
38130 - 0 0 0 0 0 0 0 0 0 0 0 0
38131 - 0 0 0 0 0 0 0 0 0 0 0 0
38132 - 0 0 0 0 0 0 0 0 0 0 0 0
38133 - 0 0 0 0 0 0 0 0 0 0 0 0
38134 - 0 0 0 0 0 0 0 0 0 14 14 14
38135 - 46 46 46 86 86 86 2 2 6 14 14 14
38136 -134 134 134 198 198 198 195 195 195 116 116 116
38137 - 10 10 10 2 2 6 2 2 6 6 6 6
38138 -101 98 89 187 187 187 210 210 210 218 218 218
38139 -214 214 214 134 134 134 14 14 14 6 6 6
38140 - 2 2 6 2 2 6 2 2 6 2 2 6
38141 - 86 86 86 50 50 50 18 18 18 6 6 6
38142 - 0 0 0 0 0 0 0 0 0 0 0 0
38143 - 0 0 0 0 0 0 0 0 0 0 0 0
38144 - 0 0 0 0 0 0 0 0 0 0 0 0
38145 - 0 0 0 0 0 0 0 0 0 0 0 0
38146 - 0 0 0 0 0 0 0 0 0 0 0 0
38147 - 0 0 0 0 0 0 0 0 0 0 0 0
38148 - 0 0 0 0 0 0 0 0 1 0 0 0
38149 - 0 0 1 0 0 1 0 0 1 0 0 0
38150 - 0 0 0 0 0 0 0 0 0 0 0 0
38151 - 0 0 0 0 0 0 0 0 0 0 0 0
38152 - 0 0 0 0 0 0 0 0 0 0 0 0
38153 - 0 0 0 0 0 0 0 0 0 0 0 0
38154 - 0 0 0 0 0 0 0 0 0 14 14 14
38155 - 46 46 46 86 86 86 2 2 6 54 54 54
38156 -218 218 218 195 195 195 226 226 226 246 246 246
38157 - 58 58 58 2 2 6 2 2 6 30 30 30
38158 -210 210 210 253 253 253 174 174 174 123 123 123
38159 -221 221 221 234 234 234 74 74 74 2 2 6
38160 - 2 2 6 2 2 6 2 2 6 2 2 6
38161 - 70 70 70 58 58 58 22 22 22 6 6 6
38162 - 0 0 0 0 0 0 0 0 0 0 0 0
38163 - 0 0 0 0 0 0 0 0 0 0 0 0
38164 - 0 0 0 0 0 0 0 0 0 0 0 0
38165 - 0 0 0 0 0 0 0 0 0 0 0 0
38166 - 0 0 0 0 0 0 0 0 0 0 0 0
38167 - 0 0 0 0 0 0 0 0 0 0 0 0
38168 - 0 0 0 0 0 0 0 0 0 0 0 0
38169 - 0 0 0 0 0 0 0 0 0 0 0 0
38170 - 0 0 0 0 0 0 0 0 0 0 0 0
38171 - 0 0 0 0 0 0 0 0 0 0 0 0
38172 - 0 0 0 0 0 0 0 0 0 0 0 0
38173 - 0 0 0 0 0 0 0 0 0 0 0 0
38174 - 0 0 0 0 0 0 0 0 0 14 14 14
38175 - 46 46 46 82 82 82 2 2 6 106 106 106
38176 -170 170 170 26 26 26 86 86 86 226 226 226
38177 -123 123 123 10 10 10 14 14 14 46 46 46
38178 -231 231 231 190 190 190 6 6 6 70 70 70
38179 - 90 90 90 238 238 238 158 158 158 2 2 6
38180 - 2 2 6 2 2 6 2 2 6 2 2 6
38181 - 70 70 70 58 58 58 22 22 22 6 6 6
38182 - 0 0 0 0 0 0 0 0 0 0 0 0
38183 - 0 0 0 0 0 0 0 0 0 0 0 0
38184 - 0 0 0 0 0 0 0 0 0 0 0 0
38185 - 0 0 0 0 0 0 0 0 0 0 0 0
38186 - 0 0 0 0 0 0 0 0 0 0 0 0
38187 - 0 0 0 0 0 0 0 0 0 0 0 0
38188 - 0 0 0 0 0 0 0 0 1 0 0 0
38189 - 0 0 1 0 0 1 0 0 1 0 0 0
38190 - 0 0 0 0 0 0 0 0 0 0 0 0
38191 - 0 0 0 0 0 0 0 0 0 0 0 0
38192 - 0 0 0 0 0 0 0 0 0 0 0 0
38193 - 0 0 0 0 0 0 0 0 0 0 0 0
38194 - 0 0 0 0 0 0 0 0 0 14 14 14
38195 - 42 42 42 86 86 86 6 6 6 116 116 116
38196 -106 106 106 6 6 6 70 70 70 149 149 149
38197 -128 128 128 18 18 18 38 38 38 54 54 54
38198 -221 221 221 106 106 106 2 2 6 14 14 14
38199 - 46 46 46 190 190 190 198 198 198 2 2 6
38200 - 2 2 6 2 2 6 2 2 6 2 2 6
38201 - 74 74 74 62 62 62 22 22 22 6 6 6
38202 - 0 0 0 0 0 0 0 0 0 0 0 0
38203 - 0 0 0 0 0 0 0 0 0 0 0 0
38204 - 0 0 0 0 0 0 0 0 0 0 0 0
38205 - 0 0 0 0 0 0 0 0 0 0 0 0
38206 - 0 0 0 0 0 0 0 0 0 0 0 0
38207 - 0 0 0 0 0 0 0 0 0 0 0 0
38208 - 0 0 0 0 0 0 0 0 1 0 0 0
38209 - 0 0 1 0 0 0 0 0 1 0 0 0
38210 - 0 0 0 0 0 0 0 0 0 0 0 0
38211 - 0 0 0 0 0 0 0 0 0 0 0 0
38212 - 0 0 0 0 0 0 0 0 0 0 0 0
38213 - 0 0 0 0 0 0 0 0 0 0 0 0
38214 - 0 0 0 0 0 0 0 0 0 14 14 14
38215 - 42 42 42 94 94 94 14 14 14 101 101 101
38216 -128 128 128 2 2 6 18 18 18 116 116 116
38217 -118 98 46 121 92 8 121 92 8 98 78 10
38218 -162 162 162 106 106 106 2 2 6 2 2 6
38219 - 2 2 6 195 195 195 195 195 195 6 6 6
38220 - 2 2 6 2 2 6 2 2 6 2 2 6
38221 - 74 74 74 62 62 62 22 22 22 6 6 6
38222 - 0 0 0 0 0 0 0 0 0 0 0 0
38223 - 0 0 0 0 0 0 0 0 0 0 0 0
38224 - 0 0 0 0 0 0 0 0 0 0 0 0
38225 - 0 0 0 0 0 0 0 0 0 0 0 0
38226 - 0 0 0 0 0 0 0 0 0 0 0 0
38227 - 0 0 0 0 0 0 0 0 0 0 0 0
38228 - 0 0 0 0 0 0 0 0 1 0 0 1
38229 - 0 0 1 0 0 0 0 0 1 0 0 0
38230 - 0 0 0 0 0 0 0 0 0 0 0 0
38231 - 0 0 0 0 0 0 0 0 0 0 0 0
38232 - 0 0 0 0 0 0 0 0 0 0 0 0
38233 - 0 0 0 0 0 0 0 0 0 0 0 0
38234 - 0 0 0 0 0 0 0 0 0 10 10 10
38235 - 38 38 38 90 90 90 14 14 14 58 58 58
38236 -210 210 210 26 26 26 54 38 6 154 114 10
38237 -226 170 11 236 186 11 225 175 15 184 144 12
38238 -215 174 15 175 146 61 37 26 9 2 2 6
38239 - 70 70 70 246 246 246 138 138 138 2 2 6
38240 - 2 2 6 2 2 6 2 2 6 2 2 6
38241 - 70 70 70 66 66 66 26 26 26 6 6 6
38242 - 0 0 0 0 0 0 0 0 0 0 0 0
38243 - 0 0 0 0 0 0 0 0 0 0 0 0
38244 - 0 0 0 0 0 0 0 0 0 0 0 0
38245 - 0 0 0 0 0 0 0 0 0 0 0 0
38246 - 0 0 0 0 0 0 0 0 0 0 0 0
38247 - 0 0 0 0 0 0 0 0 0 0 0 0
38248 - 0 0 0 0 0 0 0 0 0 0 0 0
38249 - 0 0 0 0 0 0 0 0 0 0 0 0
38250 - 0 0 0 0 0 0 0 0 0 0 0 0
38251 - 0 0 0 0 0 0 0 0 0 0 0 0
38252 - 0 0 0 0 0 0 0 0 0 0 0 0
38253 - 0 0 0 0 0 0 0 0 0 0 0 0
38254 - 0 0 0 0 0 0 0 0 0 10 10 10
38255 - 38 38 38 86 86 86 14 14 14 10 10 10
38256 -195 195 195 188 164 115 192 133 9 225 175 15
38257 -239 182 13 234 190 10 232 195 16 232 200 30
38258 -245 207 45 241 208 19 232 195 16 184 144 12
38259 -218 194 134 211 206 186 42 42 42 2 2 6
38260 - 2 2 6 2 2 6 2 2 6 2 2 6
38261 - 50 50 50 74 74 74 30 30 30 6 6 6
38262 - 0 0 0 0 0 0 0 0 0 0 0 0
38263 - 0 0 0 0 0 0 0 0 0 0 0 0
38264 - 0 0 0 0 0 0 0 0 0 0 0 0
38265 - 0 0 0 0 0 0 0 0 0 0 0 0
38266 - 0 0 0 0 0 0 0 0 0 0 0 0
38267 - 0 0 0 0 0 0 0 0 0 0 0 0
38268 - 0 0 0 0 0 0 0 0 0 0 0 0
38269 - 0 0 0 0 0 0 0 0 0 0 0 0
38270 - 0 0 0 0 0 0 0 0 0 0 0 0
38271 - 0 0 0 0 0 0 0 0 0 0 0 0
38272 - 0 0 0 0 0 0 0 0 0 0 0 0
38273 - 0 0 0 0 0 0 0 0 0 0 0 0
38274 - 0 0 0 0 0 0 0 0 0 10 10 10
38275 - 34 34 34 86 86 86 14 14 14 2 2 6
38276 -121 87 25 192 133 9 219 162 10 239 182 13
38277 -236 186 11 232 195 16 241 208 19 244 214 54
38278 -246 218 60 246 218 38 246 215 20 241 208 19
38279 -241 208 19 226 184 13 121 87 25 2 2 6
38280 - 2 2 6 2 2 6 2 2 6 2 2 6
38281 - 50 50 50 82 82 82 34 34 34 10 10 10
38282 - 0 0 0 0 0 0 0 0 0 0 0 0
38283 - 0 0 0 0 0 0 0 0 0 0 0 0
38284 - 0 0 0 0 0 0 0 0 0 0 0 0
38285 - 0 0 0 0 0 0 0 0 0 0 0 0
38286 - 0 0 0 0 0 0 0 0 0 0 0 0
38287 - 0 0 0 0 0 0 0 0 0 0 0 0
38288 - 0 0 0 0 0 0 0 0 0 0 0 0
38289 - 0 0 0 0 0 0 0 0 0 0 0 0
38290 - 0 0 0 0 0 0 0 0 0 0 0 0
38291 - 0 0 0 0 0 0 0 0 0 0 0 0
38292 - 0 0 0 0 0 0 0 0 0 0 0 0
38293 - 0 0 0 0 0 0 0 0 0 0 0 0
38294 - 0 0 0 0 0 0 0 0 0 10 10 10
38295 - 34 34 34 82 82 82 30 30 30 61 42 6
38296 -180 123 7 206 145 10 230 174 11 239 182 13
38297 -234 190 10 238 202 15 241 208 19 246 218 74
38298 -246 218 38 246 215 20 246 215 20 246 215 20
38299 -226 184 13 215 174 15 184 144 12 6 6 6
38300 - 2 2 6 2 2 6 2 2 6 2 2 6
38301 - 26 26 26 94 94 94 42 42 42 14 14 14
38302 - 0 0 0 0 0 0 0 0 0 0 0 0
38303 - 0 0 0 0 0 0 0 0 0 0 0 0
38304 - 0 0 0 0 0 0 0 0 0 0 0 0
38305 - 0 0 0 0 0 0 0 0 0 0 0 0
38306 - 0 0 0 0 0 0 0 0 0 0 0 0
38307 - 0 0 0 0 0 0 0 0 0 0 0 0
38308 - 0 0 0 0 0 0 0 0 0 0 0 0
38309 - 0 0 0 0 0 0 0 0 0 0 0 0
38310 - 0 0 0 0 0 0 0 0 0 0 0 0
38311 - 0 0 0 0 0 0 0 0 0 0 0 0
38312 - 0 0 0 0 0 0 0 0 0 0 0 0
38313 - 0 0 0 0 0 0 0 0 0 0 0 0
38314 - 0 0 0 0 0 0 0 0 0 10 10 10
38315 - 30 30 30 78 78 78 50 50 50 104 69 6
38316 -192 133 9 216 158 10 236 178 12 236 186 11
38317 -232 195 16 241 208 19 244 214 54 245 215 43
38318 -246 215 20 246 215 20 241 208 19 198 155 10
38319 -200 144 11 216 158 10 156 118 10 2 2 6
38320 - 2 2 6 2 2 6 2 2 6 2 2 6
38321 - 6 6 6 90 90 90 54 54 54 18 18 18
38322 - 6 6 6 0 0 0 0 0 0 0 0 0
38323 - 0 0 0 0 0 0 0 0 0 0 0 0
38324 - 0 0 0 0 0 0 0 0 0 0 0 0
38325 - 0 0 0 0 0 0 0 0 0 0 0 0
38326 - 0 0 0 0 0 0 0 0 0 0 0 0
38327 - 0 0 0 0 0 0 0 0 0 0 0 0
38328 - 0 0 0 0 0 0 0 0 0 0 0 0
38329 - 0 0 0 0 0 0 0 0 0 0 0 0
38330 - 0 0 0 0 0 0 0 0 0 0 0 0
38331 - 0 0 0 0 0 0 0 0 0 0 0 0
38332 - 0 0 0 0 0 0 0 0 0 0 0 0
38333 - 0 0 0 0 0 0 0 0 0 0 0 0
38334 - 0 0 0 0 0 0 0 0 0 10 10 10
38335 - 30 30 30 78 78 78 46 46 46 22 22 22
38336 -137 92 6 210 162 10 239 182 13 238 190 10
38337 -238 202 15 241 208 19 246 215 20 246 215 20
38338 -241 208 19 203 166 17 185 133 11 210 150 10
38339 -216 158 10 210 150 10 102 78 10 2 2 6
38340 - 6 6 6 54 54 54 14 14 14 2 2 6
38341 - 2 2 6 62 62 62 74 74 74 30 30 30
38342 - 10 10 10 0 0 0 0 0 0 0 0 0
38343 - 0 0 0 0 0 0 0 0 0 0 0 0
38344 - 0 0 0 0 0 0 0 0 0 0 0 0
38345 - 0 0 0 0 0 0 0 0 0 0 0 0
38346 - 0 0 0 0 0 0 0 0 0 0 0 0
38347 - 0 0 0 0 0 0 0 0 0 0 0 0
38348 - 0 0 0 0 0 0 0 0 0 0 0 0
38349 - 0 0 0 0 0 0 0 0 0 0 0 0
38350 - 0 0 0 0 0 0 0 0 0 0 0 0
38351 - 0 0 0 0 0 0 0 0 0 0 0 0
38352 - 0 0 0 0 0 0 0 0 0 0 0 0
38353 - 0 0 0 0 0 0 0 0 0 0 0 0
38354 - 0 0 0 0 0 0 0 0 0 10 10 10
38355 - 34 34 34 78 78 78 50 50 50 6 6 6
38356 - 94 70 30 139 102 15 190 146 13 226 184 13
38357 -232 200 30 232 195 16 215 174 15 190 146 13
38358 -168 122 10 192 133 9 210 150 10 213 154 11
38359 -202 150 34 182 157 106 101 98 89 2 2 6
38360 - 2 2 6 78 78 78 116 116 116 58 58 58
38361 - 2 2 6 22 22 22 90 90 90 46 46 46
38362 - 18 18 18 6 6 6 0 0 0 0 0 0
38363 - 0 0 0 0 0 0 0 0 0 0 0 0
38364 - 0 0 0 0 0 0 0 0 0 0 0 0
38365 - 0 0 0 0 0 0 0 0 0 0 0 0
38366 - 0 0 0 0 0 0 0 0 0 0 0 0
38367 - 0 0 0 0 0 0 0 0 0 0 0 0
38368 - 0 0 0 0 0 0 0 0 0 0 0 0
38369 - 0 0 0 0 0 0 0 0 0 0 0 0
38370 - 0 0 0 0 0 0 0 0 0 0 0 0
38371 - 0 0 0 0 0 0 0 0 0 0 0 0
38372 - 0 0 0 0 0 0 0 0 0 0 0 0
38373 - 0 0 0 0 0 0 0 0 0 0 0 0
38374 - 0 0 0 0 0 0 0 0 0 10 10 10
38375 - 38 38 38 86 86 86 50 50 50 6 6 6
38376 -128 128 128 174 154 114 156 107 11 168 122 10
38377 -198 155 10 184 144 12 197 138 11 200 144 11
38378 -206 145 10 206 145 10 197 138 11 188 164 115
38379 -195 195 195 198 198 198 174 174 174 14 14 14
38380 - 2 2 6 22 22 22 116 116 116 116 116 116
38381 - 22 22 22 2 2 6 74 74 74 70 70 70
38382 - 30 30 30 10 10 10 0 0 0 0 0 0
38383 - 0 0 0 0 0 0 0 0 0 0 0 0
38384 - 0 0 0 0 0 0 0 0 0 0 0 0
38385 - 0 0 0 0 0 0 0 0 0 0 0 0
38386 - 0 0 0 0 0 0 0 0 0 0 0 0
38387 - 0 0 0 0 0 0 0 0 0 0 0 0
38388 - 0 0 0 0 0 0 0 0 0 0 0 0
38389 - 0 0 0 0 0 0 0 0 0 0 0 0
38390 - 0 0 0 0 0 0 0 0 0 0 0 0
38391 - 0 0 0 0 0 0 0 0 0 0 0 0
38392 - 0 0 0 0 0 0 0 0 0 0 0 0
38393 - 0 0 0 0 0 0 0 0 0 0 0 0
38394 - 0 0 0 0 0 0 6 6 6 18 18 18
38395 - 50 50 50 101 101 101 26 26 26 10 10 10
38396 -138 138 138 190 190 190 174 154 114 156 107 11
38397 -197 138 11 200 144 11 197 138 11 192 133 9
38398 -180 123 7 190 142 34 190 178 144 187 187 187
38399 -202 202 202 221 221 221 214 214 214 66 66 66
38400 - 2 2 6 2 2 6 50 50 50 62 62 62
38401 - 6 6 6 2 2 6 10 10 10 90 90 90
38402 - 50 50 50 18 18 18 6 6 6 0 0 0
38403 - 0 0 0 0 0 0 0 0 0 0 0 0
38404 - 0 0 0 0 0 0 0 0 0 0 0 0
38405 - 0 0 0 0 0 0 0 0 0 0 0 0
38406 - 0 0 0 0 0 0 0 0 0 0 0 0
38407 - 0 0 0 0 0 0 0 0 0 0 0 0
38408 - 0 0 0 0 0 0 0 0 0 0 0 0
38409 - 0 0 0 0 0 0 0 0 0 0 0 0
38410 - 0 0 0 0 0 0 0 0 0 0 0 0
38411 - 0 0 0 0 0 0 0 0 0 0 0 0
38412 - 0 0 0 0 0 0 0 0 0 0 0 0
38413 - 0 0 0 0 0 0 0 0 0 0 0 0
38414 - 0 0 0 0 0 0 10 10 10 34 34 34
38415 - 74 74 74 74 74 74 2 2 6 6 6 6
38416 -144 144 144 198 198 198 190 190 190 178 166 146
38417 -154 121 60 156 107 11 156 107 11 168 124 44
38418 -174 154 114 187 187 187 190 190 190 210 210 210
38419 -246 246 246 253 253 253 253 253 253 182 182 182
38420 - 6 6 6 2 2 6 2 2 6 2 2 6
38421 - 2 2 6 2 2 6 2 2 6 62 62 62
38422 - 74 74 74 34 34 34 14 14 14 0 0 0
38423 - 0 0 0 0 0 0 0 0 0 0 0 0
38424 - 0 0 0 0 0 0 0 0 0 0 0 0
38425 - 0 0 0 0 0 0 0 0 0 0 0 0
38426 - 0 0 0 0 0 0 0 0 0 0 0 0
38427 - 0 0 0 0 0 0 0 0 0 0 0 0
38428 - 0 0 0 0 0 0 0 0 0 0 0 0
38429 - 0 0 0 0 0 0 0 0 0 0 0 0
38430 - 0 0 0 0 0 0 0 0 0 0 0 0
38431 - 0 0 0 0 0 0 0 0 0 0 0 0
38432 - 0 0 0 0 0 0 0 0 0 0 0 0
38433 - 0 0 0 0 0 0 0 0 0 0 0 0
38434 - 0 0 0 10 10 10 22 22 22 54 54 54
38435 - 94 94 94 18 18 18 2 2 6 46 46 46
38436 -234 234 234 221 221 221 190 190 190 190 190 190
38437 -190 190 190 187 187 187 187 187 187 190 190 190
38438 -190 190 190 195 195 195 214 214 214 242 242 242
38439 -253 253 253 253 253 253 253 253 253 253 253 253
38440 - 82 82 82 2 2 6 2 2 6 2 2 6
38441 - 2 2 6 2 2 6 2 2 6 14 14 14
38442 - 86 86 86 54 54 54 22 22 22 6 6 6
38443 - 0 0 0 0 0 0 0 0 0 0 0 0
38444 - 0 0 0 0 0 0 0 0 0 0 0 0
38445 - 0 0 0 0 0 0 0 0 0 0 0 0
38446 - 0 0 0 0 0 0 0 0 0 0 0 0
38447 - 0 0 0 0 0 0 0 0 0 0 0 0
38448 - 0 0 0 0 0 0 0 0 0 0 0 0
38449 - 0 0 0 0 0 0 0 0 0 0 0 0
38450 - 0 0 0 0 0 0 0 0 0 0 0 0
38451 - 0 0 0 0 0 0 0 0 0 0 0 0
38452 - 0 0 0 0 0 0 0 0 0 0 0 0
38453 - 0 0 0 0 0 0 0 0 0 0 0 0
38454 - 6 6 6 18 18 18 46 46 46 90 90 90
38455 - 46 46 46 18 18 18 6 6 6 182 182 182
38456 -253 253 253 246 246 246 206 206 206 190 190 190
38457 -190 190 190 190 190 190 190 190 190 190 190 190
38458 -206 206 206 231 231 231 250 250 250 253 253 253
38459 -253 253 253 253 253 253 253 253 253 253 253 253
38460 -202 202 202 14 14 14 2 2 6 2 2 6
38461 - 2 2 6 2 2 6 2 2 6 2 2 6
38462 - 42 42 42 86 86 86 42 42 42 18 18 18
38463 - 6 6 6 0 0 0 0 0 0 0 0 0
38464 - 0 0 0 0 0 0 0 0 0 0 0 0
38465 - 0 0 0 0 0 0 0 0 0 0 0 0
38466 - 0 0 0 0 0 0 0 0 0 0 0 0
38467 - 0 0 0 0 0 0 0 0 0 0 0 0
38468 - 0 0 0 0 0 0 0 0 0 0 0 0
38469 - 0 0 0 0 0 0 0 0 0 0 0 0
38470 - 0 0 0 0 0 0 0 0 0 0 0 0
38471 - 0 0 0 0 0 0 0 0 0 0 0 0
38472 - 0 0 0 0 0 0 0 0 0 0 0 0
38473 - 0 0 0 0 0 0 0 0 0 6 6 6
38474 - 14 14 14 38 38 38 74 74 74 66 66 66
38475 - 2 2 6 6 6 6 90 90 90 250 250 250
38476 -253 253 253 253 253 253 238 238 238 198 198 198
38477 -190 190 190 190 190 190 195 195 195 221 221 221
38478 -246 246 246 253 253 253 253 253 253 253 253 253
38479 -253 253 253 253 253 253 253 253 253 253 253 253
38480 -253 253 253 82 82 82 2 2 6 2 2 6
38481 - 2 2 6 2 2 6 2 2 6 2 2 6
38482 - 2 2 6 78 78 78 70 70 70 34 34 34
38483 - 14 14 14 6 6 6 0 0 0 0 0 0
38484 - 0 0 0 0 0 0 0 0 0 0 0 0
38485 - 0 0 0 0 0 0 0 0 0 0 0 0
38486 - 0 0 0 0 0 0 0 0 0 0 0 0
38487 - 0 0 0 0 0 0 0 0 0 0 0 0
38488 - 0 0 0 0 0 0 0 0 0 0 0 0
38489 - 0 0 0 0 0 0 0 0 0 0 0 0
38490 - 0 0 0 0 0 0 0 0 0 0 0 0
38491 - 0 0 0 0 0 0 0 0 0 0 0 0
38492 - 0 0 0 0 0 0 0 0 0 0 0 0
38493 - 0 0 0 0 0 0 0 0 0 14 14 14
38494 - 34 34 34 66 66 66 78 78 78 6 6 6
38495 - 2 2 6 18 18 18 218 218 218 253 253 253
38496 -253 253 253 253 253 253 253 253 253 246 246 246
38497 -226 226 226 231 231 231 246 246 246 253 253 253
38498 -253 253 253 253 253 253 253 253 253 253 253 253
38499 -253 253 253 253 253 253 253 253 253 253 253 253
38500 -253 253 253 178 178 178 2 2 6 2 2 6
38501 - 2 2 6 2 2 6 2 2 6 2 2 6
38502 - 2 2 6 18 18 18 90 90 90 62 62 62
38503 - 30 30 30 10 10 10 0 0 0 0 0 0
38504 - 0 0 0 0 0 0 0 0 0 0 0 0
38505 - 0 0 0 0 0 0 0 0 0 0 0 0
38506 - 0 0 0 0 0 0 0 0 0 0 0 0
38507 - 0 0 0 0 0 0 0 0 0 0 0 0
38508 - 0 0 0 0 0 0 0 0 0 0 0 0
38509 - 0 0 0 0 0 0 0 0 0 0 0 0
38510 - 0 0 0 0 0 0 0 0 0 0 0 0
38511 - 0 0 0 0 0 0 0 0 0 0 0 0
38512 - 0 0 0 0 0 0 0 0 0 0 0 0
38513 - 0 0 0 0 0 0 10 10 10 26 26 26
38514 - 58 58 58 90 90 90 18 18 18 2 2 6
38515 - 2 2 6 110 110 110 253 253 253 253 253 253
38516 -253 253 253 253 253 253 253 253 253 253 253 253
38517 -250 250 250 253 253 253 253 253 253 253 253 253
38518 -253 253 253 253 253 253 253 253 253 253 253 253
38519 -253 253 253 253 253 253 253 253 253 253 253 253
38520 -253 253 253 231 231 231 18 18 18 2 2 6
38521 - 2 2 6 2 2 6 2 2 6 2 2 6
38522 - 2 2 6 2 2 6 18 18 18 94 94 94
38523 - 54 54 54 26 26 26 10 10 10 0 0 0
38524 - 0 0 0 0 0 0 0 0 0 0 0 0
38525 - 0 0 0 0 0 0 0 0 0 0 0 0
38526 - 0 0 0 0 0 0 0 0 0 0 0 0
38527 - 0 0 0 0 0 0 0 0 0 0 0 0
38528 - 0 0 0 0 0 0 0 0 0 0 0 0
38529 - 0 0 0 0 0 0 0 0 0 0 0 0
38530 - 0 0 0 0 0 0 0 0 0 0 0 0
38531 - 0 0 0 0 0 0 0 0 0 0 0 0
38532 - 0 0 0 0 0 0 0 0 0 0 0 0
38533 - 0 0 0 6 6 6 22 22 22 50 50 50
38534 - 90 90 90 26 26 26 2 2 6 2 2 6
38535 - 14 14 14 195 195 195 250 250 250 253 253 253
38536 -253 253 253 253 253 253 253 253 253 253 253 253
38537 -253 253 253 253 253 253 253 253 253 253 253 253
38538 -253 253 253 253 253 253 253 253 253 253 253 253
38539 -253 253 253 253 253 253 253 253 253 253 253 253
38540 -250 250 250 242 242 242 54 54 54 2 2 6
38541 - 2 2 6 2 2 6 2 2 6 2 2 6
38542 - 2 2 6 2 2 6 2 2 6 38 38 38
38543 - 86 86 86 50 50 50 22 22 22 6 6 6
38544 - 0 0 0 0 0 0 0 0 0 0 0 0
38545 - 0 0 0 0 0 0 0 0 0 0 0 0
38546 - 0 0 0 0 0 0 0 0 0 0 0 0
38547 - 0 0 0 0 0 0 0 0 0 0 0 0
38548 - 0 0 0 0 0 0 0 0 0 0 0 0
38549 - 0 0 0 0 0 0 0 0 0 0 0 0
38550 - 0 0 0 0 0 0 0 0 0 0 0 0
38551 - 0 0 0 0 0 0 0 0 0 0 0 0
38552 - 0 0 0 0 0 0 0 0 0 0 0 0
38553 - 6 6 6 14 14 14 38 38 38 82 82 82
38554 - 34 34 34 2 2 6 2 2 6 2 2 6
38555 - 42 42 42 195 195 195 246 246 246 253 253 253
38556 -253 253 253 253 253 253 253 253 253 250 250 250
38557 -242 242 242 242 242 242 250 250 250 253 253 253
38558 -253 253 253 253 253 253 253 253 253 253 253 253
38559 -253 253 253 250 250 250 246 246 246 238 238 238
38560 -226 226 226 231 231 231 101 101 101 6 6 6
38561 - 2 2 6 2 2 6 2 2 6 2 2 6
38562 - 2 2 6 2 2 6 2 2 6 2 2 6
38563 - 38 38 38 82 82 82 42 42 42 14 14 14
38564 - 6 6 6 0 0 0 0 0 0 0 0 0
38565 - 0 0 0 0 0 0 0 0 0 0 0 0
38566 - 0 0 0 0 0 0 0 0 0 0 0 0
38567 - 0 0 0 0 0 0 0 0 0 0 0 0
38568 - 0 0 0 0 0 0 0 0 0 0 0 0
38569 - 0 0 0 0 0 0 0 0 0 0 0 0
38570 - 0 0 0 0 0 0 0 0 0 0 0 0
38571 - 0 0 0 0 0 0 0 0 0 0 0 0
38572 - 0 0 0 0 0 0 0 0 0 0 0 0
38573 - 10 10 10 26 26 26 62 62 62 66 66 66
38574 - 2 2 6 2 2 6 2 2 6 6 6 6
38575 - 70 70 70 170 170 170 206 206 206 234 234 234
38576 -246 246 246 250 250 250 250 250 250 238 238 238
38577 -226 226 226 231 231 231 238 238 238 250 250 250
38578 -250 250 250 250 250 250 246 246 246 231 231 231
38579 -214 214 214 206 206 206 202 202 202 202 202 202
38580 -198 198 198 202 202 202 182 182 182 18 18 18
38581 - 2 2 6 2 2 6 2 2 6 2 2 6
38582 - 2 2 6 2 2 6 2 2 6 2 2 6
38583 - 2 2 6 62 62 62 66 66 66 30 30 30
38584 - 10 10 10 0 0 0 0 0 0 0 0 0
38585 - 0 0 0 0 0 0 0 0 0 0 0 0
38586 - 0 0 0 0 0 0 0 0 0 0 0 0
38587 - 0 0 0 0 0 0 0 0 0 0 0 0
38588 - 0 0 0 0 0 0 0 0 0 0 0 0
38589 - 0 0 0 0 0 0 0 0 0 0 0 0
38590 - 0 0 0 0 0 0 0 0 0 0 0 0
38591 - 0 0 0 0 0 0 0 0 0 0 0 0
38592 - 0 0 0 0 0 0 0 0 0 0 0 0
38593 - 14 14 14 42 42 42 82 82 82 18 18 18
38594 - 2 2 6 2 2 6 2 2 6 10 10 10
38595 - 94 94 94 182 182 182 218 218 218 242 242 242
38596 -250 250 250 253 253 253 253 253 253 250 250 250
38597 -234 234 234 253 253 253 253 253 253 253 253 253
38598 -253 253 253 253 253 253 253 253 253 246 246 246
38599 -238 238 238 226 226 226 210 210 210 202 202 202
38600 -195 195 195 195 195 195 210 210 210 158 158 158
38601 - 6 6 6 14 14 14 50 50 50 14 14 14
38602 - 2 2 6 2 2 6 2 2 6 2 2 6
38603 - 2 2 6 6 6 6 86 86 86 46 46 46
38604 - 18 18 18 6 6 6 0 0 0 0 0 0
38605 - 0 0 0 0 0 0 0 0 0 0 0 0
38606 - 0 0 0 0 0 0 0 0 0 0 0 0
38607 - 0 0 0 0 0 0 0 0 0 0 0 0
38608 - 0 0 0 0 0 0 0 0 0 0 0 0
38609 - 0 0 0 0 0 0 0 0 0 0 0 0
38610 - 0 0 0 0 0 0 0 0 0 0 0 0
38611 - 0 0 0 0 0 0 0 0 0 0 0 0
38612 - 0 0 0 0 0 0 0 0 0 6 6 6
38613 - 22 22 22 54 54 54 70 70 70 2 2 6
38614 - 2 2 6 10 10 10 2 2 6 22 22 22
38615 -166 166 166 231 231 231 250 250 250 253 253 253
38616 -253 253 253 253 253 253 253 253 253 250 250 250
38617 -242 242 242 253 253 253 253 253 253 253 253 253
38618 -253 253 253 253 253 253 253 253 253 253 253 253
38619 -253 253 253 253 253 253 253 253 253 246 246 246
38620 -231 231 231 206 206 206 198 198 198 226 226 226
38621 - 94 94 94 2 2 6 6 6 6 38 38 38
38622 - 30 30 30 2 2 6 2 2 6 2 2 6
38623 - 2 2 6 2 2 6 62 62 62 66 66 66
38624 - 26 26 26 10 10 10 0 0 0 0 0 0
38625 - 0 0 0 0 0 0 0 0 0 0 0 0
38626 - 0 0 0 0 0 0 0 0 0 0 0 0
38627 - 0 0 0 0 0 0 0 0 0 0 0 0
38628 - 0 0 0 0 0 0 0 0 0 0 0 0
38629 - 0 0 0 0 0 0 0 0 0 0 0 0
38630 - 0 0 0 0 0 0 0 0 0 0 0 0
38631 - 0 0 0 0 0 0 0 0 0 0 0 0
38632 - 0 0 0 0 0 0 0 0 0 10 10 10
38633 - 30 30 30 74 74 74 50 50 50 2 2 6
38634 - 26 26 26 26 26 26 2 2 6 106 106 106
38635 -238 238 238 253 253 253 253 253 253 253 253 253
38636 -253 253 253 253 253 253 253 253 253 253 253 253
38637 -253 253 253 253 253 253 253 253 253 253 253 253
38638 -253 253 253 253 253 253 253 253 253 253 253 253
38639 -253 253 253 253 253 253 253 253 253 253 253 253
38640 -253 253 253 246 246 246 218 218 218 202 202 202
38641 -210 210 210 14 14 14 2 2 6 2 2 6
38642 - 30 30 30 22 22 22 2 2 6 2 2 6
38643 - 2 2 6 2 2 6 18 18 18 86 86 86
38644 - 42 42 42 14 14 14 0 0 0 0 0 0
38645 - 0 0 0 0 0 0 0 0 0 0 0 0
38646 - 0 0 0 0 0 0 0 0 0 0 0 0
38647 - 0 0 0 0 0 0 0 0 0 0 0 0
38648 - 0 0 0 0 0 0 0 0 0 0 0 0
38649 - 0 0 0 0 0 0 0 0 0 0 0 0
38650 - 0 0 0 0 0 0 0 0 0 0 0 0
38651 - 0 0 0 0 0 0 0 0 0 0 0 0
38652 - 0 0 0 0 0 0 0 0 0 14 14 14
38653 - 42 42 42 90 90 90 22 22 22 2 2 6
38654 - 42 42 42 2 2 6 18 18 18 218 218 218
38655 -253 253 253 253 253 253 253 253 253 253 253 253
38656 -253 253 253 253 253 253 253 253 253 253 253 253
38657 -253 253 253 253 253 253 253 253 253 253 253 253
38658 -253 253 253 253 253 253 253 253 253 253 253 253
38659 -253 253 253 253 253 253 253 253 253 253 253 253
38660 -253 253 253 253 253 253 250 250 250 221 221 221
38661 -218 218 218 101 101 101 2 2 6 14 14 14
38662 - 18 18 18 38 38 38 10 10 10 2 2 6
38663 - 2 2 6 2 2 6 2 2 6 78 78 78
38664 - 58 58 58 22 22 22 6 6 6 0 0 0
38665 - 0 0 0 0 0 0 0 0 0 0 0 0
38666 - 0 0 0 0 0 0 0 0 0 0 0 0
38667 - 0 0 0 0 0 0 0 0 0 0 0 0
38668 - 0 0 0 0 0 0 0 0 0 0 0 0
38669 - 0 0 0 0 0 0 0 0 0 0 0 0
38670 - 0 0 0 0 0 0 0 0 0 0 0 0
38671 - 0 0 0 0 0 0 0 0 0 0 0 0
38672 - 0 0 0 0 0 0 6 6 6 18 18 18
38673 - 54 54 54 82 82 82 2 2 6 26 26 26
38674 - 22 22 22 2 2 6 123 123 123 253 253 253
38675 -253 253 253 253 253 253 253 253 253 253 253 253
38676 -253 253 253 253 253 253 253 253 253 253 253 253
38677 -253 253 253 253 253 253 253 253 253 253 253 253
38678 -253 253 253 253 253 253 253 253 253 253 253 253
38679 -253 253 253 253 253 253 253 253 253 253 253 253
38680 -253 253 253 253 253 253 253 253 253 250 250 250
38681 -238 238 238 198 198 198 6 6 6 38 38 38
38682 - 58 58 58 26 26 26 38 38 38 2 2 6
38683 - 2 2 6 2 2 6 2 2 6 46 46 46
38684 - 78 78 78 30 30 30 10 10 10 0 0 0
38685 - 0 0 0 0 0 0 0 0 0 0 0 0
38686 - 0 0 0 0 0 0 0 0 0 0 0 0
38687 - 0 0 0 0 0 0 0 0 0 0 0 0
38688 - 0 0 0 0 0 0 0 0 0 0 0 0
38689 - 0 0 0 0 0 0 0 0 0 0 0 0
38690 - 0 0 0 0 0 0 0 0 0 0 0 0
38691 - 0 0 0 0 0 0 0 0 0 0 0 0
38692 - 0 0 0 0 0 0 10 10 10 30 30 30
38693 - 74 74 74 58 58 58 2 2 6 42 42 42
38694 - 2 2 6 22 22 22 231 231 231 253 253 253
38695 -253 253 253 253 253 253 253 253 253 253 253 253
38696 -253 253 253 253 253 253 253 253 253 250 250 250
38697 -253 253 253 253 253 253 253 253 253 253 253 253
38698 -253 253 253 253 253 253 253 253 253 253 253 253
38699 -253 253 253 253 253 253 253 253 253 253 253 253
38700 -253 253 253 253 253 253 253 253 253 253 253 253
38701 -253 253 253 246 246 246 46 46 46 38 38 38
38702 - 42 42 42 14 14 14 38 38 38 14 14 14
38703 - 2 2 6 2 2 6 2 2 6 6 6 6
38704 - 86 86 86 46 46 46 14 14 14 0 0 0
38705 - 0 0 0 0 0 0 0 0 0 0 0 0
38706 - 0 0 0 0 0 0 0 0 0 0 0 0
38707 - 0 0 0 0 0 0 0 0 0 0 0 0
38708 - 0 0 0 0 0 0 0 0 0 0 0 0
38709 - 0 0 0 0 0 0 0 0 0 0 0 0
38710 - 0 0 0 0 0 0 0 0 0 0 0 0
38711 - 0 0 0 0 0 0 0 0 0 0 0 0
38712 - 0 0 0 6 6 6 14 14 14 42 42 42
38713 - 90 90 90 18 18 18 18 18 18 26 26 26
38714 - 2 2 6 116 116 116 253 253 253 253 253 253
38715 -253 253 253 253 253 253 253 253 253 253 253 253
38716 -253 253 253 253 253 253 250 250 250 238 238 238
38717 -253 253 253 253 253 253 253 253 253 253 253 253
38718 -253 253 253 253 253 253 253 253 253 253 253 253
38719 -253 253 253 253 253 253 253 253 253 253 253 253
38720 -253 253 253 253 253 253 253 253 253 253 253 253
38721 -253 253 253 253 253 253 94 94 94 6 6 6
38722 - 2 2 6 2 2 6 10 10 10 34 34 34
38723 - 2 2 6 2 2 6 2 2 6 2 2 6
38724 - 74 74 74 58 58 58 22 22 22 6 6 6
38725 - 0 0 0 0 0 0 0 0 0 0 0 0
38726 - 0 0 0 0 0 0 0 0 0 0 0 0
38727 - 0 0 0 0 0 0 0 0 0 0 0 0
38728 - 0 0 0 0 0 0 0 0 0 0 0 0
38729 - 0 0 0 0 0 0 0 0 0 0 0 0
38730 - 0 0 0 0 0 0 0 0 0 0 0 0
38731 - 0 0 0 0 0 0 0 0 0 0 0 0
38732 - 0 0 0 10 10 10 26 26 26 66 66 66
38733 - 82 82 82 2 2 6 38 38 38 6 6 6
38734 - 14 14 14 210 210 210 253 253 253 253 253 253
38735 -253 253 253 253 253 253 253 253 253 253 253 253
38736 -253 253 253 253 253 253 246 246 246 242 242 242
38737 -253 253 253 253 253 253 253 253 253 253 253 253
38738 -253 253 253 253 253 253 253 253 253 253 253 253
38739 -253 253 253 253 253 253 253 253 253 253 253 253
38740 -253 253 253 253 253 253 253 253 253 253 253 253
38741 -253 253 253 253 253 253 144 144 144 2 2 6
38742 - 2 2 6 2 2 6 2 2 6 46 46 46
38743 - 2 2 6 2 2 6 2 2 6 2 2 6
38744 - 42 42 42 74 74 74 30 30 30 10 10 10
38745 - 0 0 0 0 0 0 0 0 0 0 0 0
38746 - 0 0 0 0 0 0 0 0 0 0 0 0
38747 - 0 0 0 0 0 0 0 0 0 0 0 0
38748 - 0 0 0 0 0 0 0 0 0 0 0 0
38749 - 0 0 0 0 0 0 0 0 0 0 0 0
38750 - 0 0 0 0 0 0 0 0 0 0 0 0
38751 - 0 0 0 0 0 0 0 0 0 0 0 0
38752 - 6 6 6 14 14 14 42 42 42 90 90 90
38753 - 26 26 26 6 6 6 42 42 42 2 2 6
38754 - 74 74 74 250 250 250 253 253 253 253 253 253
38755 -253 253 253 253 253 253 253 253 253 253 253 253
38756 -253 253 253 253 253 253 242 242 242 242 242 242
38757 -253 253 253 253 253 253 253 253 253 253 253 253
38758 -253 253 253 253 253 253 253 253 253 253 253 253
38759 -253 253 253 253 253 253 253 253 253 253 253 253
38760 -253 253 253 253 253 253 253 253 253 253 253 253
38761 -253 253 253 253 253 253 182 182 182 2 2 6
38762 - 2 2 6 2 2 6 2 2 6 46 46 46
38763 - 2 2 6 2 2 6 2 2 6 2 2 6
38764 - 10 10 10 86 86 86 38 38 38 10 10 10
38765 - 0 0 0 0 0 0 0 0 0 0 0 0
38766 - 0 0 0 0 0 0 0 0 0 0 0 0
38767 - 0 0 0 0 0 0 0 0 0 0 0 0
38768 - 0 0 0 0 0 0 0 0 0 0 0 0
38769 - 0 0 0 0 0 0 0 0 0 0 0 0
38770 - 0 0 0 0 0 0 0 0 0 0 0 0
38771 - 0 0 0 0 0 0 0 0 0 0 0 0
38772 - 10 10 10 26 26 26 66 66 66 82 82 82
38773 - 2 2 6 22 22 22 18 18 18 2 2 6
38774 -149 149 149 253 253 253 253 253 253 253 253 253
38775 -253 253 253 253 253 253 253 253 253 253 253 253
38776 -253 253 253 253 253 253 234 234 234 242 242 242
38777 -253 253 253 253 253 253 253 253 253 253 253 253
38778 -253 253 253 253 253 253 253 253 253 253 253 253
38779 -253 253 253 253 253 253 253 253 253 253 253 253
38780 -253 253 253 253 253 253 253 253 253 253 253 253
38781 -253 253 253 253 253 253 206 206 206 2 2 6
38782 - 2 2 6 2 2 6 2 2 6 38 38 38
38783 - 2 2 6 2 2 6 2 2 6 2 2 6
38784 - 6 6 6 86 86 86 46 46 46 14 14 14
38785 - 0 0 0 0 0 0 0 0 0 0 0 0
38786 - 0 0 0 0 0 0 0 0 0 0 0 0
38787 - 0 0 0 0 0 0 0 0 0 0 0 0
38788 - 0 0 0 0 0 0 0 0 0 0 0 0
38789 - 0 0 0 0 0 0 0 0 0 0 0 0
38790 - 0 0 0 0 0 0 0 0 0 0 0 0
38791 - 0 0 0 0 0 0 0 0 0 6 6 6
38792 - 18 18 18 46 46 46 86 86 86 18 18 18
38793 - 2 2 6 34 34 34 10 10 10 6 6 6
38794 -210 210 210 253 253 253 253 253 253 253 253 253
38795 -253 253 253 253 253 253 253 253 253 253 253 253
38796 -253 253 253 253 253 253 234 234 234 242 242 242
38797 -253 253 253 253 253 253 253 253 253 253 253 253
38798 -253 253 253 253 253 253 253 253 253 253 253 253
38799 -253 253 253 253 253 253 253 253 253 253 253 253
38800 -253 253 253 253 253 253 253 253 253 253 253 253
38801 -253 253 253 253 253 253 221 221 221 6 6 6
38802 - 2 2 6 2 2 6 6 6 6 30 30 30
38803 - 2 2 6 2 2 6 2 2 6 2 2 6
38804 - 2 2 6 82 82 82 54 54 54 18 18 18
38805 - 6 6 6 0 0 0 0 0 0 0 0 0
38806 - 0 0 0 0 0 0 0 0 0 0 0 0
38807 - 0 0 0 0 0 0 0 0 0 0 0 0
38808 - 0 0 0 0 0 0 0 0 0 0 0 0
38809 - 0 0 0 0 0 0 0 0 0 0 0 0
38810 - 0 0 0 0 0 0 0 0 0 0 0 0
38811 - 0 0 0 0 0 0 0 0 0 10 10 10
38812 - 26 26 26 66 66 66 62 62 62 2 2 6
38813 - 2 2 6 38 38 38 10 10 10 26 26 26
38814 -238 238 238 253 253 253 253 253 253 253 253 253
38815 -253 253 253 253 253 253 253 253 253 253 253 253
38816 -253 253 253 253 253 253 231 231 231 238 238 238
38817 -253 253 253 253 253 253 253 253 253 253 253 253
38818 -253 253 253 253 253 253 253 253 253 253 253 253
38819 -253 253 253 253 253 253 253 253 253 253 253 253
38820 -253 253 253 253 253 253 253 253 253 253 253 253
38821 -253 253 253 253 253 253 231 231 231 6 6 6
38822 - 2 2 6 2 2 6 10 10 10 30 30 30
38823 - 2 2 6 2 2 6 2 2 6 2 2 6
38824 - 2 2 6 66 66 66 58 58 58 22 22 22
38825 - 6 6 6 0 0 0 0 0 0 0 0 0
38826 - 0 0 0 0 0 0 0 0 0 0 0 0
38827 - 0 0 0 0 0 0 0 0 0 0 0 0
38828 - 0 0 0 0 0 0 0 0 0 0 0 0
38829 - 0 0 0 0 0 0 0 0 0 0 0 0
38830 - 0 0 0 0 0 0 0 0 0 0 0 0
38831 - 0 0 0 0 0 0 0 0 0 10 10 10
38832 - 38 38 38 78 78 78 6 6 6 2 2 6
38833 - 2 2 6 46 46 46 14 14 14 42 42 42
38834 -246 246 246 253 253 253 253 253 253 253 253 253
38835 -253 253 253 253 253 253 253 253 253 253 253 253
38836 -253 253 253 253 253 253 231 231 231 242 242 242
38837 -253 253 253 253 253 253 253 253 253 253 253 253
38838 -253 253 253 253 253 253 253 253 253 253 253 253
38839 -253 253 253 253 253 253 253 253 253 253 253 253
38840 -253 253 253 253 253 253 253 253 253 253 253 253
38841 -253 253 253 253 253 253 234 234 234 10 10 10
38842 - 2 2 6 2 2 6 22 22 22 14 14 14
38843 - 2 2 6 2 2 6 2 2 6 2 2 6
38844 - 2 2 6 66 66 66 62 62 62 22 22 22
38845 - 6 6 6 0 0 0 0 0 0 0 0 0
38846 - 0 0 0 0 0 0 0 0 0 0 0 0
38847 - 0 0 0 0 0 0 0 0 0 0 0 0
38848 - 0 0 0 0 0 0 0 0 0 0 0 0
38849 - 0 0 0 0 0 0 0 0 0 0 0 0
38850 - 0 0 0 0 0 0 0 0 0 0 0 0
38851 - 0 0 0 0 0 0 6 6 6 18 18 18
38852 - 50 50 50 74 74 74 2 2 6 2 2 6
38853 - 14 14 14 70 70 70 34 34 34 62 62 62
38854 -250 250 250 253 253 253 253 253 253 253 253 253
38855 -253 253 253 253 253 253 253 253 253 253 253 253
38856 -253 253 253 253 253 253 231 231 231 246 246 246
38857 -253 253 253 253 253 253 253 253 253 253 253 253
38858 -253 253 253 253 253 253 253 253 253 253 253 253
38859 -253 253 253 253 253 253 253 253 253 253 253 253
38860 -253 253 253 253 253 253 253 253 253 253 253 253
38861 -253 253 253 253 253 253 234 234 234 14 14 14
38862 - 2 2 6 2 2 6 30 30 30 2 2 6
38863 - 2 2 6 2 2 6 2 2 6 2 2 6
38864 - 2 2 6 66 66 66 62 62 62 22 22 22
38865 - 6 6 6 0 0 0 0 0 0 0 0 0
38866 - 0 0 0 0 0 0 0 0 0 0 0 0
38867 - 0 0 0 0 0 0 0 0 0 0 0 0
38868 - 0 0 0 0 0 0 0 0 0 0 0 0
38869 - 0 0 0 0 0 0 0 0 0 0 0 0
38870 - 0 0 0 0 0 0 0 0 0 0 0 0
38871 - 0 0 0 0 0 0 6 6 6 18 18 18
38872 - 54 54 54 62 62 62 2 2 6 2 2 6
38873 - 2 2 6 30 30 30 46 46 46 70 70 70
38874 -250 250 250 253 253 253 253 253 253 253 253 253
38875 -253 253 253 253 253 253 253 253 253 253 253 253
38876 -253 253 253 253 253 253 231 231 231 246 246 246
38877 -253 253 253 253 253 253 253 253 253 253 253 253
38878 -253 253 253 253 253 253 253 253 253 253 253 253
38879 -253 253 253 253 253 253 253 253 253 253 253 253
38880 -253 253 253 253 253 253 253 253 253 253 253 253
38881 -253 253 253 253 253 253 226 226 226 10 10 10
38882 - 2 2 6 6 6 6 30 30 30 2 2 6
38883 - 2 2 6 2 2 6 2 2 6 2 2 6
38884 - 2 2 6 66 66 66 58 58 58 22 22 22
38885 - 6 6 6 0 0 0 0 0 0 0 0 0
38886 - 0 0 0 0 0 0 0 0 0 0 0 0
38887 - 0 0 0 0 0 0 0 0 0 0 0 0
38888 - 0 0 0 0 0 0 0 0 0 0 0 0
38889 - 0 0 0 0 0 0 0 0 0 0 0 0
38890 - 0 0 0 0 0 0 0 0 0 0 0 0
38891 - 0 0 0 0 0 0 6 6 6 22 22 22
38892 - 58 58 58 62 62 62 2 2 6 2 2 6
38893 - 2 2 6 2 2 6 30 30 30 78 78 78
38894 -250 250 250 253 253 253 253 253 253 253 253 253
38895 -253 253 253 253 253 253 253 253 253 253 253 253
38896 -253 253 253 253 253 253 231 231 231 246 246 246
38897 -253 253 253 253 253 253 253 253 253 253 253 253
38898 -253 253 253 253 253 253 253 253 253 253 253 253
38899 -253 253 253 253 253 253 253 253 253 253 253 253
38900 -253 253 253 253 253 253 253 253 253 253 253 253
38901 -253 253 253 253 253 253 206 206 206 2 2 6
38902 - 22 22 22 34 34 34 18 14 6 22 22 22
38903 - 26 26 26 18 18 18 6 6 6 2 2 6
38904 - 2 2 6 82 82 82 54 54 54 18 18 18
38905 - 6 6 6 0 0 0 0 0 0 0 0 0
38906 - 0 0 0 0 0 0 0 0 0 0 0 0
38907 - 0 0 0 0 0 0 0 0 0 0 0 0
38908 - 0 0 0 0 0 0 0 0 0 0 0 0
38909 - 0 0 0 0 0 0 0 0 0 0 0 0
38910 - 0 0 0 0 0 0 0 0 0 0 0 0
38911 - 0 0 0 0 0 0 6 6 6 26 26 26
38912 - 62 62 62 106 106 106 74 54 14 185 133 11
38913 -210 162 10 121 92 8 6 6 6 62 62 62
38914 -238 238 238 253 253 253 253 253 253 253 253 253
38915 -253 253 253 253 253 253 253 253 253 253 253 253
38916 -253 253 253 253 253 253 231 231 231 246 246 246
38917 -253 253 253 253 253 253 253 253 253 253 253 253
38918 -253 253 253 253 253 253 253 253 253 253 253 253
38919 -253 253 253 253 253 253 253 253 253 253 253 253
38920 -253 253 253 253 253 253 253 253 253 253 253 253
38921 -253 253 253 253 253 253 158 158 158 18 18 18
38922 - 14 14 14 2 2 6 2 2 6 2 2 6
38923 - 6 6 6 18 18 18 66 66 66 38 38 38
38924 - 6 6 6 94 94 94 50 50 50 18 18 18
38925 - 6 6 6 0 0 0 0 0 0 0 0 0
38926 - 0 0 0 0 0 0 0 0 0 0 0 0
38927 - 0 0 0 0 0 0 0 0 0 0 0 0
38928 - 0 0 0 0 0 0 0 0 0 0 0 0
38929 - 0 0 0 0 0 0 0 0 0 0 0 0
38930 - 0 0 0 0 0 0 0 0 0 6 6 6
38931 - 10 10 10 10 10 10 18 18 18 38 38 38
38932 - 78 78 78 142 134 106 216 158 10 242 186 14
38933 -246 190 14 246 190 14 156 118 10 10 10 10
38934 - 90 90 90 238 238 238 253 253 253 253 253 253
38935 -253 253 253 253 253 253 253 253 253 253 253 253
38936 -253 253 253 253 253 253 231 231 231 250 250 250
38937 -253 253 253 253 253 253 253 253 253 253 253 253
38938 -253 253 253 253 253 253 253 253 253 253 253 253
38939 -253 253 253 253 253 253 253 253 253 253 253 253
38940 -253 253 253 253 253 253 253 253 253 246 230 190
38941 -238 204 91 238 204 91 181 142 44 37 26 9
38942 - 2 2 6 2 2 6 2 2 6 2 2 6
38943 - 2 2 6 2 2 6 38 38 38 46 46 46
38944 - 26 26 26 106 106 106 54 54 54 18 18 18
38945 - 6 6 6 0 0 0 0 0 0 0 0 0
38946 - 0 0 0 0 0 0 0 0 0 0 0 0
38947 - 0 0 0 0 0 0 0 0 0 0 0 0
38948 - 0 0 0 0 0 0 0 0 0 0 0 0
38949 - 0 0 0 0 0 0 0 0 0 0 0 0
38950 - 0 0 0 6 6 6 14 14 14 22 22 22
38951 - 30 30 30 38 38 38 50 50 50 70 70 70
38952 -106 106 106 190 142 34 226 170 11 242 186 14
38953 -246 190 14 246 190 14 246 190 14 154 114 10
38954 - 6 6 6 74 74 74 226 226 226 253 253 253
38955 -253 253 253 253 253 253 253 253 253 253 253 253
38956 -253 253 253 253 253 253 231 231 231 250 250 250
38957 -253 253 253 253 253 253 253 253 253 253 253 253
38958 -253 253 253 253 253 253 253 253 253 253 253 253
38959 -253 253 253 253 253 253 253 253 253 253 253 253
38960 -253 253 253 253 253 253 253 253 253 228 184 62
38961 -241 196 14 241 208 19 232 195 16 38 30 10
38962 - 2 2 6 2 2 6 2 2 6 2 2 6
38963 - 2 2 6 6 6 6 30 30 30 26 26 26
38964 -203 166 17 154 142 90 66 66 66 26 26 26
38965 - 6 6 6 0 0 0 0 0 0 0 0 0
38966 - 0 0 0 0 0 0 0 0 0 0 0 0
38967 - 0 0 0 0 0 0 0 0 0 0 0 0
38968 - 0 0 0 0 0 0 0 0 0 0 0 0
38969 - 0 0 0 0 0 0 0 0 0 0 0 0
38970 - 6 6 6 18 18 18 38 38 38 58 58 58
38971 - 78 78 78 86 86 86 101 101 101 123 123 123
38972 -175 146 61 210 150 10 234 174 13 246 186 14
38973 -246 190 14 246 190 14 246 190 14 238 190 10
38974 -102 78 10 2 2 6 46 46 46 198 198 198
38975 -253 253 253 253 253 253 253 253 253 253 253 253
38976 -253 253 253 253 253 253 234 234 234 242 242 242
38977 -253 253 253 253 253 253 253 253 253 253 253 253
38978 -253 253 253 253 253 253 253 253 253 253 253 253
38979 -253 253 253 253 253 253 253 253 253 253 253 253
38980 -253 253 253 253 253 253 253 253 253 224 178 62
38981 -242 186 14 241 196 14 210 166 10 22 18 6
38982 - 2 2 6 2 2 6 2 2 6 2 2 6
38983 - 2 2 6 2 2 6 6 6 6 121 92 8
38984 -238 202 15 232 195 16 82 82 82 34 34 34
38985 - 10 10 10 0 0 0 0 0 0 0 0 0
38986 - 0 0 0 0 0 0 0 0 0 0 0 0
38987 - 0 0 0 0 0 0 0 0 0 0 0 0
38988 - 0 0 0 0 0 0 0 0 0 0 0 0
38989 - 0 0 0 0 0 0 0 0 0 0 0 0
38990 - 14 14 14 38 38 38 70 70 70 154 122 46
38991 -190 142 34 200 144 11 197 138 11 197 138 11
38992 -213 154 11 226 170 11 242 186 14 246 190 14
38993 -246 190 14 246 190 14 246 190 14 246 190 14
38994 -225 175 15 46 32 6 2 2 6 22 22 22
38995 -158 158 158 250 250 250 253 253 253 253 253 253
38996 -253 253 253 253 253 253 253 253 253 253 253 253
38997 -253 253 253 253 253 253 253 253 253 253 253 253
38998 -253 253 253 253 253 253 253 253 253 253 253 253
38999 -253 253 253 253 253 253 253 253 253 253 253 253
39000 -253 253 253 250 250 250 242 242 242 224 178 62
39001 -239 182 13 236 186 11 213 154 11 46 32 6
39002 - 2 2 6 2 2 6 2 2 6 2 2 6
39003 - 2 2 6 2 2 6 61 42 6 225 175 15
39004 -238 190 10 236 186 11 112 100 78 42 42 42
39005 - 14 14 14 0 0 0 0 0 0 0 0 0
39006 - 0 0 0 0 0 0 0 0 0 0 0 0
39007 - 0 0 0 0 0 0 0 0 0 0 0 0
39008 - 0 0 0 0 0 0 0 0 0 0 0 0
39009 - 0 0 0 0 0 0 0 0 0 6 6 6
39010 - 22 22 22 54 54 54 154 122 46 213 154 11
39011 -226 170 11 230 174 11 226 170 11 226 170 11
39012 -236 178 12 242 186 14 246 190 14 246 190 14
39013 -246 190 14 246 190 14 246 190 14 246 190 14
39014 -241 196 14 184 144 12 10 10 10 2 2 6
39015 - 6 6 6 116 116 116 242 242 242 253 253 253
39016 -253 253 253 253 253 253 253 253 253 253 253 253
39017 -253 253 253 253 253 253 253 253 253 253 253 253
39018 -253 253 253 253 253 253 253 253 253 253 253 253
39019 -253 253 253 253 253 253 253 253 253 253 253 253
39020 -253 253 253 231 231 231 198 198 198 214 170 54
39021 -236 178 12 236 178 12 210 150 10 137 92 6
39022 - 18 14 6 2 2 6 2 2 6 2 2 6
39023 - 6 6 6 70 47 6 200 144 11 236 178 12
39024 -239 182 13 239 182 13 124 112 88 58 58 58
39025 - 22 22 22 6 6 6 0 0 0 0 0 0
39026 - 0 0 0 0 0 0 0 0 0 0 0 0
39027 - 0 0 0 0 0 0 0 0 0 0 0 0
39028 - 0 0 0 0 0 0 0 0 0 0 0 0
39029 - 0 0 0 0 0 0 0 0 0 10 10 10
39030 - 30 30 30 70 70 70 180 133 36 226 170 11
39031 -239 182 13 242 186 14 242 186 14 246 186 14
39032 -246 190 14 246 190 14 246 190 14 246 190 14
39033 -246 190 14 246 190 14 246 190 14 246 190 14
39034 -246 190 14 232 195 16 98 70 6 2 2 6
39035 - 2 2 6 2 2 6 66 66 66 221 221 221
39036 -253 253 253 253 253 253 253 253 253 253 253 253
39037 -253 253 253 253 253 253 253 253 253 253 253 253
39038 -253 253 253 253 253 253 253 253 253 253 253 253
39039 -253 253 253 253 253 253 253 253 253 253 253 253
39040 -253 253 253 206 206 206 198 198 198 214 166 58
39041 -230 174 11 230 174 11 216 158 10 192 133 9
39042 -163 110 8 116 81 8 102 78 10 116 81 8
39043 -167 114 7 197 138 11 226 170 11 239 182 13
39044 -242 186 14 242 186 14 162 146 94 78 78 78
39045 - 34 34 34 14 14 14 6 6 6 0 0 0
39046 - 0 0 0 0 0 0 0 0 0 0 0 0
39047 - 0 0 0 0 0 0 0 0 0 0 0 0
39048 - 0 0 0 0 0 0 0 0 0 0 0 0
39049 - 0 0 0 0 0 0 0 0 0 6 6 6
39050 - 30 30 30 78 78 78 190 142 34 226 170 11
39051 -239 182 13 246 190 14 246 190 14 246 190 14
39052 -246 190 14 246 190 14 246 190 14 246 190 14
39053 -246 190 14 246 190 14 246 190 14 246 190 14
39054 -246 190 14 241 196 14 203 166 17 22 18 6
39055 - 2 2 6 2 2 6 2 2 6 38 38 38
39056 -218 218 218 253 253 253 253 253 253 253 253 253
39057 -253 253 253 253 253 253 253 253 253 253 253 253
39058 -253 253 253 253 253 253 253 253 253 253 253 253
39059 -253 253 253 253 253 253 253 253 253 253 253 253
39060 -250 250 250 206 206 206 198 198 198 202 162 69
39061 -226 170 11 236 178 12 224 166 10 210 150 10
39062 -200 144 11 197 138 11 192 133 9 197 138 11
39063 -210 150 10 226 170 11 242 186 14 246 190 14
39064 -246 190 14 246 186 14 225 175 15 124 112 88
39065 - 62 62 62 30 30 30 14 14 14 6 6 6
39066 - 0 0 0 0 0 0 0 0 0 0 0 0
39067 - 0 0 0 0 0 0 0 0 0 0 0 0
39068 - 0 0 0 0 0 0 0 0 0 0 0 0
39069 - 0 0 0 0 0 0 0 0 0 10 10 10
39070 - 30 30 30 78 78 78 174 135 50 224 166 10
39071 -239 182 13 246 190 14 246 190 14 246 190 14
39072 -246 190 14 246 190 14 246 190 14 246 190 14
39073 -246 190 14 246 190 14 246 190 14 246 190 14
39074 -246 190 14 246 190 14 241 196 14 139 102 15
39075 - 2 2 6 2 2 6 2 2 6 2 2 6
39076 - 78 78 78 250 250 250 253 253 253 253 253 253
39077 -253 253 253 253 253 253 253 253 253 253 253 253
39078 -253 253 253 253 253 253 253 253 253 253 253 253
39079 -253 253 253 253 253 253 253 253 253 253 253 253
39080 -250 250 250 214 214 214 198 198 198 190 150 46
39081 -219 162 10 236 178 12 234 174 13 224 166 10
39082 -216 158 10 213 154 11 213 154 11 216 158 10
39083 -226 170 11 239 182 13 246 190 14 246 190 14
39084 -246 190 14 246 190 14 242 186 14 206 162 42
39085 -101 101 101 58 58 58 30 30 30 14 14 14
39086 - 6 6 6 0 0 0 0 0 0 0 0 0
39087 - 0 0 0 0 0 0 0 0 0 0 0 0
39088 - 0 0 0 0 0 0 0 0 0 0 0 0
39089 - 0 0 0 0 0 0 0 0 0 10 10 10
39090 - 30 30 30 74 74 74 174 135 50 216 158 10
39091 -236 178 12 246 190 14 246 190 14 246 190 14
39092 -246 190 14 246 190 14 246 190 14 246 190 14
39093 -246 190 14 246 190 14 246 190 14 246 190 14
39094 -246 190 14 246 190 14 241 196 14 226 184 13
39095 - 61 42 6 2 2 6 2 2 6 2 2 6
39096 - 22 22 22 238 238 238 253 253 253 253 253 253
39097 -253 253 253 253 253 253 253 253 253 253 253 253
39098 -253 253 253 253 253 253 253 253 253 253 253 253
39099 -253 253 253 253 253 253 253 253 253 253 253 253
39100 -253 253 253 226 226 226 187 187 187 180 133 36
39101 -216 158 10 236 178 12 239 182 13 236 178 12
39102 -230 174 11 226 170 11 226 170 11 230 174 11
39103 -236 178 12 242 186 14 246 190 14 246 190 14
39104 -246 190 14 246 190 14 246 186 14 239 182 13
39105 -206 162 42 106 106 106 66 66 66 34 34 34
39106 - 14 14 14 6 6 6 0 0 0 0 0 0
39107 - 0 0 0 0 0 0 0 0 0 0 0 0
39108 - 0 0 0 0 0 0 0 0 0 0 0 0
39109 - 0 0 0 0 0 0 0 0 0 6 6 6
39110 - 26 26 26 70 70 70 163 133 67 213 154 11
39111 -236 178 12 246 190 14 246 190 14 246 190 14
39112 -246 190 14 246 190 14 246 190 14 246 190 14
39113 -246 190 14 246 190 14 246 190 14 246 190 14
39114 -246 190 14 246 190 14 246 190 14 241 196 14
39115 -190 146 13 18 14 6 2 2 6 2 2 6
39116 - 46 46 46 246 246 246 253 253 253 253 253 253
39117 -253 253 253 253 253 253 253 253 253 253 253 253
39118 -253 253 253 253 253 253 253 253 253 253 253 253
39119 -253 253 253 253 253 253 253 253 253 253 253 253
39120 -253 253 253 221 221 221 86 86 86 156 107 11
39121 -216 158 10 236 178 12 242 186 14 246 186 14
39122 -242 186 14 239 182 13 239 182 13 242 186 14
39123 -242 186 14 246 186 14 246 190 14 246 190 14
39124 -246 190 14 246 190 14 246 190 14 246 190 14
39125 -242 186 14 225 175 15 142 122 72 66 66 66
39126 - 30 30 30 10 10 10 0 0 0 0 0 0
39127 - 0 0 0 0 0 0 0 0 0 0 0 0
39128 - 0 0 0 0 0 0 0 0 0 0 0 0
39129 - 0 0 0 0 0 0 0 0 0 6 6 6
39130 - 26 26 26 70 70 70 163 133 67 210 150 10
39131 -236 178 12 246 190 14 246 190 14 246 190 14
39132 -246 190 14 246 190 14 246 190 14 246 190 14
39133 -246 190 14 246 190 14 246 190 14 246 190 14
39134 -246 190 14 246 190 14 246 190 14 246 190 14
39135 -232 195 16 121 92 8 34 34 34 106 106 106
39136 -221 221 221 253 253 253 253 253 253 253 253 253
39137 -253 253 253 253 253 253 253 253 253 253 253 253
39138 -253 253 253 253 253 253 253 253 253 253 253 253
39139 -253 253 253 253 253 253 253 253 253 253 253 253
39140 -242 242 242 82 82 82 18 14 6 163 110 8
39141 -216 158 10 236 178 12 242 186 14 246 190 14
39142 -246 190 14 246 190 14 246 190 14 246 190 14
39143 -246 190 14 246 190 14 246 190 14 246 190 14
39144 -246 190 14 246 190 14 246 190 14 246 190 14
39145 -246 190 14 246 190 14 242 186 14 163 133 67
39146 - 46 46 46 18 18 18 6 6 6 0 0 0
39147 - 0 0 0 0 0 0 0 0 0 0 0 0
39148 - 0 0 0 0 0 0 0 0 0 0 0 0
39149 - 0 0 0 0 0 0 0 0 0 10 10 10
39150 - 30 30 30 78 78 78 163 133 67 210 150 10
39151 -236 178 12 246 186 14 246 190 14 246 190 14
39152 -246 190 14 246 190 14 246 190 14 246 190 14
39153 -246 190 14 246 190 14 246 190 14 246 190 14
39154 -246 190 14 246 190 14 246 190 14 246 190 14
39155 -241 196 14 215 174 15 190 178 144 253 253 253
39156 -253 253 253 253 253 253 253 253 253 253 253 253
39157 -253 253 253 253 253 253 253 253 253 253 253 253
39158 -253 253 253 253 253 253 253 253 253 253 253 253
39159 -253 253 253 253 253 253 253 253 253 218 218 218
39160 - 58 58 58 2 2 6 22 18 6 167 114 7
39161 -216 158 10 236 178 12 246 186 14 246 190 14
39162 -246 190 14 246 190 14 246 190 14 246 190 14
39163 -246 190 14 246 190 14 246 190 14 246 190 14
39164 -246 190 14 246 190 14 246 190 14 246 190 14
39165 -246 190 14 246 186 14 242 186 14 190 150 46
39166 - 54 54 54 22 22 22 6 6 6 0 0 0
39167 - 0 0 0 0 0 0 0 0 0 0 0 0
39168 - 0 0 0 0 0 0 0 0 0 0 0 0
39169 - 0 0 0 0 0 0 0 0 0 14 14 14
39170 - 38 38 38 86 86 86 180 133 36 213 154 11
39171 -236 178 12 246 186 14 246 190 14 246 190 14
39172 -246 190 14 246 190 14 246 190 14 246 190 14
39173 -246 190 14 246 190 14 246 190 14 246 190 14
39174 -246 190 14 246 190 14 246 190 14 246 190 14
39175 -246 190 14 232 195 16 190 146 13 214 214 214
39176 -253 253 253 253 253 253 253 253 253 253 253 253
39177 -253 253 253 253 253 253 253 253 253 253 253 253
39178 -253 253 253 253 253 253 253 253 253 253 253 253
39179 -253 253 253 250 250 250 170 170 170 26 26 26
39180 - 2 2 6 2 2 6 37 26 9 163 110 8
39181 -219 162 10 239 182 13 246 186 14 246 190 14
39182 -246 190 14 246 190 14 246 190 14 246 190 14
39183 -246 190 14 246 190 14 246 190 14 246 190 14
39184 -246 190 14 246 190 14 246 190 14 246 190 14
39185 -246 186 14 236 178 12 224 166 10 142 122 72
39186 - 46 46 46 18 18 18 6 6 6 0 0 0
39187 - 0 0 0 0 0 0 0 0 0 0 0 0
39188 - 0 0 0 0 0 0 0 0 0 0 0 0
39189 - 0 0 0 0 0 0 6 6 6 18 18 18
39190 - 50 50 50 109 106 95 192 133 9 224 166 10
39191 -242 186 14 246 190 14 246 190 14 246 190 14
39192 -246 190 14 246 190 14 246 190 14 246 190 14
39193 -246 190 14 246 190 14 246 190 14 246 190 14
39194 -246 190 14 246 190 14 246 190 14 246 190 14
39195 -242 186 14 226 184 13 210 162 10 142 110 46
39196 -226 226 226 253 253 253 253 253 253 253 253 253
39197 -253 253 253 253 253 253 253 253 253 253 253 253
39198 -253 253 253 253 253 253 253 253 253 253 253 253
39199 -198 198 198 66 66 66 2 2 6 2 2 6
39200 - 2 2 6 2 2 6 50 34 6 156 107 11
39201 -219 162 10 239 182 13 246 186 14 246 190 14
39202 -246 190 14 246 190 14 246 190 14 246 190 14
39203 -246 190 14 246 190 14 246 190 14 246 190 14
39204 -246 190 14 246 190 14 246 190 14 242 186 14
39205 -234 174 13 213 154 11 154 122 46 66 66 66
39206 - 30 30 30 10 10 10 0 0 0 0 0 0
39207 - 0 0 0 0 0 0 0 0 0 0 0 0
39208 - 0 0 0 0 0 0 0 0 0 0 0 0
39209 - 0 0 0 0 0 0 6 6 6 22 22 22
39210 - 58 58 58 154 121 60 206 145 10 234 174 13
39211 -242 186 14 246 186 14 246 190 14 246 190 14
39212 -246 190 14 246 190 14 246 190 14 246 190 14
39213 -246 190 14 246 190 14 246 190 14 246 190 14
39214 -246 190 14 246 190 14 246 190 14 246 190 14
39215 -246 186 14 236 178 12 210 162 10 163 110 8
39216 - 61 42 6 138 138 138 218 218 218 250 250 250
39217 -253 253 253 253 253 253 253 253 253 250 250 250
39218 -242 242 242 210 210 210 144 144 144 66 66 66
39219 - 6 6 6 2 2 6 2 2 6 2 2 6
39220 - 2 2 6 2 2 6 61 42 6 163 110 8
39221 -216 158 10 236 178 12 246 190 14 246 190 14
39222 -246 190 14 246 190 14 246 190 14 246 190 14
39223 -246 190 14 246 190 14 246 190 14 246 190 14
39224 -246 190 14 239 182 13 230 174 11 216 158 10
39225 -190 142 34 124 112 88 70 70 70 38 38 38
39226 - 18 18 18 6 6 6 0 0 0 0 0 0
39227 - 0 0 0 0 0 0 0 0 0 0 0 0
39228 - 0 0 0 0 0 0 0 0 0 0 0 0
39229 - 0 0 0 0 0 0 6 6 6 22 22 22
39230 - 62 62 62 168 124 44 206 145 10 224 166 10
39231 -236 178 12 239 182 13 242 186 14 242 186 14
39232 -246 186 14 246 190 14 246 190 14 246 190 14
39233 -246 190 14 246 190 14 246 190 14 246 190 14
39234 -246 190 14 246 190 14 246 190 14 246 190 14
39235 -246 190 14 236 178 12 216 158 10 175 118 6
39236 - 80 54 7 2 2 6 6 6 6 30 30 30
39237 - 54 54 54 62 62 62 50 50 50 38 38 38
39238 - 14 14 14 2 2 6 2 2 6 2 2 6
39239 - 2 2 6 2 2 6 2 2 6 2 2 6
39240 - 2 2 6 6 6 6 80 54 7 167 114 7
39241 -213 154 11 236 178 12 246 190 14 246 190 14
39242 -246 190 14 246 190 14 246 190 14 246 190 14
39243 -246 190 14 242 186 14 239 182 13 239 182 13
39244 -230 174 11 210 150 10 174 135 50 124 112 88
39245 - 82 82 82 54 54 54 34 34 34 18 18 18
39246 - 6 6 6 0 0 0 0 0 0 0 0 0
39247 - 0 0 0 0 0 0 0 0 0 0 0 0
39248 - 0 0 0 0 0 0 0 0 0 0 0 0
39249 - 0 0 0 0 0 0 6 6 6 18 18 18
39250 - 50 50 50 158 118 36 192 133 9 200 144 11
39251 -216 158 10 219 162 10 224 166 10 226 170 11
39252 -230 174 11 236 178 12 239 182 13 239 182 13
39253 -242 186 14 246 186 14 246 190 14 246 190 14
39254 -246 190 14 246 190 14 246 190 14 246 190 14
39255 -246 186 14 230 174 11 210 150 10 163 110 8
39256 -104 69 6 10 10 10 2 2 6 2 2 6
39257 - 2 2 6 2 2 6 2 2 6 2 2 6
39258 - 2 2 6 2 2 6 2 2 6 2 2 6
39259 - 2 2 6 2 2 6 2 2 6 2 2 6
39260 - 2 2 6 6 6 6 91 60 6 167 114 7
39261 -206 145 10 230 174 11 242 186 14 246 190 14
39262 -246 190 14 246 190 14 246 186 14 242 186 14
39263 -239 182 13 230 174 11 224 166 10 213 154 11
39264 -180 133 36 124 112 88 86 86 86 58 58 58
39265 - 38 38 38 22 22 22 10 10 10 6 6 6
39266 - 0 0 0 0 0 0 0 0 0 0 0 0
39267 - 0 0 0 0 0 0 0 0 0 0 0 0
39268 - 0 0 0 0 0 0 0 0 0 0 0 0
39269 - 0 0 0 0 0 0 0 0 0 14 14 14
39270 - 34 34 34 70 70 70 138 110 50 158 118 36
39271 -167 114 7 180 123 7 192 133 9 197 138 11
39272 -200 144 11 206 145 10 213 154 11 219 162 10
39273 -224 166 10 230 174 11 239 182 13 242 186 14
39274 -246 186 14 246 186 14 246 186 14 246 186 14
39275 -239 182 13 216 158 10 185 133 11 152 99 6
39276 -104 69 6 18 14 6 2 2 6 2 2 6
39277 - 2 2 6 2 2 6 2 2 6 2 2 6
39278 - 2 2 6 2 2 6 2 2 6 2 2 6
39279 - 2 2 6 2 2 6 2 2 6 2 2 6
39280 - 2 2 6 6 6 6 80 54 7 152 99 6
39281 -192 133 9 219 162 10 236 178 12 239 182 13
39282 -246 186 14 242 186 14 239 182 13 236 178 12
39283 -224 166 10 206 145 10 192 133 9 154 121 60
39284 - 94 94 94 62 62 62 42 42 42 22 22 22
39285 - 14 14 14 6 6 6 0 0 0 0 0 0
39286 - 0 0 0 0 0 0 0 0 0 0 0 0
39287 - 0 0 0 0 0 0 0 0 0 0 0 0
39288 - 0 0 0 0 0 0 0 0 0 0 0 0
39289 - 0 0 0 0 0 0 0 0 0 6 6 6
39290 - 18 18 18 34 34 34 58 58 58 78 78 78
39291 -101 98 89 124 112 88 142 110 46 156 107 11
39292 -163 110 8 167 114 7 175 118 6 180 123 7
39293 -185 133 11 197 138 11 210 150 10 219 162 10
39294 -226 170 11 236 178 12 236 178 12 234 174 13
39295 -219 162 10 197 138 11 163 110 8 130 83 6
39296 - 91 60 6 10 10 10 2 2 6 2 2 6
39297 - 18 18 18 38 38 38 38 38 38 38 38 38
39298 - 38 38 38 38 38 38 38 38 38 38 38 38
39299 - 38 38 38 38 38 38 26 26 26 2 2 6
39300 - 2 2 6 6 6 6 70 47 6 137 92 6
39301 -175 118 6 200 144 11 219 162 10 230 174 11
39302 -234 174 13 230 174 11 219 162 10 210 150 10
39303 -192 133 9 163 110 8 124 112 88 82 82 82
39304 - 50 50 50 30 30 30 14 14 14 6 6 6
39305 - 0 0 0 0 0 0 0 0 0 0 0 0
39306 - 0 0 0 0 0 0 0 0 0 0 0 0
39307 - 0 0 0 0 0 0 0 0 0 0 0 0
39308 - 0 0 0 0 0 0 0 0 0 0 0 0
39309 - 0 0 0 0 0 0 0 0 0 0 0 0
39310 - 6 6 6 14 14 14 22 22 22 34 34 34
39311 - 42 42 42 58 58 58 74 74 74 86 86 86
39312 -101 98 89 122 102 70 130 98 46 121 87 25
39313 -137 92 6 152 99 6 163 110 8 180 123 7
39314 -185 133 11 197 138 11 206 145 10 200 144 11
39315 -180 123 7 156 107 11 130 83 6 104 69 6
39316 - 50 34 6 54 54 54 110 110 110 101 98 89
39317 - 86 86 86 82 82 82 78 78 78 78 78 78
39318 - 78 78 78 78 78 78 78 78 78 78 78 78
39319 - 78 78 78 82 82 82 86 86 86 94 94 94
39320 -106 106 106 101 101 101 86 66 34 124 80 6
39321 -156 107 11 180 123 7 192 133 9 200 144 11
39322 -206 145 10 200 144 11 192 133 9 175 118 6
39323 -139 102 15 109 106 95 70 70 70 42 42 42
39324 - 22 22 22 10 10 10 0 0 0 0 0 0
39325 - 0 0 0 0 0 0 0 0 0 0 0 0
39326 - 0 0 0 0 0 0 0 0 0 0 0 0
39327 - 0 0 0 0 0 0 0 0 0 0 0 0
39328 - 0 0 0 0 0 0 0 0 0 0 0 0
39329 - 0 0 0 0 0 0 0 0 0 0 0 0
39330 - 0 0 0 0 0 0 6 6 6 10 10 10
39331 - 14 14 14 22 22 22 30 30 30 38 38 38
39332 - 50 50 50 62 62 62 74 74 74 90 90 90
39333 -101 98 89 112 100 78 121 87 25 124 80 6
39334 -137 92 6 152 99 6 152 99 6 152 99 6
39335 -138 86 6 124 80 6 98 70 6 86 66 30
39336 -101 98 89 82 82 82 58 58 58 46 46 46
39337 - 38 38 38 34 34 34 34 34 34 34 34 34
39338 - 34 34 34 34 34 34 34 34 34 34 34 34
39339 - 34 34 34 34 34 34 38 38 38 42 42 42
39340 - 54 54 54 82 82 82 94 86 76 91 60 6
39341 -134 86 6 156 107 11 167 114 7 175 118 6
39342 -175 118 6 167 114 7 152 99 6 121 87 25
39343 -101 98 89 62 62 62 34 34 34 18 18 18
39344 - 6 6 6 0 0 0 0 0 0 0 0 0
39345 - 0 0 0 0 0 0 0 0 0 0 0 0
39346 - 0 0 0 0 0 0 0 0 0 0 0 0
39347 - 0 0 0 0 0 0 0 0 0 0 0 0
39348 - 0 0 0 0 0 0 0 0 0 0 0 0
39349 - 0 0 0 0 0 0 0 0 0 0 0 0
39350 - 0 0 0 0 0 0 0 0 0 0 0 0
39351 - 0 0 0 6 6 6 6 6 6 10 10 10
39352 - 18 18 18 22 22 22 30 30 30 42 42 42
39353 - 50 50 50 66 66 66 86 86 86 101 98 89
39354 -106 86 58 98 70 6 104 69 6 104 69 6
39355 -104 69 6 91 60 6 82 62 34 90 90 90
39356 - 62 62 62 38 38 38 22 22 22 14 14 14
39357 - 10 10 10 10 10 10 10 10 10 10 10 10
39358 - 10 10 10 10 10 10 6 6 6 10 10 10
39359 - 10 10 10 10 10 10 10 10 10 14 14 14
39360 - 22 22 22 42 42 42 70 70 70 89 81 66
39361 - 80 54 7 104 69 6 124 80 6 137 92 6
39362 -134 86 6 116 81 8 100 82 52 86 86 86
39363 - 58 58 58 30 30 30 14 14 14 6 6 6
39364 - 0 0 0 0 0 0 0 0 0 0 0 0
39365 - 0 0 0 0 0 0 0 0 0 0 0 0
39366 - 0 0 0 0 0 0 0 0 0 0 0 0
39367 - 0 0 0 0 0 0 0 0 0 0 0 0
39368 - 0 0 0 0 0 0 0 0 0 0 0 0
39369 - 0 0 0 0 0 0 0 0 0 0 0 0
39370 - 0 0 0 0 0 0 0 0 0 0 0 0
39371 - 0 0 0 0 0 0 0 0 0 0 0 0
39372 - 0 0 0 6 6 6 10 10 10 14 14 14
39373 - 18 18 18 26 26 26 38 38 38 54 54 54
39374 - 70 70 70 86 86 86 94 86 76 89 81 66
39375 - 89 81 66 86 86 86 74 74 74 50 50 50
39376 - 30 30 30 14 14 14 6 6 6 0 0 0
39377 - 0 0 0 0 0 0 0 0 0 0 0 0
39378 - 0 0 0 0 0 0 0 0 0 0 0 0
39379 - 0 0 0 0 0 0 0 0 0 0 0 0
39380 - 6 6 6 18 18 18 34 34 34 58 58 58
39381 - 82 82 82 89 81 66 89 81 66 89 81 66
39382 - 94 86 66 94 86 76 74 74 74 50 50 50
39383 - 26 26 26 14 14 14 6 6 6 0 0 0
39384 - 0 0 0 0 0 0 0 0 0 0 0 0
39385 - 0 0 0 0 0 0 0 0 0 0 0 0
39386 - 0 0 0 0 0 0 0 0 0 0 0 0
39387 - 0 0 0 0 0 0 0 0 0 0 0 0
39388 - 0 0 0 0 0 0 0 0 0 0 0 0
39389 - 0 0 0 0 0 0 0 0 0 0 0 0
39390 - 0 0 0 0 0 0 0 0 0 0 0 0
39391 - 0 0 0 0 0 0 0 0 0 0 0 0
39392 - 0 0 0 0 0 0 0 0 0 0 0 0
39393 - 6 6 6 6 6 6 14 14 14 18 18 18
39394 - 30 30 30 38 38 38 46 46 46 54 54 54
39395 - 50 50 50 42 42 42 30 30 30 18 18 18
39396 - 10 10 10 0 0 0 0 0 0 0 0 0
39397 - 0 0 0 0 0 0 0 0 0 0 0 0
39398 - 0 0 0 0 0 0 0 0 0 0 0 0
39399 - 0 0 0 0 0 0 0 0 0 0 0 0
39400 - 0 0 0 6 6 6 14 14 14 26 26 26
39401 - 38 38 38 50 50 50 58 58 58 58 58 58
39402 - 54 54 54 42 42 42 30 30 30 18 18 18
39403 - 10 10 10 0 0 0 0 0 0 0 0 0
39404 - 0 0 0 0 0 0 0 0 0 0 0 0
39405 - 0 0 0 0 0 0 0 0 0 0 0 0
39406 - 0 0 0 0 0 0 0 0 0 0 0 0
39407 - 0 0 0 0 0 0 0 0 0 0 0 0
39408 - 0 0 0 0 0 0 0 0 0 0 0 0
39409 - 0 0 0 0 0 0 0 0 0 0 0 0
39410 - 0 0 0 0 0 0 0 0 0 0 0 0
39411 - 0 0 0 0 0 0 0 0 0 0 0 0
39412 - 0 0 0 0 0 0 0 0 0 0 0 0
39413 - 0 0 0 0 0 0 0 0 0 6 6 6
39414 - 6 6 6 10 10 10 14 14 14 18 18 18
39415 - 18 18 18 14 14 14 10 10 10 6 6 6
39416 - 0 0 0 0 0 0 0 0 0 0 0 0
39417 - 0 0 0 0 0 0 0 0 0 0 0 0
39418 - 0 0 0 0 0 0 0 0 0 0 0 0
39419 - 0 0 0 0 0 0 0 0 0 0 0 0
39420 - 0 0 0 0 0 0 0 0 0 6 6 6
39421 - 14 14 14 18 18 18 22 22 22 22 22 22
39422 - 18 18 18 14 14 14 10 10 10 6 6 6
39423 - 0 0 0 0 0 0 0 0 0 0 0 0
39424 - 0 0 0 0 0 0 0 0 0 0 0 0
39425 - 0 0 0 0 0 0 0 0 0 0 0 0
39426 - 0 0 0 0 0 0 0 0 0 0 0 0
39427 - 0 0 0 0 0 0 0 0 0 0 0 0
39428 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39429 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39430 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39431 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39432 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39433 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39434 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39435 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39436 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39437 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39438 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39439 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39440 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39441 +4 4 4 4 4 4
39442 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39443 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39444 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39445 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39446 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39447 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39448 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39449 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39450 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39451 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39452 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39453 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39454 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39455 +4 4 4 4 4 4
39456 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39457 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39458 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39459 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39460 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39461 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39462 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39463 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39464 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39465 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39466 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39467 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39468 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39469 +4 4 4 4 4 4
39470 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39471 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39472 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39473 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39474 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39475 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39476 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39477 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39478 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39479 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39480 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39481 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39482 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39483 +4 4 4 4 4 4
39484 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39485 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39486 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39487 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39488 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39489 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39490 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39491 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39492 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39493 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39494 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39495 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39496 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39497 +4 4 4 4 4 4
39498 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39499 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39500 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39501 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39502 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39503 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39504 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39505 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39506 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39507 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39508 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39509 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39510 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39511 +4 4 4 4 4 4
39512 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39513 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39514 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39515 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39516 +4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
39517 +0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
39518 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39519 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39520 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39521 +4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
39522 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
39523 +4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
39524 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39525 +4 4 4 4 4 4
39526 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39527 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39528 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39529 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39530 +4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
39531 +37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
39532 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39533 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39534 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39535 +4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
39536 +2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
39537 +4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
39538 +1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39539 +4 4 4 4 4 4
39540 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39541 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39542 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39543 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39544 +2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
39545 +153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
39546 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
39547 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39548 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39549 +4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
39550 +60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
39551 +4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
39552 +2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
39553 +4 4 4 4 4 4
39554 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39555 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39556 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39557 +4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
39558 +4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
39559 +165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
39560 +1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
39561 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39562 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
39563 +3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
39564 +163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
39565 +0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
39566 +37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
39567 +4 4 4 4 4 4
39568 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39569 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39570 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39571 +4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
39572 +37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
39573 +156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
39574 +125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
39575 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
39576 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
39577 +0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
39578 +174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
39579 +0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
39580 +64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
39581 +4 4 4 4 4 4
39582 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39583 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39584 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
39585 +5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
39586 +156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
39587 +156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
39588 +174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
39589 +1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
39590 +4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
39591 +13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
39592 +174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
39593 +22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
39594 +90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
39595 +4 4 4 4 4 4
39596 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39597 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39598 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
39599 +0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
39600 +174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
39601 +156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
39602 +163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
39603 +4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
39604 +5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
39605 +131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
39606 +190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
39607 +90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
39608 +31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
39609 +4 4 4 4 4 4
39610 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39611 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39612 +4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
39613 +4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
39614 +155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
39615 +167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
39616 +153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
39617 +41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
39618 +1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
39619 +177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
39620 +125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
39621 +136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
39622 +7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
39623 +4 4 4 4 4 4
39624 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39625 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39626 +4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
39627 +125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
39628 +156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
39629 +137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
39630 +156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
39631 +167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
39632 +0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
39633 +166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
39634 +6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
39635 +90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
39636 +1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
39637 +4 4 4 4 4 4
39638 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39639 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39640 +1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
39641 +167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
39642 +157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
39643 +26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
39644 +158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
39645 +165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
39646 +60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
39647 +137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
39648 +52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
39649 +13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
39650 +4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
39651 +4 4 4 4 4 4
39652 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39653 +4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
39654 +0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
39655 +158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
39656 +167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
39657 +4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
39658 +174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
39659 +155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
39660 +137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
39661 +16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
39662 +136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
39663 +2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
39664 +4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
39665 +4 4 4 4 4 4
39666 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39667 +4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
39668 +37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
39669 +157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
39670 +153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
39671 +4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
39672 +125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
39673 +156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
39674 +174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
39675 +4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
39676 +136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
39677 +1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
39678 +2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
39679 +0 0 0 4 4 4
39680 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
39681 +4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
39682 +158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
39683 +153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
39684 +37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
39685 +4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
39686 +4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
39687 +154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
39688 +174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
39689 +32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
39690 +28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
39691 +50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
39692 +0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
39693 +2 0 0 0 0 0
39694 +4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
39695 +0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
39696 +174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
39697 +165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
39698 +4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
39699 +4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
39700 +4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
39701 +174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
39702 +60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
39703 +136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
39704 +22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
39705 +136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
39706 +26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
39707 +37 38 37 0 0 0
39708 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
39709 +13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
39710 +153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
39711 +177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
39712 +4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
39713 +5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
39714 +6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
39715 +166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
39716 +4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
39717 +146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
39718 +71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
39719 +90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
39720 +125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
39721 +85 115 134 4 0 0
39722 +4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
39723 +125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
39724 +155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
39725 +125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
39726 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
39727 +0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
39728 +5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
39729 +37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
39730 +4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
39731 +90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
39732 +2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
39733 +13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
39734 +166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
39735 +60 73 81 4 0 0
39736 +4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
39737 +174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
39738 +156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
39739 +4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
39740 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
39741 +10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
39742 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
39743 +4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
39744 +80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
39745 +28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
39746 +50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
39747 +1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
39748 +167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
39749 +16 19 21 4 0 0
39750 +4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
39751 +158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
39752 +167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
39753 +4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
39754 +4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
39755 +80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
39756 +4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
39757 +3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
39758 +146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
39759 +68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
39760 +136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
39761 +24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
39762 +163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
39763 +4 0 0 4 3 3
39764 +3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
39765 +156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
39766 +155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
39767 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
39768 +2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
39769 +136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
39770 +0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
39771 +0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
39772 +136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
39773 +28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
39774 +22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
39775 +137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
39776 +60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
39777 +3 2 2 4 4 4
39778 +3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
39779 +157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
39780 +37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
39781 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
39782 +0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
39783 +101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
39784 +14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
39785 +22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
39786 +136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
39787 +17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
39788 +2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
39789 +166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
39790 +13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
39791 +4 4 4 4 4 4
39792 +1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
39793 +163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
39794 +4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
39795 +4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
39796 +40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
39797 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
39798 +101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
39799 +136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
39800 +136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
39801 +136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
39802 +3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
39803 +174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
39804 +4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
39805 +4 4 4 4 4 4
39806 +4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
39807 +155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
39808 +4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
39809 +4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
39810 +101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
39811 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
39812 +136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
39813 +136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
39814 +136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
39815 +90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
39816 +85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
39817 +167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
39818 +6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
39819 +5 5 5 5 5 5
39820 +1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
39821 +131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
39822 +6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
39823 +0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
39824 +101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
39825 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
39826 +101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
39827 +136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
39828 +101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
39829 +7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
39830 +174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
39831 +24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
39832 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
39833 +5 5 5 4 4 4
39834 +4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
39835 +131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
39836 +6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
39837 +13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
39838 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
39839 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
39840 +101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
39841 +136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
39842 +136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
39843 +2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
39844 +174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
39845 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
39846 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39847 +4 4 4 4 4 4
39848 +1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
39849 +137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
39850 +4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
39851 +64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
39852 +90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
39853 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
39854 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
39855 +136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
39856 +101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
39857 +37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
39858 +167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
39859 +3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
39860 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39861 +4 4 4 4 4 4
39862 +4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
39863 +153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
39864 +4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
39865 +90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
39866 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
39867 +90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
39868 +101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
39869 +101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
39870 +35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
39871 +154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
39872 +60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
39873 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39874 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39875 +4 4 4 4 4 4
39876 +1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
39877 +153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
39878 +4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
39879 +64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
39880 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
39881 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
39882 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
39883 +136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
39884 +13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
39885 +174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
39886 +6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
39887 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39888 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39889 +4 4 4 4 4 4
39890 +4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
39891 +156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
39892 +4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
39893 +90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
39894 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
39895 +90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
39896 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
39897 +101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
39898 +2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
39899 +174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
39900 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39901 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39902 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39903 +4 4 4 4 4 4
39904 +3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
39905 +158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
39906 +4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
39907 +37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
39908 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
39909 +90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
39910 +101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
39911 +90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
39912 +5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
39913 +167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
39914 +6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
39915 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39916 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39917 +4 4 4 4 4 4
39918 +4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
39919 +163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
39920 +4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
39921 +18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
39922 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
39923 +90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
39924 +101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
39925 +13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
39926 +3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
39927 +174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
39928 +4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
39929 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39930 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39931 +4 4 4 4 4 4
39932 +1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
39933 +167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
39934 +4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
39935 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
39936 +26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
39937 +90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
39938 +101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
39939 +7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
39940 +4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
39941 +174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
39942 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39943 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39944 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39945 +4 4 4 4 4 4
39946 +4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
39947 +174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
39948 +5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
39949 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
39950 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
39951 +90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
39952 +101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
39953 +2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
39954 +3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
39955 +153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
39956 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39957 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39958 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39959 +4 4 4 4 4 4
39960 +1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
39961 +174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
39962 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
39963 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
39964 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
39965 +26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
39966 +35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
39967 +2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
39968 +3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
39969 +131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
39970 +4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39971 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39972 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39973 +4 4 4 4 4 4
39974 +3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
39975 +174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
39976 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
39977 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
39978 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
39979 +26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
39980 +7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
39981 +4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
39982 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
39983 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
39984 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39985 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39986 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39987 +4 4 4 4 4 4
39988 +1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
39989 +174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
39990 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
39991 +18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
39992 +18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
39993 +26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
39994 +28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
39995 +3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
39996 +4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
39997 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
39998 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39999 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40000 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40001 +4 4 4 4 4 4
40002 +4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
40003 +174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
40004 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
40005 +10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
40006 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40007 +18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
40008 +90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
40009 +3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
40010 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
40011 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40012 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40013 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40014 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40015 +4 4 4 4 4 4
40016 +1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
40017 +177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
40018 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
40019 +10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
40020 +26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
40021 +6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
40022 +10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
40023 +2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
40024 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40025 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40026 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40027 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40028 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40029 +4 4 4 4 4 4
40030 +4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
40031 +177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
40032 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
40033 +10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
40034 +26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
40035 +7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
40036 +3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
40037 +21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
40038 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
40039 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40040 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40041 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40042 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40043 +4 4 4 4 4 4
40044 +3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
40045 +190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
40046 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
40047 +10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
40048 +24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
40049 +18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
40050 +28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
40051 +26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
40052 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40053 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40054 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40055 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40056 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40057 +4 4 4 4 4 4
40058 +4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
40059 +190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
40060 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
40061 +10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
40062 +0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
40063 +26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
40064 +37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
40065 +90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
40066 +4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
40067 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40068 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40069 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40070 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40071 +4 4 4 4 4 4
40072 +4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
40073 +193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
40074 +5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
40075 +10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
40076 +1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
40077 +26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
40078 +22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
40079 +26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
40080 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40081 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40082 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40083 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40084 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40085 +4 4 4 4 4 4
40086 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40087 +190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
40088 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
40089 +10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
40090 +2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
40091 +26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
40092 +10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
40093 +26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
40094 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
40095 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40096 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40097 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40098 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40099 +4 4 4 4 4 4
40100 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
40101 +193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
40102 +5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
40103 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
40104 +13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
40105 +10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
40106 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40107 +26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
40108 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40109 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40110 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40111 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40112 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40113 +4 4 4 4 4 4
40114 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40115 +190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
40116 +5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
40117 +28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
40118 +10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
40119 +28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
40120 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40121 +26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
40122 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
40123 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40124 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40125 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40126 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40127 +4 4 4 4 4 4
40128 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
40129 +193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
40130 +5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
40131 +4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
40132 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
40133 +10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
40134 +18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
40135 +22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
40136 +4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
40137 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40138 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40139 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40140 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40141 +4 4 4 4 4 4
40142 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40143 +190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
40144 +6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
40145 +1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
40146 +18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
40147 +10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
40148 +26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
40149 +1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
40150 +5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
40151 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40152 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40153 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40154 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40155 +4 4 4 4 4 4
40156 +4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
40157 +193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
40158 +2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
40159 +4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
40160 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
40161 +10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
40162 +26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
40163 +2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
40164 +3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
40165 +131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40166 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40167 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40168 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40169 +4 4 4 4 4 4
40170 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40171 +193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
40172 +0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
40173 +4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
40174 +13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
40175 +10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
40176 +28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
40177 +4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
40178 +0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
40179 +125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40180 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40181 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40182 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40183 +4 4 4 4 4 4
40184 +4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
40185 +193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
40186 +120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
40187 +4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
40188 +4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
40189 +10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
40190 +4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
40191 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
40192 +24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
40193 +125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
40194 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40195 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40196 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40197 +4 4 4 4 4 4
40198 +4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
40199 +174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
40200 +220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
40201 +3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
40202 +4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
40203 +10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
40204 +1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
40205 +5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
40206 +137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
40207 +125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
40208 +0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40209 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40210 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40211 +4 4 4 4 4 4
40212 +5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
40213 +193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
40214 +220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
40215 +4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
40216 +4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
40217 +22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
40218 +4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40219 +1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
40220 +166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
40221 +125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
40222 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
40223 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40224 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40225 +4 4 4 4 4 4
40226 +4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
40227 +220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
40228 +205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
40229 +24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
40230 +4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
40231 +4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
40232 +4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
40233 +2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
40234 +156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
40235 +137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
40236 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40237 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40238 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40239 +4 4 4 4 4 4
40240 +5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
40241 +125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
40242 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
40243 +193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
40244 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
40245 +1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
40246 +5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
40247 +60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
40248 +153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
40249 +125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
40250 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40251 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40252 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40253 +4 4 4 4 4 4
40254 +4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
40255 +6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
40256 +193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
40257 +244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
40258 +0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
40259 +4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
40260 +3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
40261 +220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
40262 +153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
40263 +13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
40264 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40265 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40266 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40267 +4 4 4 4 4 4
40268 +5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
40269 +6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
40270 +244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
40271 +220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
40272 +3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
40273 +4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
40274 +0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
40275 +177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
40276 +158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
40277 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
40278 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40279 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40280 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40281 +4 4 4 4 4 4
40282 +5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
40283 +6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
40284 +177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
40285 +220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
40286 +125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
40287 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
40288 +37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
40289 +174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
40290 +158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
40291 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
40292 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40293 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40294 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40295 +4 4 4 4 4 4
40296 +4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
40297 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
40298 +26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
40299 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
40300 +244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
40301 +0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
40302 +177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
40303 +174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
40304 +60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
40305 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40306 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40307 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40308 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40309 +4 4 4 4 4 4
40310 +5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
40311 +6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
40312 +6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
40313 +220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
40314 +220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
40315 +0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
40316 +220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
40317 +174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
40318 +4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
40319 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40320 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40321 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40322 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40323 +4 4 4 4 4 4
40324 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
40325 +6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
40326 +4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
40327 +220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
40328 +205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
40329 +60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
40330 +177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
40331 +190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
40332 +4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40333 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40334 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40335 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40336 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40337 +4 4 4 4 4 4
40338 +4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
40339 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
40340 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
40341 +125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
40342 +205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
40343 +193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
40344 +190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
40345 +153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
40346 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40347 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40348 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40349 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40350 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40351 +4 4 4 4 4 4
40352 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
40353 +6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
40354 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
40355 +4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
40356 +205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
40357 +220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
40358 +174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
40359 +6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
40360 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40361 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40362 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40363 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40364 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40365 +4 4 4 4 4 4
40366 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
40367 +5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
40368 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
40369 +4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
40370 +220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
40371 +190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
40372 +193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
40373 +4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
40374 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40375 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40376 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40377 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40378 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40379 +4 4 4 4 4 4
40380 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40381 +4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
40382 +4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
40383 +6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
40384 +174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
40385 +193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
40386 +193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
40387 +6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
40388 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40389 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40390 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40391 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40392 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40393 +4 4 4 4 4 4
40394 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40395 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
40396 +5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
40397 +5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
40398 +6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
40399 +193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
40400 +60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
40401 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
40402 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40403 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40404 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40405 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40406 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40407 +4 4 4 4 4 4
40408 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40409 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40410 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
40411 +5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
40412 +4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
40413 +193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
40414 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
40415 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
40416 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40417 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40418 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40419 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40420 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40421 +4 4 4 4 4 4
40422 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40423 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40424 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
40425 +4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
40426 +6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
40427 +153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
40428 +6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
40429 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40430 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40431 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40432 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40433 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40434 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40435 +4 4 4 4 4 4
40436 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40437 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40438 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40439 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
40440 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
40441 +24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
40442 +6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
40443 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40444 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40445 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40446 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40447 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40448 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40449 +4 4 4 4 4 4
40450 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40451 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40452 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40453 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
40454 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
40455 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
40456 +4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
40457 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40458 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40459 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40460 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40461 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40462 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40463 +4 4 4 4 4 4
40464 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40465 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40466 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40467 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
40468 +5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
40469 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
40470 +6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
40471 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40472 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40473 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40474 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40475 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40476 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40477 +4 4 4 4 4 4
40478 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40479 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40480 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40481 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
40482 +4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
40483 +4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
40484 +6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40485 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40486 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40487 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40488 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40489 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40490 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40491 +4 4 4 4 4 4
40492 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40493 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40494 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40495 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40496 +4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
40497 +6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
40498 +4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
40499 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40500 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40501 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40502 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40503 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40504 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40505 +4 4 4 4 4 4
40506 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40507 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40508 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40509 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40510 +4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
40511 +4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
40512 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40513 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40514 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40515 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40516 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40517 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40518 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40519 +4 4 4 4 4 4
40520 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40521 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40522 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40523 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40524 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
40525 +5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
40526 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40527 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40528 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40529 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40530 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40531 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40532 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40533 +4 4 4 4 4 4
40534 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40535 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40536 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40537 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40538 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
40539 +5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
40540 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40541 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40542 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40543 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40544 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40545 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40546 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40547 +4 4 4 4 4 4
40548 diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
40549 index a197731..6c3af9d 100644
40550 --- a/drivers/video/udlfb.c
40551 +++ b/drivers/video/udlfb.c
40552 @@ -619,11 +619,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
40553 dlfb_urb_completion(urb);
40554
40555 error:
40556 - atomic_add(bytes_sent, &dev->bytes_sent);
40557 - atomic_add(bytes_identical, &dev->bytes_identical);
40558 - atomic_add(width*height*2, &dev->bytes_rendered);
40559 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
40560 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
40561 + atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
40562 end_cycles = get_cycles();
40563 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
40564 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
40565 >> 10)), /* Kcycles */
40566 &dev->cpu_kcycles_used);
40567
40568 @@ -744,11 +744,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
40569 dlfb_urb_completion(urb);
40570
40571 error:
40572 - atomic_add(bytes_sent, &dev->bytes_sent);
40573 - atomic_add(bytes_identical, &dev->bytes_identical);
40574 - atomic_add(bytes_rendered, &dev->bytes_rendered);
40575 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
40576 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
40577 + atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
40578 end_cycles = get_cycles();
40579 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
40580 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
40581 >> 10)), /* Kcycles */
40582 &dev->cpu_kcycles_used);
40583 }
40584 @@ -1368,7 +1368,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
40585 struct fb_info *fb_info = dev_get_drvdata(fbdev);
40586 struct dlfb_data *dev = fb_info->par;
40587 return snprintf(buf, PAGE_SIZE, "%u\n",
40588 - atomic_read(&dev->bytes_rendered));
40589 + atomic_read_unchecked(&dev->bytes_rendered));
40590 }
40591
40592 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
40593 @@ -1376,7 +1376,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
40594 struct fb_info *fb_info = dev_get_drvdata(fbdev);
40595 struct dlfb_data *dev = fb_info->par;
40596 return snprintf(buf, PAGE_SIZE, "%u\n",
40597 - atomic_read(&dev->bytes_identical));
40598 + atomic_read_unchecked(&dev->bytes_identical));
40599 }
40600
40601 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
40602 @@ -1384,7 +1384,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
40603 struct fb_info *fb_info = dev_get_drvdata(fbdev);
40604 struct dlfb_data *dev = fb_info->par;
40605 return snprintf(buf, PAGE_SIZE, "%u\n",
40606 - atomic_read(&dev->bytes_sent));
40607 + atomic_read_unchecked(&dev->bytes_sent));
40608 }
40609
40610 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
40611 @@ -1392,7 +1392,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
40612 struct fb_info *fb_info = dev_get_drvdata(fbdev);
40613 struct dlfb_data *dev = fb_info->par;
40614 return snprintf(buf, PAGE_SIZE, "%u\n",
40615 - atomic_read(&dev->cpu_kcycles_used));
40616 + atomic_read_unchecked(&dev->cpu_kcycles_used));
40617 }
40618
40619 static ssize_t edid_show(
40620 @@ -1449,10 +1449,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
40621 struct fb_info *fb_info = dev_get_drvdata(fbdev);
40622 struct dlfb_data *dev = fb_info->par;
40623
40624 - atomic_set(&dev->bytes_rendered, 0);
40625 - atomic_set(&dev->bytes_identical, 0);
40626 - atomic_set(&dev->bytes_sent, 0);
40627 - atomic_set(&dev->cpu_kcycles_used, 0);
40628 + atomic_set_unchecked(&dev->bytes_rendered, 0);
40629 + atomic_set_unchecked(&dev->bytes_identical, 0);
40630 + atomic_set_unchecked(&dev->bytes_sent, 0);
40631 + atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
40632
40633 return count;
40634 }
40635 diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
40636 index e7f69ef..83af4fd 100644
40637 --- a/drivers/video/uvesafb.c
40638 +++ b/drivers/video/uvesafb.c
40639 @@ -19,6 +19,7 @@
40640 #include <linux/io.h>
40641 #include <linux/mutex.h>
40642 #include <linux/slab.h>
40643 +#include <linux/moduleloader.h>
40644 #include <video/edid.h>
40645 #include <video/uvesafb.h>
40646 #ifdef CONFIG_X86
40647 @@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
40648 NULL,
40649 };
40650
40651 - return call_usermodehelper(v86d_path, argv, envp, 1);
40652 + return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
40653 }
40654
40655 /*
40656 @@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
40657 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
40658 par->pmi_setpal = par->ypan = 0;
40659 } else {
40660 +
40661 +#ifdef CONFIG_PAX_KERNEXEC
40662 +#ifdef CONFIG_MODULES
40663 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
40664 +#endif
40665 + if (!par->pmi_code) {
40666 + par->pmi_setpal = par->ypan = 0;
40667 + return 0;
40668 + }
40669 +#endif
40670 +
40671 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
40672 + task->t.regs.edi);
40673 +
40674 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
40675 + pax_open_kernel();
40676 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
40677 + pax_close_kernel();
40678 +
40679 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
40680 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
40681 +#else
40682 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
40683 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
40684 +#endif
40685 +
40686 printk(KERN_INFO "uvesafb: protected mode interface info at "
40687 "%04x:%04x\n",
40688 (u16)task->t.regs.es, (u16)task->t.regs.edi);
40689 @@ -1821,6 +1844,11 @@ out:
40690 if (par->vbe_modes)
40691 kfree(par->vbe_modes);
40692
40693 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
40694 + if (par->pmi_code)
40695 + module_free_exec(NULL, par->pmi_code);
40696 +#endif
40697 +
40698 framebuffer_release(info);
40699 return err;
40700 }
40701 @@ -1847,6 +1875,12 @@ static int uvesafb_remove(struct platform_device *dev)
40702 kfree(par->vbe_state_orig);
40703 if (par->vbe_state_saved)
40704 kfree(par->vbe_state_saved);
40705 +
40706 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
40707 + if (par->pmi_code)
40708 + module_free_exec(NULL, par->pmi_code);
40709 +#endif
40710 +
40711 }
40712
40713 framebuffer_release(info);
40714 diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
40715 index 501b340..86bd4cf 100644
40716 --- a/drivers/video/vesafb.c
40717 +++ b/drivers/video/vesafb.c
40718 @@ -9,6 +9,7 @@
40719 */
40720
40721 #include <linux/module.h>
40722 +#include <linux/moduleloader.h>
40723 #include <linux/kernel.h>
40724 #include <linux/errno.h>
40725 #include <linux/string.h>
40726 @@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
40727 static int vram_total __initdata; /* Set total amount of memory */
40728 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
40729 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
40730 -static void (*pmi_start)(void) __read_mostly;
40731 -static void (*pmi_pal) (void) __read_mostly;
40732 +static void (*pmi_start)(void) __read_only;
40733 +static void (*pmi_pal) (void) __read_only;
40734 static int depth __read_mostly;
40735 static int vga_compat __read_mostly;
40736 /* --------------------------------------------------------------------- */
40737 @@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
40738 unsigned int size_vmode;
40739 unsigned int size_remap;
40740 unsigned int size_total;
40741 + void *pmi_code = NULL;
40742
40743 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
40744 return -ENODEV;
40745 @@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
40746 size_remap = size_total;
40747 vesafb_fix.smem_len = size_remap;
40748
40749 -#ifndef __i386__
40750 - screen_info.vesapm_seg = 0;
40751 -#endif
40752 -
40753 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
40754 printk(KERN_WARNING
40755 "vesafb: cannot reserve video memory at 0x%lx\n",
40756 @@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
40757 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
40758 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
40759
40760 +#ifdef __i386__
40761 +
40762 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
40763 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
40764 + if (!pmi_code)
40765 +#elif !defined(CONFIG_PAX_KERNEXEC)
40766 + if (0)
40767 +#endif
40768 +
40769 +#endif
40770 + screen_info.vesapm_seg = 0;
40771 +
40772 if (screen_info.vesapm_seg) {
40773 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
40774 - screen_info.vesapm_seg,screen_info.vesapm_off);
40775 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
40776 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
40777 }
40778
40779 if (screen_info.vesapm_seg < 0xc000)
40780 @@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
40781
40782 if (ypan || pmi_setpal) {
40783 unsigned short *pmi_base;
40784 +
40785 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
40786 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
40787 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
40788 +
40789 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
40790 + pax_open_kernel();
40791 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
40792 +#else
40793 + pmi_code = pmi_base;
40794 +#endif
40795 +
40796 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
40797 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
40798 +
40799 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
40800 + pmi_start = ktva_ktla(pmi_start);
40801 + pmi_pal = ktva_ktla(pmi_pal);
40802 + pax_close_kernel();
40803 +#endif
40804 +
40805 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
40806 if (pmi_base[3]) {
40807 printk(KERN_INFO "vesafb: pmi: ports = ");
40808 @@ -488,6 +514,11 @@ static int __init vesafb_probe(struct platform_device *dev)
40809 info->node, info->fix.id);
40810 return 0;
40811 err:
40812 +
40813 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
40814 + module_free_exec(NULL, pmi_code);
40815 +#endif
40816 +
40817 if (info->screen_base)
40818 iounmap(info->screen_base);
40819 framebuffer_release(info);
40820 diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
40821 index 88714ae..16c2e11 100644
40822 --- a/drivers/video/via/via_clock.h
40823 +++ b/drivers/video/via/via_clock.h
40824 @@ -56,7 +56,7 @@ struct via_clock {
40825
40826 void (*set_engine_pll_state)(u8 state);
40827 void (*set_engine_pll)(struct via_pll_config config);
40828 -};
40829 +} __no_const;
40830
40831
40832 static inline u32 get_pll_internal_frequency(u32 ref_freq,
40833 diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
40834 index e56c934..fc22f4b 100644
40835 --- a/drivers/xen/xen-pciback/conf_space.h
40836 +++ b/drivers/xen/xen-pciback/conf_space.h
40837 @@ -44,15 +44,15 @@ struct config_field {
40838 struct {
40839 conf_dword_write write;
40840 conf_dword_read read;
40841 - } dw;
40842 + } __no_const dw;
40843 struct {
40844 conf_word_write write;
40845 conf_word_read read;
40846 - } w;
40847 + } __no_const w;
40848 struct {
40849 conf_byte_write write;
40850 conf_byte_read read;
40851 - } b;
40852 + } __no_const b;
40853 } u;
40854 struct list_head list;
40855 };
40856 diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
40857 index 014c8dd..6f3dfe6 100644
40858 --- a/fs/9p/vfs_inode.c
40859 +++ b/fs/9p/vfs_inode.c
40860 @@ -1303,7 +1303,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
40861 void
40862 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
40863 {
40864 - char *s = nd_get_link(nd);
40865 + const char *s = nd_get_link(nd);
40866
40867 p9_debug(P9_DEBUG_VFS, " %s %s\n",
40868 dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
40869 diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
40870 index e95d1b6..3454244 100644
40871 --- a/fs/Kconfig.binfmt
40872 +++ b/fs/Kconfig.binfmt
40873 @@ -89,7 +89,7 @@ config HAVE_AOUT
40874
40875 config BINFMT_AOUT
40876 tristate "Kernel support for a.out and ECOFF binaries"
40877 - depends on HAVE_AOUT
40878 + depends on HAVE_AOUT && BROKEN
40879 ---help---
40880 A.out (Assembler.OUTput) is a set of formats for libraries and
40881 executables used in the earliest versions of UNIX. Linux used
40882 diff --git a/fs/aio.c b/fs/aio.c
40883 index b9d64d8..86cb1d5 100644
40884 --- a/fs/aio.c
40885 +++ b/fs/aio.c
40886 @@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx *ctx)
40887 size += sizeof(struct io_event) * nr_events;
40888 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
40889
40890 - if (nr_pages < 0)
40891 + if (nr_pages <= 0)
40892 return -EINVAL;
40893
40894 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
40895 @@ -1461,22 +1461,27 @@ static ssize_t aio_fsync(struct kiocb *iocb)
40896 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
40897 {
40898 ssize_t ret;
40899 + struct iovec iovstack;
40900
40901 #ifdef CONFIG_COMPAT
40902 if (compat)
40903 ret = compat_rw_copy_check_uvector(type,
40904 (struct compat_iovec __user *)kiocb->ki_buf,
40905 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
40906 + kiocb->ki_nbytes, 1, &iovstack,
40907 &kiocb->ki_iovec, 1);
40908 else
40909 #endif
40910 ret = rw_copy_check_uvector(type,
40911 (struct iovec __user *)kiocb->ki_buf,
40912 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
40913 + kiocb->ki_nbytes, 1, &iovstack,
40914 &kiocb->ki_iovec, 1);
40915 if (ret < 0)
40916 goto out;
40917
40918 + if (kiocb->ki_iovec == &iovstack) {
40919 + kiocb->ki_inline_vec = iovstack;
40920 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
40921 + }
40922 kiocb->ki_nr_segs = kiocb->ki_nbytes;
40923 kiocb->ki_cur_seg = 0;
40924 /* ki_nbytes/left now reflect bytes instead of segs */
40925 diff --git a/fs/attr.c b/fs/attr.c
40926 index 95053ad..2cc93ca 100644
40927 --- a/fs/attr.c
40928 +++ b/fs/attr.c
40929 @@ -99,6 +99,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
40930 unsigned long limit;
40931
40932 limit = rlimit(RLIMIT_FSIZE);
40933 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
40934 if (limit != RLIM_INFINITY && offset > limit)
40935 goto out_sig;
40936 if (offset > inode->i_sb->s_maxbytes)
40937 diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
40938 index 9c098db..c755da5 100644
40939 --- a/fs/autofs4/waitq.c
40940 +++ b/fs/autofs4/waitq.c
40941 @@ -61,7 +61,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
40942 {
40943 unsigned long sigpipe, flags;
40944 mm_segment_t fs;
40945 - const char *data = (const char *)addr;
40946 + const char __user *data = (const char __force_user *)addr;
40947 ssize_t wr = 0;
40948
40949 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
40950 diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
40951 index 6e6d536..457113a 100644
40952 --- a/fs/befs/linuxvfs.c
40953 +++ b/fs/befs/linuxvfs.c
40954 @@ -502,7 +502,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
40955 {
40956 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
40957 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
40958 - char *link = nd_get_link(nd);
40959 + const char *link = nd_get_link(nd);
40960 if (!IS_ERR(link))
40961 kfree(link);
40962 }
40963 diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
40964 index 1ff9405..f1e376a 100644
40965 --- a/fs/binfmt_aout.c
40966 +++ b/fs/binfmt_aout.c
40967 @@ -16,6 +16,7 @@
40968 #include <linux/string.h>
40969 #include <linux/fs.h>
40970 #include <linux/file.h>
40971 +#include <linux/security.h>
40972 #include <linux/stat.h>
40973 #include <linux/fcntl.h>
40974 #include <linux/ptrace.h>
40975 @@ -86,6 +87,8 @@ static int aout_core_dump(struct coredump_params *cprm)
40976 #endif
40977 # define START_STACK(u) ((void __user *)u.start_stack)
40978
40979 + memset(&dump, 0, sizeof(dump));
40980 +
40981 fs = get_fs();
40982 set_fs(KERNEL_DS);
40983 has_dumped = 1;
40984 @@ -97,10 +100,12 @@ static int aout_core_dump(struct coredump_params *cprm)
40985
40986 /* If the size of the dump file exceeds the rlimit, then see what would happen
40987 if we wrote the stack, but not the data area. */
40988 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
40989 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
40990 dump.u_dsize = 0;
40991
40992 /* Make sure we have enough room to write the stack and data areas. */
40993 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
40994 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
40995 dump.u_ssize = 0;
40996
40997 @@ -234,6 +239,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
40998 rlim = rlimit(RLIMIT_DATA);
40999 if (rlim >= RLIM_INFINITY)
41000 rlim = ~0;
41001 +
41002 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
41003 if (ex.a_data + ex.a_bss > rlim)
41004 return -ENOMEM;
41005
41006 @@ -269,6 +276,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
41007 install_exec_creds(bprm);
41008 current->flags &= ~PF_FORKNOEXEC;
41009
41010 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
41011 + current->mm->pax_flags = 0UL;
41012 +#endif
41013 +
41014 +#ifdef CONFIG_PAX_PAGEEXEC
41015 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
41016 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
41017 +
41018 +#ifdef CONFIG_PAX_EMUTRAMP
41019 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
41020 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
41021 +#endif
41022 +
41023 +#ifdef CONFIG_PAX_MPROTECT
41024 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
41025 + current->mm->pax_flags |= MF_PAX_MPROTECT;
41026 +#endif
41027 +
41028 + }
41029 +#endif
41030 +
41031 if (N_MAGIC(ex) == OMAGIC) {
41032 unsigned long text_addr, map_size;
41033 loff_t pos;
41034 @@ -341,7 +369,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
41035
41036 down_write(&current->mm->mmap_sem);
41037 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
41038 - PROT_READ | PROT_WRITE | PROT_EXEC,
41039 + PROT_READ | PROT_WRITE,
41040 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
41041 fd_offset + ex.a_text);
41042 up_write(&current->mm->mmap_sem);
41043 diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
41044 index 07d096c..5e2a0b3 100644
41045 --- a/fs/binfmt_elf.c
41046 +++ b/fs/binfmt_elf.c
41047 @@ -32,6 +32,7 @@
41048 #include <linux/elf.h>
41049 #include <linux/utsname.h>
41050 #include <linux/coredump.h>
41051 +#include <linux/xattr.h>
41052 #include <asm/uaccess.h>
41053 #include <asm/param.h>
41054 #include <asm/page.h>
41055 @@ -51,6 +52,10 @@ static int elf_core_dump(struct coredump_params *cprm);
41056 #define elf_core_dump NULL
41057 #endif
41058
41059 +#ifdef CONFIG_PAX_MPROTECT
41060 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
41061 +#endif
41062 +
41063 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
41064 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
41065 #else
41066 @@ -70,6 +75,11 @@ static struct linux_binfmt elf_format = {
41067 .load_binary = load_elf_binary,
41068 .load_shlib = load_elf_library,
41069 .core_dump = elf_core_dump,
41070 +
41071 +#ifdef CONFIG_PAX_MPROTECT
41072 + .handle_mprotect= elf_handle_mprotect,
41073 +#endif
41074 +
41075 .min_coredump = ELF_EXEC_PAGESIZE,
41076 };
41077
41078 @@ -77,6 +87,8 @@ static struct linux_binfmt elf_format = {
41079
41080 static int set_brk(unsigned long start, unsigned long end)
41081 {
41082 + unsigned long e = end;
41083 +
41084 start = ELF_PAGEALIGN(start);
41085 end = ELF_PAGEALIGN(end);
41086 if (end > start) {
41087 @@ -87,7 +99,7 @@ static int set_brk(unsigned long start, unsigned long end)
41088 if (BAD_ADDR(addr))
41089 return addr;
41090 }
41091 - current->mm->start_brk = current->mm->brk = end;
41092 + current->mm->start_brk = current->mm->brk = e;
41093 return 0;
41094 }
41095
41096 @@ -148,12 +160,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
41097 elf_addr_t __user *u_rand_bytes;
41098 const char *k_platform = ELF_PLATFORM;
41099 const char *k_base_platform = ELF_BASE_PLATFORM;
41100 - unsigned char k_rand_bytes[16];
41101 + u32 k_rand_bytes[4];
41102 int items;
41103 elf_addr_t *elf_info;
41104 int ei_index = 0;
41105 const struct cred *cred = current_cred();
41106 struct vm_area_struct *vma;
41107 + unsigned long saved_auxv[AT_VECTOR_SIZE];
41108
41109 /*
41110 * In some cases (e.g. Hyper-Threading), we want to avoid L1
41111 @@ -195,8 +208,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
41112 * Generate 16 random bytes for userspace PRNG seeding.
41113 */
41114 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
41115 - u_rand_bytes = (elf_addr_t __user *)
41116 - STACK_ALLOC(p, sizeof(k_rand_bytes));
41117 + srandom32(k_rand_bytes[0] ^ random32());
41118 + srandom32(k_rand_bytes[1] ^ random32());
41119 + srandom32(k_rand_bytes[2] ^ random32());
41120 + srandom32(k_rand_bytes[3] ^ random32());
41121 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
41122 + u_rand_bytes = (elf_addr_t __user *) p;
41123 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
41124 return -EFAULT;
41125
41126 @@ -308,9 +325,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
41127 return -EFAULT;
41128 current->mm->env_end = p;
41129
41130 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
41131 +
41132 /* Put the elf_info on the stack in the right place. */
41133 sp = (elf_addr_t __user *)envp + 1;
41134 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
41135 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
41136 return -EFAULT;
41137 return 0;
41138 }
41139 @@ -381,10 +400,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
41140 {
41141 struct elf_phdr *elf_phdata;
41142 struct elf_phdr *eppnt;
41143 - unsigned long load_addr = 0;
41144 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
41145 int load_addr_set = 0;
41146 unsigned long last_bss = 0, elf_bss = 0;
41147 - unsigned long error = ~0UL;
41148 + unsigned long error = -EINVAL;
41149 unsigned long total_size;
41150 int retval, i, size;
41151
41152 @@ -430,6 +449,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
41153 goto out_close;
41154 }
41155
41156 +#ifdef CONFIG_PAX_SEGMEXEC
41157 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
41158 + pax_task_size = SEGMEXEC_TASK_SIZE;
41159 +#endif
41160 +
41161 eppnt = elf_phdata;
41162 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
41163 if (eppnt->p_type == PT_LOAD) {
41164 @@ -473,8 +497,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
41165 k = load_addr + eppnt->p_vaddr;
41166 if (BAD_ADDR(k) ||
41167 eppnt->p_filesz > eppnt->p_memsz ||
41168 - eppnt->p_memsz > TASK_SIZE ||
41169 - TASK_SIZE - eppnt->p_memsz < k) {
41170 + eppnt->p_memsz > pax_task_size ||
41171 + pax_task_size - eppnt->p_memsz < k) {
41172 error = -ENOMEM;
41173 goto out_close;
41174 }
41175 @@ -528,6 +552,351 @@ out:
41176 return error;
41177 }
41178
41179 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
41180 +static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
41181 +{
41182 + unsigned long pax_flags = 0UL;
41183 +
41184 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
41185 +
41186 +#ifdef CONFIG_PAX_PAGEEXEC
41187 + if (elf_phdata->p_flags & PF_PAGEEXEC)
41188 + pax_flags |= MF_PAX_PAGEEXEC;
41189 +#endif
41190 +
41191 +#ifdef CONFIG_PAX_SEGMEXEC
41192 + if (elf_phdata->p_flags & PF_SEGMEXEC)
41193 + pax_flags |= MF_PAX_SEGMEXEC;
41194 +#endif
41195 +
41196 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
41197 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41198 + if ((__supported_pte_mask & _PAGE_NX))
41199 + pax_flags &= ~MF_PAX_SEGMEXEC;
41200 + else
41201 + pax_flags &= ~MF_PAX_PAGEEXEC;
41202 + }
41203 +#endif
41204 +
41205 +#ifdef CONFIG_PAX_EMUTRAMP
41206 + if (elf_phdata->p_flags & PF_EMUTRAMP)
41207 + pax_flags |= MF_PAX_EMUTRAMP;
41208 +#endif
41209 +
41210 +#ifdef CONFIG_PAX_MPROTECT
41211 + if (elf_phdata->p_flags & PF_MPROTECT)
41212 + pax_flags |= MF_PAX_MPROTECT;
41213 +#endif
41214 +
41215 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
41216 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
41217 + pax_flags |= MF_PAX_RANDMMAP;
41218 +#endif
41219 +
41220 +#endif
41221 +
41222 + return pax_flags;
41223 +}
41224 +
41225 +static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
41226 +{
41227 + unsigned long pax_flags = 0UL;
41228 +
41229 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
41230 +
41231 +#ifdef CONFIG_PAX_PAGEEXEC
41232 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
41233 + pax_flags |= MF_PAX_PAGEEXEC;
41234 +#endif
41235 +
41236 +#ifdef CONFIG_PAX_SEGMEXEC
41237 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
41238 + pax_flags |= MF_PAX_SEGMEXEC;
41239 +#endif
41240 +
41241 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
41242 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41243 + if ((__supported_pte_mask & _PAGE_NX))
41244 + pax_flags &= ~MF_PAX_SEGMEXEC;
41245 + else
41246 + pax_flags &= ~MF_PAX_PAGEEXEC;
41247 + }
41248 +#endif
41249 +
41250 +#ifdef CONFIG_PAX_EMUTRAMP
41251 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
41252 + pax_flags |= MF_PAX_EMUTRAMP;
41253 +#endif
41254 +
41255 +#ifdef CONFIG_PAX_MPROTECT
41256 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
41257 + pax_flags |= MF_PAX_MPROTECT;
41258 +#endif
41259 +
41260 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
41261 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
41262 + pax_flags |= MF_PAX_RANDMMAP;
41263 +#endif
41264 +
41265 +#endif
41266 +
41267 + return pax_flags;
41268 +}
41269 +
41270 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
41271 +{
41272 + unsigned long pax_flags = 0UL;
41273 +
41274 +#ifdef CONFIG_PAX_EI_PAX
41275 +
41276 +#ifdef CONFIG_PAX_PAGEEXEC
41277 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
41278 + pax_flags |= MF_PAX_PAGEEXEC;
41279 +#endif
41280 +
41281 +#ifdef CONFIG_PAX_SEGMEXEC
41282 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
41283 + pax_flags |= MF_PAX_SEGMEXEC;
41284 +#endif
41285 +
41286 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
41287 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41288 + if ((__supported_pte_mask & _PAGE_NX))
41289 + pax_flags &= ~MF_PAX_SEGMEXEC;
41290 + else
41291 + pax_flags &= ~MF_PAX_PAGEEXEC;
41292 + }
41293 +#endif
41294 +
41295 +#ifdef CONFIG_PAX_EMUTRAMP
41296 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
41297 + pax_flags |= MF_PAX_EMUTRAMP;
41298 +#endif
41299 +
41300 +#ifdef CONFIG_PAX_MPROTECT
41301 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
41302 + pax_flags |= MF_PAX_MPROTECT;
41303 +#endif
41304 +
41305 +#ifdef CONFIG_PAX_ASLR
41306 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
41307 + pax_flags |= MF_PAX_RANDMMAP;
41308 +#endif
41309 +
41310 +#else
41311 +
41312 +#ifdef CONFIG_PAX_PAGEEXEC
41313 + pax_flags |= MF_PAX_PAGEEXEC;
41314 +#endif
41315 +
41316 +#ifdef CONFIG_PAX_MPROTECT
41317 + pax_flags |= MF_PAX_MPROTECT;
41318 +#endif
41319 +
41320 +#ifdef CONFIG_PAX_RANDMMAP
41321 + pax_flags |= MF_PAX_RANDMMAP;
41322 +#endif
41323 +
41324 +#ifdef CONFIG_PAX_SEGMEXEC
41325 + if (!(pax_flags & MF_PAX_PAGEEXEC) || !(__supported_pte_mask & _PAGE_NX)) {
41326 + pax_flags &= ~MF_PAX_PAGEEXEC;
41327 + pax_flags |= MF_PAX_SEGMEXEC;
41328 + }
41329 +#endif
41330 +
41331 +#endif
41332 +
41333 + return pax_flags;
41334 +}
41335 +
41336 +static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
41337 +{
41338 +
41339 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
41340 + unsigned long i;
41341 +
41342 + for (i = 0UL; i < elf_ex->e_phnum; i++)
41343 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
41344 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
41345 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
41346 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
41347 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
41348 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
41349 + return ~0UL;
41350 +
41351 +#ifdef CONFIG_PAX_SOFTMODE
41352 + if (pax_softmode)
41353 + return pax_parse_pt_pax_softmode(&elf_phdata[i]);
41354 + else
41355 +#endif
41356 +
41357 + return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
41358 + break;
41359 + }
41360 +#endif
41361 +
41362 + return ~0UL;
41363 +}
41364 +
41365 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
41366 +static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
41367 +{
41368 + unsigned long pax_flags = 0UL;
41369 +
41370 +#ifdef CONFIG_PAX_PAGEEXEC
41371 + if (pax_flags_softmode & MF_PAX_PAGEEXEC)
41372 + pax_flags |= MF_PAX_PAGEEXEC;
41373 +#endif
41374 +
41375 +#ifdef CONFIG_PAX_SEGMEXEC
41376 + if (pax_flags_softmode & MF_PAX_SEGMEXEC)
41377 + pax_flags |= MF_PAX_SEGMEXEC;
41378 +#endif
41379 +
41380 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
41381 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41382 + if ((__supported_pte_mask & _PAGE_NX))
41383 + pax_flags &= ~MF_PAX_SEGMEXEC;
41384 + else
41385 + pax_flags &= ~MF_PAX_PAGEEXEC;
41386 + }
41387 +#endif
41388 +
41389 +#ifdef CONFIG_PAX_EMUTRAMP
41390 + if (pax_flags_softmode & MF_PAX_EMUTRAMP)
41391 + pax_flags |= MF_PAX_EMUTRAMP;
41392 +#endif
41393 +
41394 +#ifdef CONFIG_PAX_MPROTECT
41395 + if (pax_flags_softmode & MF_PAX_MPROTECT)
41396 + pax_flags |= MF_PAX_MPROTECT;
41397 +#endif
41398 +
41399 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
41400 + if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
41401 + pax_flags |= MF_PAX_RANDMMAP;
41402 +#endif
41403 +
41404 + return pax_flags;
41405 +}
41406 +
41407 +static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
41408 +{
41409 + unsigned long pax_flags = 0UL;
41410 +
41411 +#ifdef CONFIG_PAX_PAGEEXEC
41412 + if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
41413 + pax_flags |= MF_PAX_PAGEEXEC;
41414 +#endif
41415 +
41416 +#ifdef CONFIG_PAX_SEGMEXEC
41417 + if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
41418 + pax_flags |= MF_PAX_SEGMEXEC;
41419 +#endif
41420 +
41421 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
41422 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41423 + if ((__supported_pte_mask & _PAGE_NX))
41424 + pax_flags &= ~MF_PAX_SEGMEXEC;
41425 + else
41426 + pax_flags &= ~MF_PAX_PAGEEXEC;
41427 + }
41428 +#endif
41429 +
41430 +#ifdef CONFIG_PAX_EMUTRAMP
41431 + if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
41432 + pax_flags |= MF_PAX_EMUTRAMP;
41433 +#endif
41434 +
41435 +#ifdef CONFIG_PAX_MPROTECT
41436 + if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
41437 + pax_flags |= MF_PAX_MPROTECT;
41438 +#endif
41439 +
41440 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
41441 + if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
41442 + pax_flags |= MF_PAX_RANDMMAP;
41443 +#endif
41444 +
41445 + return pax_flags;
41446 +}
41447 +#endif
41448 +
41449 +static unsigned long pax_parse_xattr_pax(struct file * const file)
41450 +{
41451 +
41452 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
41453 + ssize_t xattr_size, i;
41454 + unsigned char xattr_value[5];
41455 + unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
41456 +
41457 + xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
41458 + if (xattr_size <= 0)
41459 + return ~0UL;
41460 +
41461 + for (i = 0; i < xattr_size; i++)
41462 + switch (xattr_value[i]) {
41463 + default:
41464 + return ~0UL;
41465 +
41466 +#define parse_flag(option1, option2, flag) \
41467 + case option1: \
41468 + pax_flags_hardmode |= MF_PAX_##flag; \
41469 + break; \
41470 + case option2: \
41471 + pax_flags_softmode |= MF_PAX_##flag; \
41472 + break;
41473 +
41474 + parse_flag('p', 'P', PAGEEXEC);
41475 + parse_flag('e', 'E', EMUTRAMP);
41476 + parse_flag('m', 'M', MPROTECT);
41477 + parse_flag('r', 'R', RANDMMAP);
41478 + parse_flag('s', 'S', SEGMEXEC);
41479 +
41480 +#undef parse_flag
41481 + }
41482 +
41483 + if (pax_flags_hardmode & pax_flags_softmode)
41484 + return ~0UL;
41485 +
41486 +#ifdef CONFIG_PAX_SOFTMODE
41487 + if (pax_softmode)
41488 + return pax_parse_xattr_pax_softmode(pax_flags_softmode);
41489 + else
41490 +#endif
41491 +
41492 + return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
41493 +#else
41494 + return ~0UL;
41495 +#endif
41496 +
41497 +}
41498 +
41499 +static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
41500 +{
41501 + unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
41502 +
41503 + pax_flags = pax_parse_ei_pax(elf_ex);
41504 + pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
41505 + xattr_pax_flags = pax_parse_xattr_pax(file);
41506 +
41507 + if (pt_pax_flags == ~0UL)
41508 + pt_pax_flags = xattr_pax_flags;
41509 + else if (xattr_pax_flags == ~0UL)
41510 + xattr_pax_flags = pt_pax_flags;
41511 + if (pt_pax_flags != xattr_pax_flags)
41512 + return -EINVAL;
41513 + if (pt_pax_flags != ~0UL)
41514 + pax_flags = pt_pax_flags;
41515 +
41516 + if (0 > pax_check_flags(&pax_flags))
41517 + return -EINVAL;
41518 +
41519 + current->mm->pax_flags = pax_flags;
41520 + return 0;
41521 +}
41522 +#endif
41523 +
41524 /*
41525 * These are the functions used to load ELF style executables and shared
41526 * libraries. There is no binary dependent code anywhere else.
41527 @@ -544,6 +913,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
41528 {
41529 unsigned int random_variable = 0;
41530
41531 +#ifdef CONFIG_PAX_RANDUSTACK
41532 + if (randomize_va_space)
41533 + return stack_top - current->mm->delta_stack;
41534 +#endif
41535 +
41536 if ((current->flags & PF_RANDOMIZE) &&
41537 !(current->personality & ADDR_NO_RANDOMIZE)) {
41538 random_variable = get_random_int() & STACK_RND_MASK;
41539 @@ -562,7 +936,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
41540 unsigned long load_addr = 0, load_bias = 0;
41541 int load_addr_set = 0;
41542 char * elf_interpreter = NULL;
41543 - unsigned long error;
41544 + unsigned long error = 0;
41545 struct elf_phdr *elf_ppnt, *elf_phdata;
41546 unsigned long elf_bss, elf_brk;
41547 int retval, i;
41548 @@ -572,11 +946,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
41549 unsigned long start_code, end_code, start_data, end_data;
41550 unsigned long reloc_func_desc __maybe_unused = 0;
41551 int executable_stack = EXSTACK_DEFAULT;
41552 - unsigned long def_flags = 0;
41553 struct {
41554 struct elfhdr elf_ex;
41555 struct elfhdr interp_elf_ex;
41556 } *loc;
41557 + unsigned long pax_task_size = TASK_SIZE;
41558
41559 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
41560 if (!loc) {
41561 @@ -713,11 +1087,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
41562
41563 /* OK, This is the point of no return */
41564 current->flags &= ~PF_FORKNOEXEC;
41565 - current->mm->def_flags = def_flags;
41566 +
41567 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
41568 + current->mm->pax_flags = 0UL;
41569 +#endif
41570 +
41571 +#ifdef CONFIG_PAX_DLRESOLVE
41572 + current->mm->call_dl_resolve = 0UL;
41573 +#endif
41574 +
41575 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
41576 + current->mm->call_syscall = 0UL;
41577 +#endif
41578 +
41579 +#ifdef CONFIG_PAX_ASLR
41580 + current->mm->delta_mmap = 0UL;
41581 + current->mm->delta_stack = 0UL;
41582 +#endif
41583 +
41584 + current->mm->def_flags = 0;
41585 +
41586 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
41587 + if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
41588 + send_sig(SIGKILL, current, 0);
41589 + goto out_free_dentry;
41590 + }
41591 +#endif
41592 +
41593 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
41594 + pax_set_initial_flags(bprm);
41595 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
41596 + if (pax_set_initial_flags_func)
41597 + (pax_set_initial_flags_func)(bprm);
41598 +#endif
41599 +
41600 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
41601 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
41602 + current->mm->context.user_cs_limit = PAGE_SIZE;
41603 + current->mm->def_flags |= VM_PAGEEXEC;
41604 + }
41605 +#endif
41606 +
41607 +#ifdef CONFIG_PAX_SEGMEXEC
41608 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
41609 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
41610 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
41611 + pax_task_size = SEGMEXEC_TASK_SIZE;
41612 + current->mm->def_flags |= VM_NOHUGEPAGE;
41613 + }
41614 +#endif
41615 +
41616 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
41617 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41618 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
41619 + put_cpu();
41620 + }
41621 +#endif
41622
41623 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
41624 may depend on the personality. */
41625 SET_PERSONALITY(loc->elf_ex);
41626 +
41627 +#ifdef CONFIG_PAX_ASLR
41628 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
41629 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
41630 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
41631 + }
41632 +#endif
41633 +
41634 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
41635 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41636 + executable_stack = EXSTACK_DISABLE_X;
41637 + current->personality &= ~READ_IMPLIES_EXEC;
41638 + } else
41639 +#endif
41640 +
41641 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
41642 current->personality |= READ_IMPLIES_EXEC;
41643
41644 @@ -808,6 +1252,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
41645 #else
41646 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
41647 #endif
41648 +
41649 +#ifdef CONFIG_PAX_RANDMMAP
41650 + /* PaX: randomize base address at the default exe base if requested */
41651 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
41652 +#ifdef CONFIG_SPARC64
41653 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
41654 +#else
41655 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
41656 +#endif
41657 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
41658 + elf_flags |= MAP_FIXED;
41659 + }
41660 +#endif
41661 +
41662 }
41663
41664 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
41665 @@ -840,9 +1298,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
41666 * allowed task size. Note that p_filesz must always be
41667 * <= p_memsz so it is only necessary to check p_memsz.
41668 */
41669 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
41670 - elf_ppnt->p_memsz > TASK_SIZE ||
41671 - TASK_SIZE - elf_ppnt->p_memsz < k) {
41672 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
41673 + elf_ppnt->p_memsz > pax_task_size ||
41674 + pax_task_size - elf_ppnt->p_memsz < k) {
41675 /* set_brk can never work. Avoid overflows. */
41676 send_sig(SIGKILL, current, 0);
41677 retval = -EINVAL;
41678 @@ -881,11 +1339,35 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
41679 goto out_free_dentry;
41680 }
41681 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
41682 - send_sig(SIGSEGV, current, 0);
41683 - retval = -EFAULT; /* Nobody gets to see this, but.. */
41684 - goto out_free_dentry;
41685 + /*
41686 + * This bss-zeroing can fail if the ELF
41687 + * file specifies odd protections. So
41688 + * we don't check the return value
41689 + */
41690 }
41691
41692 +#ifdef CONFIG_PAX_RANDMMAP
41693 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
41694 + unsigned long start, size;
41695 +
41696 + start = ELF_PAGEALIGN(elf_brk);
41697 + size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
41698 + down_write(&current->mm->mmap_sem);
41699 + retval = -ENOMEM;
41700 + if (!find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
41701 + start = do_mmap(NULL, start, size, PROT_NONE, MAP_ANONYMOUS | MAP_FIXED | MAP_PRIVATE, 0);
41702 + retval = IS_ERR_VALUE(start) ? start : 0;
41703 + }
41704 + up_write(&current->mm->mmap_sem);
41705 + if (retval == 0)
41706 + retval = set_brk(start + size, start + size + PAGE_SIZE);
41707 + if (retval < 0) {
41708 + send_sig(SIGKILL, current, 0);
41709 + goto out_free_dentry;
41710 + }
41711 + }
41712 +#endif
41713 +
41714 if (elf_interpreter) {
41715 unsigned long uninitialized_var(interp_map_addr);
41716
41717 @@ -1098,7 +1580,7 @@ out:
41718 * Decide what to dump of a segment, part, all or none.
41719 */
41720 static unsigned long vma_dump_size(struct vm_area_struct *vma,
41721 - unsigned long mm_flags)
41722 + unsigned long mm_flags, long signr)
41723 {
41724 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
41725
41726 @@ -1132,7 +1614,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
41727 if (vma->vm_file == NULL)
41728 return 0;
41729
41730 - if (FILTER(MAPPED_PRIVATE))
41731 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
41732 goto whole;
41733
41734 /*
41735 @@ -1354,9 +1836,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
41736 {
41737 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
41738 int i = 0;
41739 - do
41740 + do {
41741 i += 2;
41742 - while (auxv[i - 2] != AT_NULL);
41743 + } while (auxv[i - 2] != AT_NULL);
41744 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
41745 }
41746
41747 @@ -1862,14 +2344,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
41748 }
41749
41750 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
41751 - unsigned long mm_flags)
41752 + struct coredump_params *cprm)
41753 {
41754 struct vm_area_struct *vma;
41755 size_t size = 0;
41756
41757 for (vma = first_vma(current, gate_vma); vma != NULL;
41758 vma = next_vma(vma, gate_vma))
41759 - size += vma_dump_size(vma, mm_flags);
41760 + size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
41761 return size;
41762 }
41763
41764 @@ -1963,7 +2445,7 @@ static int elf_core_dump(struct coredump_params *cprm)
41765
41766 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
41767
41768 - offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
41769 + offset += elf_core_vma_data_size(gate_vma, cprm);
41770 offset += elf_core_extra_data_size();
41771 e_shoff = offset;
41772
41773 @@ -1977,10 +2459,12 @@ static int elf_core_dump(struct coredump_params *cprm)
41774 offset = dataoff;
41775
41776 size += sizeof(*elf);
41777 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
41778 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
41779 goto end_coredump;
41780
41781 size += sizeof(*phdr4note);
41782 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
41783 if (size > cprm->limit
41784 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
41785 goto end_coredump;
41786 @@ -1994,7 +2478,7 @@ static int elf_core_dump(struct coredump_params *cprm)
41787 phdr.p_offset = offset;
41788 phdr.p_vaddr = vma->vm_start;
41789 phdr.p_paddr = 0;
41790 - phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
41791 + phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
41792 phdr.p_memsz = vma->vm_end - vma->vm_start;
41793 offset += phdr.p_filesz;
41794 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
41795 @@ -2005,6 +2489,7 @@ static int elf_core_dump(struct coredump_params *cprm)
41796 phdr.p_align = ELF_EXEC_PAGESIZE;
41797
41798 size += sizeof(phdr);
41799 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
41800 if (size > cprm->limit
41801 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
41802 goto end_coredump;
41803 @@ -2029,7 +2514,7 @@ static int elf_core_dump(struct coredump_params *cprm)
41804 unsigned long addr;
41805 unsigned long end;
41806
41807 - end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
41808 + end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
41809
41810 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
41811 struct page *page;
41812 @@ -2038,6 +2523,7 @@ static int elf_core_dump(struct coredump_params *cprm)
41813 page = get_dump_page(addr);
41814 if (page) {
41815 void *kaddr = kmap(page);
41816 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
41817 stop = ((size += PAGE_SIZE) > cprm->limit) ||
41818 !dump_write(cprm->file, kaddr,
41819 PAGE_SIZE);
41820 @@ -2055,6 +2541,7 @@ static int elf_core_dump(struct coredump_params *cprm)
41821
41822 if (e_phnum == PN_XNUM) {
41823 size += sizeof(*shdr4extnum);
41824 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
41825 if (size > cprm->limit
41826 || !dump_write(cprm->file, shdr4extnum,
41827 sizeof(*shdr4extnum)))
41828 @@ -2075,6 +2562,97 @@ out:
41829
41830 #endif /* CONFIG_ELF_CORE */
41831
41832 +#ifdef CONFIG_PAX_MPROTECT
41833 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
41834 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
41835 + * we'll remove VM_MAYWRITE for good on RELRO segments.
41836 + *
41837 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
41838 + * basis because we want to allow the common case and not the special ones.
41839 + */
41840 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
41841 +{
41842 + struct elfhdr elf_h;
41843 + struct elf_phdr elf_p;
41844 + unsigned long i;
41845 + unsigned long oldflags;
41846 + bool is_textrel_rw, is_textrel_rx, is_relro;
41847 +
41848 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
41849 + return;
41850 +
41851 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
41852 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
41853 +
41854 +#ifdef CONFIG_PAX_ELFRELOCS
41855 + /* possible TEXTREL */
41856 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
41857 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
41858 +#else
41859 + is_textrel_rw = false;
41860 + is_textrel_rx = false;
41861 +#endif
41862 +
41863 + /* possible RELRO */
41864 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
41865 +
41866 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
41867 + return;
41868 +
41869 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
41870 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
41871 +
41872 +#ifdef CONFIG_PAX_ETEXECRELOCS
41873 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
41874 +#else
41875 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
41876 +#endif
41877 +
41878 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
41879 + !elf_check_arch(&elf_h) ||
41880 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
41881 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
41882 + return;
41883 +
41884 + for (i = 0UL; i < elf_h.e_phnum; i++) {
41885 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
41886 + return;
41887 + switch (elf_p.p_type) {
41888 + case PT_DYNAMIC:
41889 + if (!is_textrel_rw && !is_textrel_rx)
41890 + continue;
41891 + i = 0UL;
41892 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
41893 + elf_dyn dyn;
41894 +
41895 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
41896 + return;
41897 + if (dyn.d_tag == DT_NULL)
41898 + return;
41899 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
41900 + gr_log_textrel(vma);
41901 + if (is_textrel_rw)
41902 + vma->vm_flags |= VM_MAYWRITE;
41903 + else
41904 + /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
41905 + vma->vm_flags &= ~VM_MAYWRITE;
41906 + return;
41907 + }
41908 + i++;
41909 + }
41910 + return;
41911 +
41912 + case PT_GNU_RELRO:
41913 + if (!is_relro)
41914 + continue;
41915 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
41916 + vma->vm_flags &= ~VM_MAYWRITE;
41917 + return;
41918 + }
41919 + }
41920 +}
41921 +#endif
41922 +
41923 static int __init init_elf_binfmt(void)
41924 {
41925 return register_binfmt(&elf_format);
41926 diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
41927 index 1bffbe0..c8c283e 100644
41928 --- a/fs/binfmt_flat.c
41929 +++ b/fs/binfmt_flat.c
41930 @@ -567,7 +567,9 @@ static int load_flat_file(struct linux_binprm * bprm,
41931 realdatastart = (unsigned long) -ENOMEM;
41932 printk("Unable to allocate RAM for process data, errno %d\n",
41933 (int)-realdatastart);
41934 + down_write(&current->mm->mmap_sem);
41935 do_munmap(current->mm, textpos, text_len);
41936 + up_write(&current->mm->mmap_sem);
41937 ret = realdatastart;
41938 goto err;
41939 }
41940 @@ -591,8 +593,10 @@ static int load_flat_file(struct linux_binprm * bprm,
41941 }
41942 if (IS_ERR_VALUE(result)) {
41943 printk("Unable to read data+bss, errno %d\n", (int)-result);
41944 + down_write(&current->mm->mmap_sem);
41945 do_munmap(current->mm, textpos, text_len);
41946 do_munmap(current->mm, realdatastart, len);
41947 + up_write(&current->mm->mmap_sem);
41948 ret = result;
41949 goto err;
41950 }
41951 @@ -661,8 +665,10 @@ static int load_flat_file(struct linux_binprm * bprm,
41952 }
41953 if (IS_ERR_VALUE(result)) {
41954 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
41955 + down_write(&current->mm->mmap_sem);
41956 do_munmap(current->mm, textpos, text_len + data_len + extra +
41957 MAX_SHARED_LIBS * sizeof(unsigned long));
41958 + up_write(&current->mm->mmap_sem);
41959 ret = result;
41960 goto err;
41961 }
41962 diff --git a/fs/bio.c b/fs/bio.c
41963 index b980ecd..74800bf 100644
41964 --- a/fs/bio.c
41965 +++ b/fs/bio.c
41966 @@ -833,7 +833,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
41967 /*
41968 * Overflow, abort
41969 */
41970 - if (end < start)
41971 + if (end < start || end - start > INT_MAX - nr_pages)
41972 return ERR_PTR(-EINVAL);
41973
41974 nr_pages += end - start;
41975 @@ -1229,7 +1229,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
41976 const int read = bio_data_dir(bio) == READ;
41977 struct bio_map_data *bmd = bio->bi_private;
41978 int i;
41979 - char *p = bmd->sgvecs[0].iov_base;
41980 + char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
41981
41982 __bio_for_each_segment(bvec, bio, i, 0) {
41983 char *addr = page_address(bvec->bv_page);
41984 diff --git a/fs/block_dev.c b/fs/block_dev.c
41985 index 5e9f198..6bf9b1c 100644
41986 --- a/fs/block_dev.c
41987 +++ b/fs/block_dev.c
41988 @@ -703,7 +703,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
41989 else if (bdev->bd_contains == bdev)
41990 return true; /* is a whole device which isn't held */
41991
41992 - else if (whole->bd_holder == bd_may_claim)
41993 + else if (whole->bd_holder == (void *)bd_may_claim)
41994 return true; /* is a partition of a device that is being partitioned */
41995 else if (whole->bd_holder != NULL)
41996 return false; /* is a partition of a held device */
41997 diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
41998 index d986824..af1befd 100644
41999 --- a/fs/btrfs/check-integrity.c
42000 +++ b/fs/btrfs/check-integrity.c
42001 @@ -157,7 +157,7 @@ struct btrfsic_block {
42002 union {
42003 bio_end_io_t *bio;
42004 bh_end_io_t *bh;
42005 - } orig_bio_bh_end_io;
42006 + } __no_const orig_bio_bh_end_io;
42007 int submit_bio_bh_rw;
42008 u64 flush_gen; /* only valid if !never_written */
42009 };
42010 diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
42011 index 0639a55..7d9e07f 100644
42012 --- a/fs/btrfs/ctree.c
42013 +++ b/fs/btrfs/ctree.c
42014 @@ -488,9 +488,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
42015 free_extent_buffer(buf);
42016 add_root_to_dirty_list(root);
42017 } else {
42018 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
42019 - parent_start = parent->start;
42020 - else
42021 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
42022 + if (parent)
42023 + parent_start = parent->start;
42024 + else
42025 + parent_start = 0;
42026 + } else
42027 parent_start = 0;
42028
42029 WARN_ON(trans->transid != btrfs_header_generation(parent));
42030 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
42031 index 892b347..b3db246 100644
42032 --- a/fs/btrfs/inode.c
42033 +++ b/fs/btrfs/inode.c
42034 @@ -6930,7 +6930,7 @@ fail:
42035 return -ENOMEM;
42036 }
42037
42038 -static int btrfs_getattr(struct vfsmount *mnt,
42039 +int btrfs_getattr(struct vfsmount *mnt,
42040 struct dentry *dentry, struct kstat *stat)
42041 {
42042 struct inode *inode = dentry->d_inode;
42043 @@ -6944,6 +6944,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
42044 return 0;
42045 }
42046
42047 +EXPORT_SYMBOL(btrfs_getattr);
42048 +
42049 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
42050 +{
42051 + return BTRFS_I(inode)->root->anon_dev;
42052 +}
42053 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
42054 +
42055 /*
42056 * If a file is moved, it will inherit the cow and compression flags of the new
42057 * directory.
42058 diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
42059 index d8b5471..e5463d7 100644
42060 --- a/fs/btrfs/ioctl.c
42061 +++ b/fs/btrfs/ioctl.c
42062 @@ -2783,9 +2783,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
42063 for (i = 0; i < num_types; i++) {
42064 struct btrfs_space_info *tmp;
42065
42066 + /* Don't copy in more than we allocated */
42067 if (!slot_count)
42068 break;
42069
42070 + slot_count--;
42071 +
42072 info = NULL;
42073 rcu_read_lock();
42074 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
42075 @@ -2807,15 +2810,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
42076 memcpy(dest, &space, sizeof(space));
42077 dest++;
42078 space_args.total_spaces++;
42079 - slot_count--;
42080 }
42081 - if (!slot_count)
42082 - break;
42083 }
42084 up_read(&info->groups_sem);
42085 }
42086
42087 - user_dest = (struct btrfs_ioctl_space_info *)
42088 + user_dest = (struct btrfs_ioctl_space_info __user *)
42089 (arg + sizeof(struct btrfs_ioctl_space_args));
42090
42091 if (copy_to_user(user_dest, dest_orig, alloc_size))
42092 diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
42093 index 8c1aae2..1e46446 100644
42094 --- a/fs/btrfs/relocation.c
42095 +++ b/fs/btrfs/relocation.c
42096 @@ -1244,7 +1244,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
42097 }
42098 spin_unlock(&rc->reloc_root_tree.lock);
42099
42100 - BUG_ON((struct btrfs_root *)node->data != root);
42101 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
42102
42103 if (!del) {
42104 spin_lock(&rc->reloc_root_tree.lock);
42105 diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
42106 index 622f469..e8d2d55 100644
42107 --- a/fs/cachefiles/bind.c
42108 +++ b/fs/cachefiles/bind.c
42109 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
42110 args);
42111
42112 /* start by checking things over */
42113 - ASSERT(cache->fstop_percent >= 0 &&
42114 - cache->fstop_percent < cache->fcull_percent &&
42115 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
42116 cache->fcull_percent < cache->frun_percent &&
42117 cache->frun_percent < 100);
42118
42119 - ASSERT(cache->bstop_percent >= 0 &&
42120 - cache->bstop_percent < cache->bcull_percent &&
42121 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
42122 cache->bcull_percent < cache->brun_percent &&
42123 cache->brun_percent < 100);
42124
42125 diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
42126 index 0a1467b..6a53245 100644
42127 --- a/fs/cachefiles/daemon.c
42128 +++ b/fs/cachefiles/daemon.c
42129 @@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
42130 if (n > buflen)
42131 return -EMSGSIZE;
42132
42133 - if (copy_to_user(_buffer, buffer, n) != 0)
42134 + if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
42135 return -EFAULT;
42136
42137 return n;
42138 @@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
42139 if (test_bit(CACHEFILES_DEAD, &cache->flags))
42140 return -EIO;
42141
42142 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
42143 + if (datalen > PAGE_SIZE - 1)
42144 return -EOPNOTSUPP;
42145
42146 /* drag the command string into the kernel so we can parse it */
42147 @@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
42148 if (args[0] != '%' || args[1] != '\0')
42149 return -EINVAL;
42150
42151 - if (fstop < 0 || fstop >= cache->fcull_percent)
42152 + if (fstop >= cache->fcull_percent)
42153 return cachefiles_daemon_range_error(cache, args);
42154
42155 cache->fstop_percent = fstop;
42156 @@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
42157 if (args[0] != '%' || args[1] != '\0')
42158 return -EINVAL;
42159
42160 - if (bstop < 0 || bstop >= cache->bcull_percent)
42161 + if (bstop >= cache->bcull_percent)
42162 return cachefiles_daemon_range_error(cache, args);
42163
42164 cache->bstop_percent = bstop;
42165 diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
42166 index bd6bc1b..b627b53 100644
42167 --- a/fs/cachefiles/internal.h
42168 +++ b/fs/cachefiles/internal.h
42169 @@ -57,7 +57,7 @@ struct cachefiles_cache {
42170 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
42171 struct rb_root active_nodes; /* active nodes (can't be culled) */
42172 rwlock_t active_lock; /* lock for active_nodes */
42173 - atomic_t gravecounter; /* graveyard uniquifier */
42174 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
42175 unsigned frun_percent; /* when to stop culling (% files) */
42176 unsigned fcull_percent; /* when to start culling (% files) */
42177 unsigned fstop_percent; /* when to stop allocating (% files) */
42178 @@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
42179 * proc.c
42180 */
42181 #ifdef CONFIG_CACHEFILES_HISTOGRAM
42182 -extern atomic_t cachefiles_lookup_histogram[HZ];
42183 -extern atomic_t cachefiles_mkdir_histogram[HZ];
42184 -extern atomic_t cachefiles_create_histogram[HZ];
42185 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
42186 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
42187 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
42188
42189 extern int __init cachefiles_proc_init(void);
42190 extern void cachefiles_proc_cleanup(void);
42191 static inline
42192 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
42193 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
42194 {
42195 unsigned long jif = jiffies - start_jif;
42196 if (jif >= HZ)
42197 jif = HZ - 1;
42198 - atomic_inc(&histogram[jif]);
42199 + atomic_inc_unchecked(&histogram[jif]);
42200 }
42201
42202 #else
42203 diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
42204 index a0358c2..d6137f2 100644
42205 --- a/fs/cachefiles/namei.c
42206 +++ b/fs/cachefiles/namei.c
42207 @@ -318,7 +318,7 @@ try_again:
42208 /* first step is to make up a grave dentry in the graveyard */
42209 sprintf(nbuffer, "%08x%08x",
42210 (uint32_t) get_seconds(),
42211 - (uint32_t) atomic_inc_return(&cache->gravecounter));
42212 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
42213
42214 /* do the multiway lock magic */
42215 trap = lock_rename(cache->graveyard, dir);
42216 diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
42217 index eccd339..4c1d995 100644
42218 --- a/fs/cachefiles/proc.c
42219 +++ b/fs/cachefiles/proc.c
42220 @@ -14,9 +14,9 @@
42221 #include <linux/seq_file.h>
42222 #include "internal.h"
42223
42224 -atomic_t cachefiles_lookup_histogram[HZ];
42225 -atomic_t cachefiles_mkdir_histogram[HZ];
42226 -atomic_t cachefiles_create_histogram[HZ];
42227 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
42228 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
42229 +atomic_unchecked_t cachefiles_create_histogram[HZ];
42230
42231 /*
42232 * display the latency histogram
42233 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
42234 return 0;
42235 default:
42236 index = (unsigned long) v - 3;
42237 - x = atomic_read(&cachefiles_lookup_histogram[index]);
42238 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
42239 - z = atomic_read(&cachefiles_create_histogram[index]);
42240 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
42241 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
42242 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
42243 if (x == 0 && y == 0 && z == 0)
42244 return 0;
42245
42246 diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
42247 index 0e3c092..818480e 100644
42248 --- a/fs/cachefiles/rdwr.c
42249 +++ b/fs/cachefiles/rdwr.c
42250 @@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
42251 old_fs = get_fs();
42252 set_fs(KERNEL_DS);
42253 ret = file->f_op->write(
42254 - file, (const void __user *) data, len, &pos);
42255 + file, (const void __force_user *) data, len, &pos);
42256 set_fs(old_fs);
42257 kunmap(page);
42258 if (ret != len)
42259 diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
42260 index 3e8094b..cb3ff3d 100644
42261 --- a/fs/ceph/dir.c
42262 +++ b/fs/ceph/dir.c
42263 @@ -244,7 +244,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
42264 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
42265 struct ceph_mds_client *mdsc = fsc->mdsc;
42266 unsigned frag = fpos_frag(filp->f_pos);
42267 - int off = fpos_off(filp->f_pos);
42268 + unsigned int off = fpos_off(filp->f_pos);
42269 int err;
42270 u32 ftype;
42271 struct ceph_mds_reply_info_parsed *rinfo;
42272 @@ -598,7 +598,7 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
42273 if (nd &&
42274 (nd->flags & LOOKUP_OPEN) &&
42275 !(nd->intent.open.flags & O_CREAT)) {
42276 - int mode = nd->intent.open.create_mode & ~current->fs->umask;
42277 + int mode = nd->intent.open.create_mode & ~current_umask();
42278 return ceph_lookup_open(dir, dentry, nd, mode, 1);
42279 }
42280
42281 diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
42282 index 24b3dfc..3cd5454 100644
42283 --- a/fs/cifs/cifs_debug.c
42284 +++ b/fs/cifs/cifs_debug.c
42285 @@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
42286
42287 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
42288 #ifdef CONFIG_CIFS_STATS2
42289 - atomic_set(&totBufAllocCount, 0);
42290 - atomic_set(&totSmBufAllocCount, 0);
42291 + atomic_set_unchecked(&totBufAllocCount, 0);
42292 + atomic_set_unchecked(&totSmBufAllocCount, 0);
42293 #endif /* CONFIG_CIFS_STATS2 */
42294 spin_lock(&cifs_tcp_ses_lock);
42295 list_for_each(tmp1, &cifs_tcp_ses_list) {
42296 @@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
42297 tcon = list_entry(tmp3,
42298 struct cifs_tcon,
42299 tcon_list);
42300 - atomic_set(&tcon->num_smbs_sent, 0);
42301 - atomic_set(&tcon->num_writes, 0);
42302 - atomic_set(&tcon->num_reads, 0);
42303 - atomic_set(&tcon->num_oplock_brks, 0);
42304 - atomic_set(&tcon->num_opens, 0);
42305 - atomic_set(&tcon->num_posixopens, 0);
42306 - atomic_set(&tcon->num_posixmkdirs, 0);
42307 - atomic_set(&tcon->num_closes, 0);
42308 - atomic_set(&tcon->num_deletes, 0);
42309 - atomic_set(&tcon->num_mkdirs, 0);
42310 - atomic_set(&tcon->num_rmdirs, 0);
42311 - atomic_set(&tcon->num_renames, 0);
42312 - atomic_set(&tcon->num_t2renames, 0);
42313 - atomic_set(&tcon->num_ffirst, 0);
42314 - atomic_set(&tcon->num_fnext, 0);
42315 - atomic_set(&tcon->num_fclose, 0);
42316 - atomic_set(&tcon->num_hardlinks, 0);
42317 - atomic_set(&tcon->num_symlinks, 0);
42318 - atomic_set(&tcon->num_locks, 0);
42319 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
42320 + atomic_set_unchecked(&tcon->num_writes, 0);
42321 + atomic_set_unchecked(&tcon->num_reads, 0);
42322 + atomic_set_unchecked(&tcon->num_oplock_brks, 0);
42323 + atomic_set_unchecked(&tcon->num_opens, 0);
42324 + atomic_set_unchecked(&tcon->num_posixopens, 0);
42325 + atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
42326 + atomic_set_unchecked(&tcon->num_closes, 0);
42327 + atomic_set_unchecked(&tcon->num_deletes, 0);
42328 + atomic_set_unchecked(&tcon->num_mkdirs, 0);
42329 + atomic_set_unchecked(&tcon->num_rmdirs, 0);
42330 + atomic_set_unchecked(&tcon->num_renames, 0);
42331 + atomic_set_unchecked(&tcon->num_t2renames, 0);
42332 + atomic_set_unchecked(&tcon->num_ffirst, 0);
42333 + atomic_set_unchecked(&tcon->num_fnext, 0);
42334 + atomic_set_unchecked(&tcon->num_fclose, 0);
42335 + atomic_set_unchecked(&tcon->num_hardlinks, 0);
42336 + atomic_set_unchecked(&tcon->num_symlinks, 0);
42337 + atomic_set_unchecked(&tcon->num_locks, 0);
42338 }
42339 }
42340 }
42341 @@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
42342 smBufAllocCount.counter, cifs_min_small);
42343 #ifdef CONFIG_CIFS_STATS2
42344 seq_printf(m, "Total Large %d Small %d Allocations\n",
42345 - atomic_read(&totBufAllocCount),
42346 - atomic_read(&totSmBufAllocCount));
42347 + atomic_read_unchecked(&totBufAllocCount),
42348 + atomic_read_unchecked(&totSmBufAllocCount));
42349 #endif /* CONFIG_CIFS_STATS2 */
42350
42351 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
42352 @@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
42353 if (tcon->need_reconnect)
42354 seq_puts(m, "\tDISCONNECTED ");
42355 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
42356 - atomic_read(&tcon->num_smbs_sent),
42357 - atomic_read(&tcon->num_oplock_brks));
42358 + atomic_read_unchecked(&tcon->num_smbs_sent),
42359 + atomic_read_unchecked(&tcon->num_oplock_brks));
42360 seq_printf(m, "\nReads: %d Bytes: %lld",
42361 - atomic_read(&tcon->num_reads),
42362 + atomic_read_unchecked(&tcon->num_reads),
42363 (long long)(tcon->bytes_read));
42364 seq_printf(m, "\nWrites: %d Bytes: %lld",
42365 - atomic_read(&tcon->num_writes),
42366 + atomic_read_unchecked(&tcon->num_writes),
42367 (long long)(tcon->bytes_written));
42368 seq_printf(m, "\nFlushes: %d",
42369 - atomic_read(&tcon->num_flushes));
42370 + atomic_read_unchecked(&tcon->num_flushes));
42371 seq_printf(m, "\nLocks: %d HardLinks: %d "
42372 "Symlinks: %d",
42373 - atomic_read(&tcon->num_locks),
42374 - atomic_read(&tcon->num_hardlinks),
42375 - atomic_read(&tcon->num_symlinks));
42376 + atomic_read_unchecked(&tcon->num_locks),
42377 + atomic_read_unchecked(&tcon->num_hardlinks),
42378 + atomic_read_unchecked(&tcon->num_symlinks));
42379 seq_printf(m, "\nOpens: %d Closes: %d "
42380 "Deletes: %d",
42381 - atomic_read(&tcon->num_opens),
42382 - atomic_read(&tcon->num_closes),
42383 - atomic_read(&tcon->num_deletes));
42384 + atomic_read_unchecked(&tcon->num_opens),
42385 + atomic_read_unchecked(&tcon->num_closes),
42386 + atomic_read_unchecked(&tcon->num_deletes));
42387 seq_printf(m, "\nPosix Opens: %d "
42388 "Posix Mkdirs: %d",
42389 - atomic_read(&tcon->num_posixopens),
42390 - atomic_read(&tcon->num_posixmkdirs));
42391 + atomic_read_unchecked(&tcon->num_posixopens),
42392 + atomic_read_unchecked(&tcon->num_posixmkdirs));
42393 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
42394 - atomic_read(&tcon->num_mkdirs),
42395 - atomic_read(&tcon->num_rmdirs));
42396 + atomic_read_unchecked(&tcon->num_mkdirs),
42397 + atomic_read_unchecked(&tcon->num_rmdirs));
42398 seq_printf(m, "\nRenames: %d T2 Renames %d",
42399 - atomic_read(&tcon->num_renames),
42400 - atomic_read(&tcon->num_t2renames));
42401 + atomic_read_unchecked(&tcon->num_renames),
42402 + atomic_read_unchecked(&tcon->num_t2renames));
42403 seq_printf(m, "\nFindFirst: %d FNext %d "
42404 "FClose %d",
42405 - atomic_read(&tcon->num_ffirst),
42406 - atomic_read(&tcon->num_fnext),
42407 - atomic_read(&tcon->num_fclose));
42408 + atomic_read_unchecked(&tcon->num_ffirst),
42409 + atomic_read_unchecked(&tcon->num_fnext),
42410 + atomic_read_unchecked(&tcon->num_fclose));
42411 }
42412 }
42413 }
42414 diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
42415 index b1fd382..df45435 100644
42416 --- a/fs/cifs/cifsfs.c
42417 +++ b/fs/cifs/cifsfs.c
42418 @@ -989,7 +989,7 @@ cifs_init_request_bufs(void)
42419 cifs_req_cachep = kmem_cache_create("cifs_request",
42420 CIFSMaxBufSize +
42421 MAX_CIFS_HDR_SIZE, 0,
42422 - SLAB_HWCACHE_ALIGN, NULL);
42423 + SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
42424 if (cifs_req_cachep == NULL)
42425 return -ENOMEM;
42426
42427 @@ -1016,7 +1016,7 @@ cifs_init_request_bufs(void)
42428 efficient to alloc 1 per page off the slab compared to 17K (5page)
42429 alloc of large cifs buffers even when page debugging is on */
42430 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
42431 - MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
42432 + MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
42433 NULL);
42434 if (cifs_sm_req_cachep == NULL) {
42435 mempool_destroy(cifs_req_poolp);
42436 @@ -1101,8 +1101,8 @@ init_cifs(void)
42437 atomic_set(&bufAllocCount, 0);
42438 atomic_set(&smBufAllocCount, 0);
42439 #ifdef CONFIG_CIFS_STATS2
42440 - atomic_set(&totBufAllocCount, 0);
42441 - atomic_set(&totSmBufAllocCount, 0);
42442 + atomic_set_unchecked(&totBufAllocCount, 0);
42443 + atomic_set_unchecked(&totSmBufAllocCount, 0);
42444 #endif /* CONFIG_CIFS_STATS2 */
42445
42446 atomic_set(&midCount, 0);
42447 diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
42448 index 76e7d8b..4814992 100644
42449 --- a/fs/cifs/cifsglob.h
42450 +++ b/fs/cifs/cifsglob.h
42451 @@ -392,28 +392,28 @@ struct cifs_tcon {
42452 __u16 Flags; /* optional support bits */
42453 enum statusEnum tidStatus;
42454 #ifdef CONFIG_CIFS_STATS
42455 - atomic_t num_smbs_sent;
42456 - atomic_t num_writes;
42457 - atomic_t num_reads;
42458 - atomic_t num_flushes;
42459 - atomic_t num_oplock_brks;
42460 - atomic_t num_opens;
42461 - atomic_t num_closes;
42462 - atomic_t num_deletes;
42463 - atomic_t num_mkdirs;
42464 - atomic_t num_posixopens;
42465 - atomic_t num_posixmkdirs;
42466 - atomic_t num_rmdirs;
42467 - atomic_t num_renames;
42468 - atomic_t num_t2renames;
42469 - atomic_t num_ffirst;
42470 - atomic_t num_fnext;
42471 - atomic_t num_fclose;
42472 - atomic_t num_hardlinks;
42473 - atomic_t num_symlinks;
42474 - atomic_t num_locks;
42475 - atomic_t num_acl_get;
42476 - atomic_t num_acl_set;
42477 + atomic_unchecked_t num_smbs_sent;
42478 + atomic_unchecked_t num_writes;
42479 + atomic_unchecked_t num_reads;
42480 + atomic_unchecked_t num_flushes;
42481 + atomic_unchecked_t num_oplock_brks;
42482 + atomic_unchecked_t num_opens;
42483 + atomic_unchecked_t num_closes;
42484 + atomic_unchecked_t num_deletes;
42485 + atomic_unchecked_t num_mkdirs;
42486 + atomic_unchecked_t num_posixopens;
42487 + atomic_unchecked_t num_posixmkdirs;
42488 + atomic_unchecked_t num_rmdirs;
42489 + atomic_unchecked_t num_renames;
42490 + atomic_unchecked_t num_t2renames;
42491 + atomic_unchecked_t num_ffirst;
42492 + atomic_unchecked_t num_fnext;
42493 + atomic_unchecked_t num_fclose;
42494 + atomic_unchecked_t num_hardlinks;
42495 + atomic_unchecked_t num_symlinks;
42496 + atomic_unchecked_t num_locks;
42497 + atomic_unchecked_t num_acl_get;
42498 + atomic_unchecked_t num_acl_set;
42499 #ifdef CONFIG_CIFS_STATS2
42500 unsigned long long time_writes;
42501 unsigned long long time_reads;
42502 @@ -628,7 +628,7 @@ convert_delimiter(char *path, char delim)
42503 }
42504
42505 #ifdef CONFIG_CIFS_STATS
42506 -#define cifs_stats_inc atomic_inc
42507 +#define cifs_stats_inc atomic_inc_unchecked
42508
42509 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
42510 unsigned int bytes)
42511 @@ -987,8 +987,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
42512 /* Various Debug counters */
42513 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
42514 #ifdef CONFIG_CIFS_STATS2
42515 -GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
42516 -GLOBAL_EXTERN atomic_t totSmBufAllocCount;
42517 +GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
42518 +GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
42519 #endif
42520 GLOBAL_EXTERN atomic_t smBufAllocCount;
42521 GLOBAL_EXTERN atomic_t midCount;
42522 diff --git a/fs/cifs/link.c b/fs/cifs/link.c
42523 index 6b0e064..94e6c3c 100644
42524 --- a/fs/cifs/link.c
42525 +++ b/fs/cifs/link.c
42526 @@ -600,7 +600,7 @@ symlink_exit:
42527
42528 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
42529 {
42530 - char *p = nd_get_link(nd);
42531 + const char *p = nd_get_link(nd);
42532 if (!IS_ERR(p))
42533 kfree(p);
42534 }
42535 diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
42536 index 703ef5c..2a44ed5 100644
42537 --- a/fs/cifs/misc.c
42538 +++ b/fs/cifs/misc.c
42539 @@ -156,7 +156,7 @@ cifs_buf_get(void)
42540 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
42541 atomic_inc(&bufAllocCount);
42542 #ifdef CONFIG_CIFS_STATS2
42543 - atomic_inc(&totBufAllocCount);
42544 + atomic_inc_unchecked(&totBufAllocCount);
42545 #endif /* CONFIG_CIFS_STATS2 */
42546 }
42547
42548 @@ -191,7 +191,7 @@ cifs_small_buf_get(void)
42549 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
42550 atomic_inc(&smBufAllocCount);
42551 #ifdef CONFIG_CIFS_STATS2
42552 - atomic_inc(&totSmBufAllocCount);
42553 + atomic_inc_unchecked(&totSmBufAllocCount);
42554 #endif /* CONFIG_CIFS_STATS2 */
42555
42556 }
42557 diff --git a/fs/coda/cache.c b/fs/coda/cache.c
42558 index 6901578..d402eb5 100644
42559 --- a/fs/coda/cache.c
42560 +++ b/fs/coda/cache.c
42561 @@ -24,7 +24,7 @@
42562 #include "coda_linux.h"
42563 #include "coda_cache.h"
42564
42565 -static atomic_t permission_epoch = ATOMIC_INIT(0);
42566 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
42567
42568 /* replace or extend an acl cache hit */
42569 void coda_cache_enter(struct inode *inode, int mask)
42570 @@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
42571 struct coda_inode_info *cii = ITOC(inode);
42572
42573 spin_lock(&cii->c_lock);
42574 - cii->c_cached_epoch = atomic_read(&permission_epoch);
42575 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
42576 if (cii->c_uid != current_fsuid()) {
42577 cii->c_uid = current_fsuid();
42578 cii->c_cached_perm = mask;
42579 @@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
42580 {
42581 struct coda_inode_info *cii = ITOC(inode);
42582 spin_lock(&cii->c_lock);
42583 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
42584 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
42585 spin_unlock(&cii->c_lock);
42586 }
42587
42588 /* remove all acl caches */
42589 void coda_cache_clear_all(struct super_block *sb)
42590 {
42591 - atomic_inc(&permission_epoch);
42592 + atomic_inc_unchecked(&permission_epoch);
42593 }
42594
42595
42596 @@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
42597 spin_lock(&cii->c_lock);
42598 hit = (mask & cii->c_cached_perm) == mask &&
42599 cii->c_uid == current_fsuid() &&
42600 - cii->c_cached_epoch == atomic_read(&permission_epoch);
42601 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
42602 spin_unlock(&cii->c_lock);
42603
42604 return hit;
42605 diff --git a/fs/compat.c b/fs/compat.c
42606 index 07880ba..3fb2862 100644
42607 --- a/fs/compat.c
42608 +++ b/fs/compat.c
42609 @@ -491,7 +491,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
42610
42611 set_fs(KERNEL_DS);
42612 /* The __user pointer cast is valid because of the set_fs() */
42613 - ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
42614 + ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
42615 set_fs(oldfs);
42616 /* truncating is ok because it's a user address */
42617 if (!ret)
42618 @@ -549,7 +549,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
42619 goto out;
42620
42621 ret = -EINVAL;
42622 - if (nr_segs > UIO_MAXIOV || nr_segs < 0)
42623 + if (nr_segs > UIO_MAXIOV)
42624 goto out;
42625 if (nr_segs > fast_segs) {
42626 ret = -ENOMEM;
42627 @@ -832,6 +832,7 @@ struct compat_old_linux_dirent {
42628
42629 struct compat_readdir_callback {
42630 struct compat_old_linux_dirent __user *dirent;
42631 + struct file * file;
42632 int result;
42633 };
42634
42635 @@ -849,6 +850,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
42636 buf->result = -EOVERFLOW;
42637 return -EOVERFLOW;
42638 }
42639 +
42640 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
42641 + return 0;
42642 +
42643 buf->result++;
42644 dirent = buf->dirent;
42645 if (!access_ok(VERIFY_WRITE, dirent,
42646 @@ -881,6 +886,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
42647
42648 buf.result = 0;
42649 buf.dirent = dirent;
42650 + buf.file = file;
42651
42652 error = vfs_readdir(file, compat_fillonedir, &buf);
42653 if (buf.result)
42654 @@ -901,6 +907,7 @@ struct compat_linux_dirent {
42655 struct compat_getdents_callback {
42656 struct compat_linux_dirent __user *current_dir;
42657 struct compat_linux_dirent __user *previous;
42658 + struct file * file;
42659 int count;
42660 int error;
42661 };
42662 @@ -922,6 +929,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
42663 buf->error = -EOVERFLOW;
42664 return -EOVERFLOW;
42665 }
42666 +
42667 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
42668 + return 0;
42669 +
42670 dirent = buf->previous;
42671 if (dirent) {
42672 if (__put_user(offset, &dirent->d_off))
42673 @@ -969,6 +980,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
42674 buf.previous = NULL;
42675 buf.count = count;
42676 buf.error = 0;
42677 + buf.file = file;
42678
42679 error = vfs_readdir(file, compat_filldir, &buf);
42680 if (error >= 0)
42681 @@ -990,6 +1002,7 @@ out:
42682 struct compat_getdents_callback64 {
42683 struct linux_dirent64 __user *current_dir;
42684 struct linux_dirent64 __user *previous;
42685 + struct file * file;
42686 int count;
42687 int error;
42688 };
42689 @@ -1006,6 +1019,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
42690 buf->error = -EINVAL; /* only used if we fail.. */
42691 if (reclen > buf->count)
42692 return -EINVAL;
42693 +
42694 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
42695 + return 0;
42696 +
42697 dirent = buf->previous;
42698
42699 if (dirent) {
42700 @@ -1057,13 +1074,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
42701 buf.previous = NULL;
42702 buf.count = count;
42703 buf.error = 0;
42704 + buf.file = file;
42705
42706 error = vfs_readdir(file, compat_filldir64, &buf);
42707 if (error >= 0)
42708 error = buf.error;
42709 lastdirent = buf.previous;
42710 if (lastdirent) {
42711 - typeof(lastdirent->d_off) d_off = file->f_pos;
42712 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
42713 if (__put_user_unaligned(d_off, &lastdirent->d_off))
42714 error = -EFAULT;
42715 else
42716 diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
42717 index 112e45a..b59845b 100644
42718 --- a/fs/compat_binfmt_elf.c
42719 +++ b/fs/compat_binfmt_elf.c
42720 @@ -30,11 +30,13 @@
42721 #undef elf_phdr
42722 #undef elf_shdr
42723 #undef elf_note
42724 +#undef elf_dyn
42725 #undef elf_addr_t
42726 #define elfhdr elf32_hdr
42727 #define elf_phdr elf32_phdr
42728 #define elf_shdr elf32_shdr
42729 #define elf_note elf32_note
42730 +#define elf_dyn Elf32_Dyn
42731 #define elf_addr_t Elf32_Addr
42732
42733 /*
42734 diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
42735 index a26bea1..ae23e72 100644
42736 --- a/fs/compat_ioctl.c
42737 +++ b/fs/compat_ioctl.c
42738 @@ -211,6 +211,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd,
42739
42740 err = get_user(palp, &up->palette);
42741 err |= get_user(length, &up->length);
42742 + if (err)
42743 + return -EFAULT;
42744
42745 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
42746 err = put_user(compat_ptr(palp), &up_native->palette);
42747 @@ -622,7 +624,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
42748 return -EFAULT;
42749 if (__get_user(udata, &ss32->iomem_base))
42750 return -EFAULT;
42751 - ss.iomem_base = compat_ptr(udata);
42752 + ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
42753 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
42754 __get_user(ss.port_high, &ss32->port_high))
42755 return -EFAULT;
42756 @@ -797,7 +799,7 @@ static int compat_ioctl_preallocate(struct file *file,
42757 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
42758 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
42759 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
42760 - copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
42761 + copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
42762 return -EFAULT;
42763
42764 return ioctl_preallocate(file, p);
42765 @@ -1611,8 +1613,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
42766 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
42767 {
42768 unsigned int a, b;
42769 - a = *(unsigned int *)p;
42770 - b = *(unsigned int *)q;
42771 + a = *(const unsigned int *)p;
42772 + b = *(const unsigned int *)q;
42773 if (a > b)
42774 return 1;
42775 if (a < b)
42776 diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
42777 index 5ddd7eb..c18bf04 100644
42778 --- a/fs/configfs/dir.c
42779 +++ b/fs/configfs/dir.c
42780 @@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
42781 }
42782 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
42783 struct configfs_dirent *next;
42784 - const char * name;
42785 + const unsigned char * name;
42786 + char d_name[sizeof(next->s_dentry->d_iname)];
42787 int len;
42788 struct inode *inode = NULL;
42789
42790 @@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
42791 continue;
42792
42793 name = configfs_get_name(next);
42794 - len = strlen(name);
42795 + if (next->s_dentry && name == next->s_dentry->d_iname) {
42796 + len = next->s_dentry->d_name.len;
42797 + memcpy(d_name, name, len);
42798 + name = d_name;
42799 + } else
42800 + len = strlen(name);
42801
42802 /*
42803 * We'll have a dentry and an inode for
42804 diff --git a/fs/dcache.c b/fs/dcache.c
42805 index bcbdb33..55ffe97 100644
42806 --- a/fs/dcache.c
42807 +++ b/fs/dcache.c
42808 @@ -105,10 +105,10 @@ static unsigned int d_hash_shift __read_mostly;
42809 static struct hlist_bl_head *dentry_hashtable __read_mostly;
42810
42811 static inline struct hlist_bl_head *d_hash(const struct dentry *parent,
42812 - unsigned long hash)
42813 + unsigned int hash)
42814 {
42815 - hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES;
42816 - hash = hash ^ ((hash ^ GOLDEN_RATIO_PRIME) >> D_HASHBITS);
42817 + hash += (unsigned long) parent / L1_CACHE_BYTES;
42818 + hash = hash + (hash >> D_HASHBITS);
42819 return dentry_hashtable + (hash & D_HASHMASK);
42820 }
42821
42822 @@ -3066,7 +3066,7 @@ void __init vfs_caches_init(unsigned long mempages)
42823 mempages -= reserve;
42824
42825 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
42826 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
42827 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
42828
42829 dcache_init();
42830 inode_init();
42831 diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
42832 index 956d5dd..e755e04 100644
42833 --- a/fs/debugfs/inode.c
42834 +++ b/fs/debugfs/inode.c
42835 @@ -261,7 +261,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
42836 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
42837 {
42838 return debugfs_create_file(name,
42839 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
42840 + S_IFDIR | S_IRWXU,
42841 +#else
42842 S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
42843 +#endif
42844 parent, NULL, NULL);
42845 }
42846 EXPORT_SYMBOL_GPL(debugfs_create_dir);
42847 diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
42848 index ab35b11..b30af66 100644
42849 --- a/fs/ecryptfs/inode.c
42850 +++ b/fs/ecryptfs/inode.c
42851 @@ -672,7 +672,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
42852 old_fs = get_fs();
42853 set_fs(get_ds());
42854 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
42855 - (char __user *)lower_buf,
42856 + (char __force_user *)lower_buf,
42857 lower_bufsiz);
42858 set_fs(old_fs);
42859 if (rc < 0)
42860 @@ -718,7 +718,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
42861 }
42862 old_fs = get_fs();
42863 set_fs(get_ds());
42864 - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
42865 + rc = dentry->d_inode->i_op->readlink(dentry, (char __force_user *)buf, len);
42866 set_fs(old_fs);
42867 if (rc < 0) {
42868 kfree(buf);
42869 @@ -733,7 +733,7 @@ out:
42870 static void
42871 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
42872 {
42873 - char *buf = nd_get_link(nd);
42874 + const char *buf = nd_get_link(nd);
42875 if (!IS_ERR(buf)) {
42876 /* Free the char* */
42877 kfree(buf);
42878 diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
42879 index 3a06f40..f7af544 100644
42880 --- a/fs/ecryptfs/miscdev.c
42881 +++ b/fs/ecryptfs/miscdev.c
42882 @@ -345,7 +345,7 @@ check_list:
42883 goto out_unlock_msg_ctx;
42884 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
42885 if (msg_ctx->msg) {
42886 - if (copy_to_user(&buf[i], packet_length, packet_length_size))
42887 + if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
42888 goto out_unlock_msg_ctx;
42889 i += packet_length_size;
42890 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
42891 diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
42892 index b2a34a1..162fa69 100644
42893 --- a/fs/ecryptfs/read_write.c
42894 +++ b/fs/ecryptfs/read_write.c
42895 @@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
42896 return -EIO;
42897 fs_save = get_fs();
42898 set_fs(get_ds());
42899 - rc = vfs_write(lower_file, data, size, &offset);
42900 + rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
42901 set_fs(fs_save);
42902 mark_inode_dirty_sync(ecryptfs_inode);
42903 return rc;
42904 @@ -244,7 +244,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
42905 return -EIO;
42906 fs_save = get_fs();
42907 set_fs(get_ds());
42908 - rc = vfs_read(lower_file, data, size, &offset);
42909 + rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
42910 set_fs(fs_save);
42911 return rc;
42912 }
42913 diff --git a/fs/exec.c b/fs/exec.c
42914 index 153dee1..8ee97ba 100644
42915 --- a/fs/exec.c
42916 +++ b/fs/exec.c
42917 @@ -55,6 +55,13 @@
42918 #include <linux/pipe_fs_i.h>
42919 #include <linux/oom.h>
42920 #include <linux/compat.h>
42921 +#include <linux/random.h>
42922 +#include <linux/seq_file.h>
42923 +
42924 +#ifdef CONFIG_PAX_REFCOUNT
42925 +#include <linux/kallsyms.h>
42926 +#include <linux/kdebug.h>
42927 +#endif
42928
42929 #include <asm/uaccess.h>
42930 #include <asm/mmu_context.h>
42931 @@ -63,6 +70,15 @@
42932 #include <trace/events/task.h>
42933 #include "internal.h"
42934
42935 +#ifndef CONFIG_PAX_HAVE_ACL_FLAGS
42936 +void __weak pax_set_initial_flags(struct linux_binprm *bprm) {}
42937 +#endif
42938 +
42939 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
42940 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
42941 +EXPORT_SYMBOL(pax_set_initial_flags_func);
42942 +#endif
42943 +
42944 int core_uses_pid;
42945 char core_pattern[CORENAME_MAX_SIZE] = "core";
42946 unsigned int core_pipe_limit;
42947 @@ -72,7 +88,7 @@ struct core_name {
42948 char *corename;
42949 int used, size;
42950 };
42951 -static atomic_t call_count = ATOMIC_INIT(1);
42952 +static atomic_unchecked_t call_count = ATOMIC_INIT(1);
42953
42954 /* The maximal length of core_pattern is also specified in sysctl.c */
42955
42956 @@ -190,18 +206,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
42957 int write)
42958 {
42959 struct page *page;
42960 - int ret;
42961
42962 -#ifdef CONFIG_STACK_GROWSUP
42963 - if (write) {
42964 - ret = expand_downwards(bprm->vma, pos);
42965 - if (ret < 0)
42966 - return NULL;
42967 - }
42968 -#endif
42969 - ret = get_user_pages(current, bprm->mm, pos,
42970 - 1, write, 1, &page, NULL);
42971 - if (ret <= 0)
42972 + if (0 > expand_downwards(bprm->vma, pos))
42973 + return NULL;
42974 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
42975 return NULL;
42976
42977 if (write) {
42978 @@ -217,6 +225,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
42979 if (size <= ARG_MAX)
42980 return page;
42981
42982 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42983 + // only allow 512KB for argv+env on suid/sgid binaries
42984 + // to prevent easy ASLR exhaustion
42985 + if (((bprm->cred->euid != current_euid()) ||
42986 + (bprm->cred->egid != current_egid())) &&
42987 + (size > (512 * 1024))) {
42988 + put_page(page);
42989 + return NULL;
42990 + }
42991 +#endif
42992 +
42993 /*
42994 * Limit to 1/4-th the stack size for the argv+env strings.
42995 * This ensures that:
42996 @@ -276,6 +295,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
42997 vma->vm_end = STACK_TOP_MAX;
42998 vma->vm_start = vma->vm_end - PAGE_SIZE;
42999 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
43000 +
43001 +#ifdef CONFIG_PAX_SEGMEXEC
43002 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
43003 +#endif
43004 +
43005 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
43006 INIT_LIST_HEAD(&vma->anon_vma_chain);
43007
43008 @@ -290,6 +314,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
43009 mm->stack_vm = mm->total_vm = 1;
43010 up_write(&mm->mmap_sem);
43011 bprm->p = vma->vm_end - sizeof(void *);
43012 +
43013 +#ifdef CONFIG_PAX_RANDUSTACK
43014 + if (randomize_va_space)
43015 + bprm->p ^= random32() & ~PAGE_MASK;
43016 +#endif
43017 +
43018 return 0;
43019 err:
43020 up_write(&mm->mmap_sem);
43021 @@ -398,19 +428,7 @@ err:
43022 return err;
43023 }
43024
43025 -struct user_arg_ptr {
43026 -#ifdef CONFIG_COMPAT
43027 - bool is_compat;
43028 -#endif
43029 - union {
43030 - const char __user *const __user *native;
43031 -#ifdef CONFIG_COMPAT
43032 - compat_uptr_t __user *compat;
43033 -#endif
43034 - } ptr;
43035 -};
43036 -
43037 -static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
43038 +const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
43039 {
43040 const char __user *native;
43041
43042 @@ -419,14 +437,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
43043 compat_uptr_t compat;
43044
43045 if (get_user(compat, argv.ptr.compat + nr))
43046 - return ERR_PTR(-EFAULT);
43047 + return (const char __force_user *)ERR_PTR(-EFAULT);
43048
43049 return compat_ptr(compat);
43050 }
43051 #endif
43052
43053 if (get_user(native, argv.ptr.native + nr))
43054 - return ERR_PTR(-EFAULT);
43055 + return (const char __force_user *)ERR_PTR(-EFAULT);
43056
43057 return native;
43058 }
43059 @@ -445,7 +463,7 @@ static int count(struct user_arg_ptr argv, int max)
43060 if (!p)
43061 break;
43062
43063 - if (IS_ERR(p))
43064 + if (IS_ERR((const char __force_kernel *)p))
43065 return -EFAULT;
43066
43067 if (i++ >= max)
43068 @@ -479,7 +497,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
43069
43070 ret = -EFAULT;
43071 str = get_user_arg_ptr(argv, argc);
43072 - if (IS_ERR(str))
43073 + if (IS_ERR((const char __force_kernel *)str))
43074 goto out;
43075
43076 len = strnlen_user(str, MAX_ARG_STRLEN);
43077 @@ -561,7 +579,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
43078 int r;
43079 mm_segment_t oldfs = get_fs();
43080 struct user_arg_ptr argv = {
43081 - .ptr.native = (const char __user *const __user *)__argv,
43082 + .ptr.native = (const char __force_user *const __force_user *)__argv,
43083 };
43084
43085 set_fs(KERNEL_DS);
43086 @@ -596,7 +614,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
43087 unsigned long new_end = old_end - shift;
43088 struct mmu_gather tlb;
43089
43090 - BUG_ON(new_start > new_end);
43091 + if (new_start >= new_end || new_start < mmap_min_addr)
43092 + return -ENOMEM;
43093
43094 /*
43095 * ensure there are no vmas between where we want to go
43096 @@ -605,6 +624,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
43097 if (vma != find_vma(mm, new_start))
43098 return -EFAULT;
43099
43100 +#ifdef CONFIG_PAX_SEGMEXEC
43101 + BUG_ON(pax_find_mirror_vma(vma));
43102 +#endif
43103 +
43104 /*
43105 * cover the whole range: [new_start, old_end)
43106 */
43107 @@ -685,10 +708,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
43108 stack_top = arch_align_stack(stack_top);
43109 stack_top = PAGE_ALIGN(stack_top);
43110
43111 - if (unlikely(stack_top < mmap_min_addr) ||
43112 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
43113 - return -ENOMEM;
43114 -
43115 stack_shift = vma->vm_end - stack_top;
43116
43117 bprm->p -= stack_shift;
43118 @@ -700,8 +719,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
43119 bprm->exec -= stack_shift;
43120
43121 down_write(&mm->mmap_sem);
43122 +
43123 + /* Move stack pages down in memory. */
43124 + if (stack_shift) {
43125 + ret = shift_arg_pages(vma, stack_shift);
43126 + if (ret)
43127 + goto out_unlock;
43128 + }
43129 +
43130 vm_flags = VM_STACK_FLAGS;
43131
43132 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
43133 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
43134 + vm_flags &= ~VM_EXEC;
43135 +
43136 +#ifdef CONFIG_PAX_MPROTECT
43137 + if (mm->pax_flags & MF_PAX_MPROTECT)
43138 + vm_flags &= ~VM_MAYEXEC;
43139 +#endif
43140 +
43141 + }
43142 +#endif
43143 +
43144 /*
43145 * Adjust stack execute permissions; explicitly enable for
43146 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
43147 @@ -720,13 +759,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
43148 goto out_unlock;
43149 BUG_ON(prev != vma);
43150
43151 - /* Move stack pages down in memory. */
43152 - if (stack_shift) {
43153 - ret = shift_arg_pages(vma, stack_shift);
43154 - if (ret)
43155 - goto out_unlock;
43156 - }
43157 -
43158 /* mprotect_fixup is overkill to remove the temporary stack flags */
43159 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
43160
43161 @@ -807,7 +839,7 @@ int kernel_read(struct file *file, loff_t offset,
43162 old_fs = get_fs();
43163 set_fs(get_ds());
43164 /* The cast to a user pointer is valid due to the set_fs() */
43165 - result = vfs_read(file, (void __user *)addr, count, &pos);
43166 + result = vfs_read(file, (void __force_user *)addr, count, &pos);
43167 set_fs(old_fs);
43168 return result;
43169 }
43170 @@ -1252,7 +1284,7 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
43171 }
43172 rcu_read_unlock();
43173
43174 - if (p->fs->users > n_fs) {
43175 + if (atomic_read(&p->fs->users) > n_fs) {
43176 bprm->unsafe |= LSM_UNSAFE_SHARE;
43177 } else {
43178 res = -EAGAIN;
43179 @@ -1447,6 +1479,28 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
43180
43181 EXPORT_SYMBOL(search_binary_handler);
43182
43183 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43184 +static DEFINE_PER_CPU(u64, exec_counter);
43185 +static int __init init_exec_counters(void)
43186 +{
43187 + unsigned int cpu;
43188 +
43189 + for_each_possible_cpu(cpu) {
43190 + per_cpu(exec_counter, cpu) = (u64)cpu;
43191 + }
43192 +
43193 + return 0;
43194 +}
43195 +early_initcall(init_exec_counters);
43196 +static inline void increment_exec_counter(void)
43197 +{
43198 + BUILD_BUG_ON(NR_CPUS > (1 << 16));
43199 + current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
43200 +}
43201 +#else
43202 +static inline void increment_exec_counter(void) {}
43203 +#endif
43204 +
43205 /*
43206 * sys_execve() executes a new program.
43207 */
43208 @@ -1455,6 +1509,11 @@ static int do_execve_common(const char *filename,
43209 struct user_arg_ptr envp,
43210 struct pt_regs *regs)
43211 {
43212 +#ifdef CONFIG_GRKERNSEC
43213 + struct file *old_exec_file;
43214 + struct acl_subject_label *old_acl;
43215 + struct rlimit old_rlim[RLIM_NLIMITS];
43216 +#endif
43217 struct linux_binprm *bprm;
43218 struct file *file;
43219 struct files_struct *displaced;
43220 @@ -1462,6 +1521,8 @@ static int do_execve_common(const char *filename,
43221 int retval;
43222 const struct cred *cred = current_cred();
43223
43224 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
43225 +
43226 /*
43227 * We move the actual failure in case of RLIMIT_NPROC excess from
43228 * set*uid() to execve() because too many poorly written programs
43229 @@ -1502,12 +1563,27 @@ static int do_execve_common(const char *filename,
43230 if (IS_ERR(file))
43231 goto out_unmark;
43232
43233 + if (gr_ptrace_readexec(file, bprm->unsafe)) {
43234 + retval = -EPERM;
43235 + goto out_file;
43236 + }
43237 +
43238 sched_exec();
43239
43240 bprm->file = file;
43241 bprm->filename = filename;
43242 bprm->interp = filename;
43243
43244 + if (gr_process_user_ban()) {
43245 + retval = -EPERM;
43246 + goto out_file;
43247 + }
43248 +
43249 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
43250 + retval = -EACCES;
43251 + goto out_file;
43252 + }
43253 +
43254 retval = bprm_mm_init(bprm);
43255 if (retval)
43256 goto out_file;
43257 @@ -1524,24 +1600,65 @@ static int do_execve_common(const char *filename,
43258 if (retval < 0)
43259 goto out;
43260
43261 +#ifdef CONFIG_GRKERNSEC
43262 + old_acl = current->acl;
43263 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
43264 + old_exec_file = current->exec_file;
43265 + get_file(file);
43266 + current->exec_file = file;
43267 +#endif
43268 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43269 + /* limit suid stack to 8MB
43270 + we saved the old limits above and will restore them if this exec fails
43271 + */
43272 + if (((bprm->cred->euid != current_euid()) || (bprm->cred->egid != current_egid())) &&
43273 + (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
43274 + current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
43275 +#endif
43276 +
43277 + if (!gr_tpe_allow(file)) {
43278 + retval = -EACCES;
43279 + goto out_fail;
43280 + }
43281 +
43282 + if (gr_check_crash_exec(file)) {
43283 + retval = -EACCES;
43284 + goto out_fail;
43285 + }
43286 +
43287 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
43288 + bprm->unsafe);
43289 + if (retval < 0)
43290 + goto out_fail;
43291 +
43292 retval = copy_strings_kernel(1, &bprm->filename, bprm);
43293 if (retval < 0)
43294 - goto out;
43295 + goto out_fail;
43296
43297 bprm->exec = bprm->p;
43298 retval = copy_strings(bprm->envc, envp, bprm);
43299 if (retval < 0)
43300 - goto out;
43301 + goto out_fail;
43302
43303 retval = copy_strings(bprm->argc, argv, bprm);
43304 if (retval < 0)
43305 - goto out;
43306 + goto out_fail;
43307 +
43308 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
43309 +
43310 + gr_handle_exec_args(bprm, argv);
43311
43312 retval = search_binary_handler(bprm,regs);
43313 if (retval < 0)
43314 - goto out;
43315 + goto out_fail;
43316 +#ifdef CONFIG_GRKERNSEC
43317 + if (old_exec_file)
43318 + fput(old_exec_file);
43319 +#endif
43320
43321 /* execve succeeded */
43322 +
43323 + increment_exec_counter();
43324 current->fs->in_exec = 0;
43325 current->in_execve = 0;
43326 acct_update_integrals(current);
43327 @@ -1550,6 +1667,14 @@ static int do_execve_common(const char *filename,
43328 put_files_struct(displaced);
43329 return retval;
43330
43331 +out_fail:
43332 +#ifdef CONFIG_GRKERNSEC
43333 + current->acl = old_acl;
43334 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
43335 + fput(current->exec_file);
43336 + current->exec_file = old_exec_file;
43337 +#endif
43338 +
43339 out:
43340 if (bprm->mm) {
43341 acct_arg_size(bprm, 0);
43342 @@ -1623,7 +1748,7 @@ static int expand_corename(struct core_name *cn)
43343 {
43344 char *old_corename = cn->corename;
43345
43346 - cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
43347 + cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
43348 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
43349
43350 if (!cn->corename) {
43351 @@ -1720,7 +1845,7 @@ static int format_corename(struct core_name *cn, long signr)
43352 int pid_in_pattern = 0;
43353 int err = 0;
43354
43355 - cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
43356 + cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
43357 cn->corename = kmalloc(cn->size, GFP_KERNEL);
43358 cn->used = 0;
43359
43360 @@ -1817,6 +1942,218 @@ out:
43361 return ispipe;
43362 }
43363
43364 +int pax_check_flags(unsigned long *flags)
43365 +{
43366 + int retval = 0;
43367 +
43368 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
43369 + if (*flags & MF_PAX_SEGMEXEC)
43370 + {
43371 + *flags &= ~MF_PAX_SEGMEXEC;
43372 + retval = -EINVAL;
43373 + }
43374 +#endif
43375 +
43376 + if ((*flags & MF_PAX_PAGEEXEC)
43377 +
43378 +#ifdef CONFIG_PAX_PAGEEXEC
43379 + && (*flags & MF_PAX_SEGMEXEC)
43380 +#endif
43381 +
43382 + )
43383 + {
43384 + *flags &= ~MF_PAX_PAGEEXEC;
43385 + retval = -EINVAL;
43386 + }
43387 +
43388 + if ((*flags & MF_PAX_MPROTECT)
43389 +
43390 +#ifdef CONFIG_PAX_MPROTECT
43391 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
43392 +#endif
43393 +
43394 + )
43395 + {
43396 + *flags &= ~MF_PAX_MPROTECT;
43397 + retval = -EINVAL;
43398 + }
43399 +
43400 + if ((*flags & MF_PAX_EMUTRAMP)
43401 +
43402 +#ifdef CONFIG_PAX_EMUTRAMP
43403 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
43404 +#endif
43405 +
43406 + )
43407 + {
43408 + *flags &= ~MF_PAX_EMUTRAMP;
43409 + retval = -EINVAL;
43410 + }
43411 +
43412 + return retval;
43413 +}
43414 +
43415 +EXPORT_SYMBOL(pax_check_flags);
43416 +
43417 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
43418 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
43419 +{
43420 + struct task_struct *tsk = current;
43421 + struct mm_struct *mm = current->mm;
43422 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
43423 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
43424 + char *path_exec = NULL;
43425 + char *path_fault = NULL;
43426 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
43427 +
43428 + if (buffer_exec && buffer_fault) {
43429 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
43430 +
43431 + down_read(&mm->mmap_sem);
43432 + vma = mm->mmap;
43433 + while (vma && (!vma_exec || !vma_fault)) {
43434 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
43435 + vma_exec = vma;
43436 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
43437 + vma_fault = vma;
43438 + vma = vma->vm_next;
43439 + }
43440 + if (vma_exec) {
43441 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
43442 + if (IS_ERR(path_exec))
43443 + path_exec = "<path too long>";
43444 + else {
43445 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
43446 + if (path_exec) {
43447 + *path_exec = 0;
43448 + path_exec = buffer_exec;
43449 + } else
43450 + path_exec = "<path too long>";
43451 + }
43452 + }
43453 + if (vma_fault) {
43454 + start = vma_fault->vm_start;
43455 + end = vma_fault->vm_end;
43456 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
43457 + if (vma_fault->vm_file) {
43458 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
43459 + if (IS_ERR(path_fault))
43460 + path_fault = "<path too long>";
43461 + else {
43462 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
43463 + if (path_fault) {
43464 + *path_fault = 0;
43465 + path_fault = buffer_fault;
43466 + } else
43467 + path_fault = "<path too long>";
43468 + }
43469 + } else
43470 + path_fault = "<anonymous mapping>";
43471 + }
43472 + up_read(&mm->mmap_sem);
43473 + }
43474 + if (tsk->signal->curr_ip)
43475 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
43476 + else
43477 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
43478 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
43479 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
43480 + task_uid(tsk), task_euid(tsk), pc, sp);
43481 + free_page((unsigned long)buffer_exec);
43482 + free_page((unsigned long)buffer_fault);
43483 + pax_report_insns(regs, pc, sp);
43484 + do_coredump(SIGKILL, SIGKILL, regs);
43485 +}
43486 +#endif
43487 +
43488 +#ifdef CONFIG_PAX_REFCOUNT
43489 +void pax_report_refcount_overflow(struct pt_regs *regs)
43490 +{
43491 + if (current->signal->curr_ip)
43492 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
43493 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
43494 + else
43495 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
43496 + current->comm, task_pid_nr(current), current_uid(), current_euid());
43497 + print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
43498 + show_regs(regs);
43499 + force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
43500 +}
43501 +#endif
43502 +
43503 +#ifdef CONFIG_PAX_USERCOPY
43504 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
43505 +int object_is_on_stack(const void *obj, unsigned long len)
43506 +{
43507 + const void * const stack = task_stack_page(current);
43508 + const void * const stackend = stack + THREAD_SIZE;
43509 +
43510 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
43511 + const void *frame = NULL;
43512 + const void *oldframe;
43513 +#endif
43514 +
43515 + if (obj + len < obj)
43516 + return -1;
43517 +
43518 + if (obj + len <= stack || stackend <= obj)
43519 + return 0;
43520 +
43521 + if (obj < stack || stackend < obj + len)
43522 + return -1;
43523 +
43524 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
43525 + oldframe = __builtin_frame_address(1);
43526 + if (oldframe)
43527 + frame = __builtin_frame_address(2);
43528 + /*
43529 + low ----------------------------------------------> high
43530 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
43531 + ^----------------^
43532 + allow copies only within here
43533 + */
43534 + while (stack <= frame && frame < stackend) {
43535 + /* if obj + len extends past the last frame, this
43536 + check won't pass and the next frame will be 0,
43537 + causing us to bail out and correctly report
43538 + the copy as invalid
43539 + */
43540 + if (obj + len <= frame)
43541 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
43542 + oldframe = frame;
43543 + frame = *(const void * const *)frame;
43544 + }
43545 + return -1;
43546 +#else
43547 + return 1;
43548 +#endif
43549 +}
43550 +
43551 +__noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
43552 +{
43553 + if (current->signal->curr_ip)
43554 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
43555 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
43556 + else
43557 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
43558 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
43559 + dump_stack();
43560 + gr_handle_kernel_exploit();
43561 + do_group_exit(SIGKILL);
43562 +}
43563 +#endif
43564 +
43565 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
43566 +void pax_track_stack(void)
43567 +{
43568 + unsigned long sp = (unsigned long)&sp;
43569 + if (sp < current_thread_info()->lowest_stack &&
43570 + sp > (unsigned long)task_stack_page(current))
43571 + current_thread_info()->lowest_stack = sp;
43572 +}
43573 +EXPORT_SYMBOL(pax_track_stack);
43574 +#endif
43575 +
43576 static int zap_process(struct task_struct *start, int exit_code)
43577 {
43578 struct task_struct *t;
43579 @@ -2014,17 +2351,17 @@ static void wait_for_dump_helpers(struct file *file)
43580 pipe = file->f_path.dentry->d_inode->i_pipe;
43581
43582 pipe_lock(pipe);
43583 - pipe->readers++;
43584 - pipe->writers--;
43585 + atomic_inc(&pipe->readers);
43586 + atomic_dec(&pipe->writers);
43587
43588 - while ((pipe->readers > 1) && (!signal_pending(current))) {
43589 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
43590 wake_up_interruptible_sync(&pipe->wait);
43591 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
43592 pipe_wait(pipe);
43593 }
43594
43595 - pipe->readers--;
43596 - pipe->writers++;
43597 + atomic_dec(&pipe->readers);
43598 + atomic_inc(&pipe->writers);
43599 pipe_unlock(pipe);
43600
43601 }
43602 @@ -2085,7 +2422,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
43603 int retval = 0;
43604 int flag = 0;
43605 int ispipe;
43606 - static atomic_t core_dump_count = ATOMIC_INIT(0);
43607 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
43608 struct coredump_params cprm = {
43609 .signr = signr,
43610 .regs = regs,
43611 @@ -2100,6 +2437,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
43612
43613 audit_core_dumps(signr);
43614
43615 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
43616 + gr_handle_brute_attach(current, cprm.mm_flags);
43617 +
43618 binfmt = mm->binfmt;
43619 if (!binfmt || !binfmt->core_dump)
43620 goto fail;
43621 @@ -2167,7 +2507,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
43622 }
43623 cprm.limit = RLIM_INFINITY;
43624
43625 - dump_count = atomic_inc_return(&core_dump_count);
43626 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
43627 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
43628 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
43629 task_tgid_vnr(current), current->comm);
43630 @@ -2194,6 +2534,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
43631 } else {
43632 struct inode *inode;
43633
43634 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
43635 +
43636 if (cprm.limit < binfmt->min_coredump)
43637 goto fail_unlock;
43638
43639 @@ -2237,7 +2579,7 @@ close_fail:
43640 filp_close(cprm.file, NULL);
43641 fail_dropcount:
43642 if (ispipe)
43643 - atomic_dec(&core_dump_count);
43644 + atomic_dec_unchecked(&core_dump_count);
43645 fail_unlock:
43646 kfree(cn.corename);
43647 fail_corename:
43648 @@ -2256,7 +2598,7 @@ fail:
43649 */
43650 int dump_write(struct file *file, const void *addr, int nr)
43651 {
43652 - return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
43653 + return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
43654 }
43655 EXPORT_SYMBOL(dump_write);
43656
43657 diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
43658 index a8cbe1b..fed04cb 100644
43659 --- a/fs/ext2/balloc.c
43660 +++ b/fs/ext2/balloc.c
43661 @@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
43662
43663 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
43664 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
43665 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
43666 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
43667 sbi->s_resuid != current_fsuid() &&
43668 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
43669 return 0;
43670 diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
43671 index a203892..4e64db5 100644
43672 --- a/fs/ext3/balloc.c
43673 +++ b/fs/ext3/balloc.c
43674 @@ -1446,9 +1446,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
43675
43676 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
43677 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
43678 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
43679 + if (free_blocks < root_blocks + 1 &&
43680 !use_reservation && sbi->s_resuid != current_fsuid() &&
43681 - (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
43682 + (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid)) &&
43683 + !capable_nolog(CAP_SYS_RESOURCE)) {
43684 return 0;
43685 }
43686 return 1;
43687 diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
43688 index f9e2cd8..bfdc476 100644
43689 --- a/fs/ext4/balloc.c
43690 +++ b/fs/ext4/balloc.c
43691 @@ -438,8 +438,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
43692 /* Hm, nope. Are (enough) root reserved clusters available? */
43693 if (sbi->s_resuid == current_fsuid() ||
43694 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
43695 - capable(CAP_SYS_RESOURCE) ||
43696 - (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
43697 + (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
43698 + capable_nolog(CAP_SYS_RESOURCE)) {
43699
43700 if (free_clusters >= (nclusters + dirty_clusters))
43701 return 1;
43702 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
43703 index 513004f..2591a6b 100644
43704 --- a/fs/ext4/ext4.h
43705 +++ b/fs/ext4/ext4.h
43706 @@ -1218,19 +1218,19 @@ struct ext4_sb_info {
43707 unsigned long s_mb_last_start;
43708
43709 /* stats for buddy allocator */
43710 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
43711 - atomic_t s_bal_success; /* we found long enough chunks */
43712 - atomic_t s_bal_allocated; /* in blocks */
43713 - atomic_t s_bal_ex_scanned; /* total extents scanned */
43714 - atomic_t s_bal_goals; /* goal hits */
43715 - atomic_t s_bal_breaks; /* too long searches */
43716 - atomic_t s_bal_2orders; /* 2^order hits */
43717 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
43718 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
43719 + atomic_unchecked_t s_bal_allocated; /* in blocks */
43720 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
43721 + atomic_unchecked_t s_bal_goals; /* goal hits */
43722 + atomic_unchecked_t s_bal_breaks; /* too long searches */
43723 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
43724 spinlock_t s_bal_lock;
43725 unsigned long s_mb_buddies_generated;
43726 unsigned long long s_mb_generation_time;
43727 - atomic_t s_mb_lost_chunks;
43728 - atomic_t s_mb_preallocated;
43729 - atomic_t s_mb_discarded;
43730 + atomic_unchecked_t s_mb_lost_chunks;
43731 + atomic_unchecked_t s_mb_preallocated;
43732 + atomic_unchecked_t s_mb_discarded;
43733 atomic_t s_lock_busy;
43734
43735 /* locality groups */
43736 diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
43737 index cb990b2..4820141 100644
43738 --- a/fs/ext4/mballoc.c
43739 +++ b/fs/ext4/mballoc.c
43740 @@ -1794,7 +1794,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
43741 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
43742
43743 if (EXT4_SB(sb)->s_mb_stats)
43744 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
43745 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
43746
43747 break;
43748 }
43749 @@ -2088,7 +2088,7 @@ repeat:
43750 ac->ac_status = AC_STATUS_CONTINUE;
43751 ac->ac_flags |= EXT4_MB_HINT_FIRST;
43752 cr = 3;
43753 - atomic_inc(&sbi->s_mb_lost_chunks);
43754 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
43755 goto repeat;
43756 }
43757 }
43758 @@ -2592,25 +2592,25 @@ int ext4_mb_release(struct super_block *sb)
43759 if (sbi->s_mb_stats) {
43760 ext4_msg(sb, KERN_INFO,
43761 "mballoc: %u blocks %u reqs (%u success)",
43762 - atomic_read(&sbi->s_bal_allocated),
43763 - atomic_read(&sbi->s_bal_reqs),
43764 - atomic_read(&sbi->s_bal_success));
43765 + atomic_read_unchecked(&sbi->s_bal_allocated),
43766 + atomic_read_unchecked(&sbi->s_bal_reqs),
43767 + atomic_read_unchecked(&sbi->s_bal_success));
43768 ext4_msg(sb, KERN_INFO,
43769 "mballoc: %u extents scanned, %u goal hits, "
43770 "%u 2^N hits, %u breaks, %u lost",
43771 - atomic_read(&sbi->s_bal_ex_scanned),
43772 - atomic_read(&sbi->s_bal_goals),
43773 - atomic_read(&sbi->s_bal_2orders),
43774 - atomic_read(&sbi->s_bal_breaks),
43775 - atomic_read(&sbi->s_mb_lost_chunks));
43776 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
43777 + atomic_read_unchecked(&sbi->s_bal_goals),
43778 + atomic_read_unchecked(&sbi->s_bal_2orders),
43779 + atomic_read_unchecked(&sbi->s_bal_breaks),
43780 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
43781 ext4_msg(sb, KERN_INFO,
43782 "mballoc: %lu generated and it took %Lu",
43783 sbi->s_mb_buddies_generated,
43784 sbi->s_mb_generation_time);
43785 ext4_msg(sb, KERN_INFO,
43786 "mballoc: %u preallocated, %u discarded",
43787 - atomic_read(&sbi->s_mb_preallocated),
43788 - atomic_read(&sbi->s_mb_discarded));
43789 + atomic_read_unchecked(&sbi->s_mb_preallocated),
43790 + atomic_read_unchecked(&sbi->s_mb_discarded));
43791 }
43792
43793 free_percpu(sbi->s_locality_groups);
43794 @@ -3096,16 +3096,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
43795 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
43796
43797 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
43798 - atomic_inc(&sbi->s_bal_reqs);
43799 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
43800 + atomic_inc_unchecked(&sbi->s_bal_reqs);
43801 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
43802 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
43803 - atomic_inc(&sbi->s_bal_success);
43804 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
43805 + atomic_inc_unchecked(&sbi->s_bal_success);
43806 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
43807 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
43808 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
43809 - atomic_inc(&sbi->s_bal_goals);
43810 + atomic_inc_unchecked(&sbi->s_bal_goals);
43811 if (ac->ac_found > sbi->s_mb_max_to_scan)
43812 - atomic_inc(&sbi->s_bal_breaks);
43813 + atomic_inc_unchecked(&sbi->s_bal_breaks);
43814 }
43815
43816 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
43817 @@ -3509,7 +3509,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
43818 trace_ext4_mb_new_inode_pa(ac, pa);
43819
43820 ext4_mb_use_inode_pa(ac, pa);
43821 - atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
43822 + atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
43823
43824 ei = EXT4_I(ac->ac_inode);
43825 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
43826 @@ -3569,7 +3569,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
43827 trace_ext4_mb_new_group_pa(ac, pa);
43828
43829 ext4_mb_use_group_pa(ac, pa);
43830 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
43831 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
43832
43833 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
43834 lg = ac->ac_lg;
43835 @@ -3658,7 +3658,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
43836 * from the bitmap and continue.
43837 */
43838 }
43839 - atomic_add(free, &sbi->s_mb_discarded);
43840 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
43841
43842 return err;
43843 }
43844 @@ -3676,7 +3676,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
43845 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
43846 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
43847 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
43848 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
43849 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
43850 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
43851
43852 return 0;
43853 diff --git a/fs/fcntl.c b/fs/fcntl.c
43854 index 22764c7..86372c9 100644
43855 --- a/fs/fcntl.c
43856 +++ b/fs/fcntl.c
43857 @@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
43858 if (err)
43859 return err;
43860
43861 + if (gr_handle_chroot_fowner(pid, type))
43862 + return -ENOENT;
43863 + if (gr_check_protected_task_fowner(pid, type))
43864 + return -EACCES;
43865 +
43866 f_modown(filp, pid, type, force);
43867 return 0;
43868 }
43869 @@ -266,7 +271,7 @@ pid_t f_getown(struct file *filp)
43870
43871 static int f_setown_ex(struct file *filp, unsigned long arg)
43872 {
43873 - struct f_owner_ex * __user owner_p = (void * __user)arg;
43874 + struct f_owner_ex __user *owner_p = (void __user *)arg;
43875 struct f_owner_ex owner;
43876 struct pid *pid;
43877 int type;
43878 @@ -306,7 +311,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
43879
43880 static int f_getown_ex(struct file *filp, unsigned long arg)
43881 {
43882 - struct f_owner_ex * __user owner_p = (void * __user)arg;
43883 + struct f_owner_ex __user *owner_p = (void __user *)arg;
43884 struct f_owner_ex owner;
43885 int ret = 0;
43886
43887 @@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
43888 switch (cmd) {
43889 case F_DUPFD:
43890 case F_DUPFD_CLOEXEC:
43891 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
43892 if (arg >= rlimit(RLIMIT_NOFILE))
43893 break;
43894 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
43895 diff --git a/fs/fifo.c b/fs/fifo.c
43896 index b1a524d..4ee270e 100644
43897 --- a/fs/fifo.c
43898 +++ b/fs/fifo.c
43899 @@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
43900 */
43901 filp->f_op = &read_pipefifo_fops;
43902 pipe->r_counter++;
43903 - if (pipe->readers++ == 0)
43904 + if (atomic_inc_return(&pipe->readers) == 1)
43905 wake_up_partner(inode);
43906
43907 - if (!pipe->writers) {
43908 + if (!atomic_read(&pipe->writers)) {
43909 if ((filp->f_flags & O_NONBLOCK)) {
43910 /* suppress POLLHUP until we have
43911 * seen a writer */
43912 @@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
43913 * errno=ENXIO when there is no process reading the FIFO.
43914 */
43915 ret = -ENXIO;
43916 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
43917 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
43918 goto err;
43919
43920 filp->f_op = &write_pipefifo_fops;
43921 pipe->w_counter++;
43922 - if (!pipe->writers++)
43923 + if (atomic_inc_return(&pipe->writers) == 1)
43924 wake_up_partner(inode);
43925
43926 - if (!pipe->readers) {
43927 + if (!atomic_read(&pipe->readers)) {
43928 wait_for_partner(inode, &pipe->r_counter);
43929 if (signal_pending(current))
43930 goto err_wr;
43931 @@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
43932 */
43933 filp->f_op = &rdwr_pipefifo_fops;
43934
43935 - pipe->readers++;
43936 - pipe->writers++;
43937 + atomic_inc(&pipe->readers);
43938 + atomic_inc(&pipe->writers);
43939 pipe->r_counter++;
43940 pipe->w_counter++;
43941 - if (pipe->readers == 1 || pipe->writers == 1)
43942 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
43943 wake_up_partner(inode);
43944 break;
43945
43946 @@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
43947 return 0;
43948
43949 err_rd:
43950 - if (!--pipe->readers)
43951 + if (atomic_dec_and_test(&pipe->readers))
43952 wake_up_interruptible(&pipe->wait);
43953 ret = -ERESTARTSYS;
43954 goto err;
43955
43956 err_wr:
43957 - if (!--pipe->writers)
43958 + if (atomic_dec_and_test(&pipe->writers))
43959 wake_up_interruptible(&pipe->wait);
43960 ret = -ERESTARTSYS;
43961 goto err;
43962
43963 err:
43964 - if (!pipe->readers && !pipe->writers)
43965 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
43966 free_pipe_info(inode);
43967
43968 err_nocleanup:
43969 diff --git a/fs/file.c b/fs/file.c
43970 index 4c6992d..104cdea 100644
43971 --- a/fs/file.c
43972 +++ b/fs/file.c
43973 @@ -15,6 +15,7 @@
43974 #include <linux/slab.h>
43975 #include <linux/vmalloc.h>
43976 #include <linux/file.h>
43977 +#include <linux/security.h>
43978 #include <linux/fdtable.h>
43979 #include <linux/bitops.h>
43980 #include <linux/interrupt.h>
43981 @@ -254,6 +255,7 @@ int expand_files(struct files_struct *files, int nr)
43982 * N.B. For clone tasks sharing a files structure, this test
43983 * will limit the total number of files that can be opened.
43984 */
43985 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
43986 if (nr >= rlimit(RLIMIT_NOFILE))
43987 return -EMFILE;
43988
43989 diff --git a/fs/filesystems.c b/fs/filesystems.c
43990 index 96f2428..f5eeb8e 100644
43991 --- a/fs/filesystems.c
43992 +++ b/fs/filesystems.c
43993 @@ -273,7 +273,12 @@ struct file_system_type *get_fs_type(const char *name)
43994 int len = dot ? dot - name : strlen(name);
43995
43996 fs = __get_fs_type(name, len);
43997 +
43998 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
43999 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
44000 +#else
44001 if (!fs && (request_module("%.*s", len, name) == 0))
44002 +#endif
44003 fs = __get_fs_type(name, len);
44004
44005 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
44006 diff --git a/fs/fs_struct.c b/fs/fs_struct.c
44007 index 78b519c..a8b4979 100644
44008 --- a/fs/fs_struct.c
44009 +++ b/fs/fs_struct.c
44010 @@ -4,6 +4,7 @@
44011 #include <linux/path.h>
44012 #include <linux/slab.h>
44013 #include <linux/fs_struct.h>
44014 +#include <linux/grsecurity.h>
44015 #include "internal.h"
44016
44017 static inline void path_get_longterm(struct path *path)
44018 @@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
44019 old_root = fs->root;
44020 fs->root = *path;
44021 path_get_longterm(path);
44022 + gr_set_chroot_entries(current, path);
44023 write_seqcount_end(&fs->seq);
44024 spin_unlock(&fs->lock);
44025 if (old_root.dentry)
44026 @@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
44027 && fs->root.mnt == old_root->mnt) {
44028 path_get_longterm(new_root);
44029 fs->root = *new_root;
44030 + gr_set_chroot_entries(p, new_root);
44031 count++;
44032 }
44033 if (fs->pwd.dentry == old_root->dentry
44034 @@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
44035 spin_lock(&fs->lock);
44036 write_seqcount_begin(&fs->seq);
44037 tsk->fs = NULL;
44038 - kill = !--fs->users;
44039 + gr_clear_chroot_entries(tsk);
44040 + kill = !atomic_dec_return(&fs->users);
44041 write_seqcount_end(&fs->seq);
44042 spin_unlock(&fs->lock);
44043 task_unlock(tsk);
44044 @@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
44045 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
44046 /* We don't need to lock fs - think why ;-) */
44047 if (fs) {
44048 - fs->users = 1;
44049 + atomic_set(&fs->users, 1);
44050 fs->in_exec = 0;
44051 spin_lock_init(&fs->lock);
44052 seqcount_init(&fs->seq);
44053 @@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
44054 spin_lock(&old->lock);
44055 fs->root = old->root;
44056 path_get_longterm(&fs->root);
44057 + /* instead of calling gr_set_chroot_entries here,
44058 + we call it from every caller of this function
44059 + */
44060 fs->pwd = old->pwd;
44061 path_get_longterm(&fs->pwd);
44062 spin_unlock(&old->lock);
44063 @@ -150,8 +157,9 @@ int unshare_fs_struct(void)
44064
44065 task_lock(current);
44066 spin_lock(&fs->lock);
44067 - kill = !--fs->users;
44068 + kill = !atomic_dec_return(&fs->users);
44069 current->fs = new_fs;
44070 + gr_set_chroot_entries(current, &new_fs->root);
44071 spin_unlock(&fs->lock);
44072 task_unlock(current);
44073
44074 @@ -164,13 +172,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
44075
44076 int current_umask(void)
44077 {
44078 - return current->fs->umask;
44079 + return current->fs->umask | gr_acl_umask();
44080 }
44081 EXPORT_SYMBOL(current_umask);
44082
44083 /* to be mentioned only in INIT_TASK */
44084 struct fs_struct init_fs = {
44085 - .users = 1,
44086 + .users = ATOMIC_INIT(1),
44087 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
44088 .seq = SEQCNT_ZERO,
44089 .umask = 0022,
44090 @@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
44091 task_lock(current);
44092
44093 spin_lock(&init_fs.lock);
44094 - init_fs.users++;
44095 + atomic_inc(&init_fs.users);
44096 spin_unlock(&init_fs.lock);
44097
44098 spin_lock(&fs->lock);
44099 current->fs = &init_fs;
44100 - kill = !--fs->users;
44101 + gr_set_chroot_entries(current, &current->fs->root);
44102 + kill = !atomic_dec_return(&fs->users);
44103 spin_unlock(&fs->lock);
44104
44105 task_unlock(current);
44106 diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
44107 index 9905350..02eaec4 100644
44108 --- a/fs/fscache/cookie.c
44109 +++ b/fs/fscache/cookie.c
44110 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
44111 parent ? (char *) parent->def->name : "<no-parent>",
44112 def->name, netfs_data);
44113
44114 - fscache_stat(&fscache_n_acquires);
44115 + fscache_stat_unchecked(&fscache_n_acquires);
44116
44117 /* if there's no parent cookie, then we don't create one here either */
44118 if (!parent) {
44119 - fscache_stat(&fscache_n_acquires_null);
44120 + fscache_stat_unchecked(&fscache_n_acquires_null);
44121 _leave(" [no parent]");
44122 return NULL;
44123 }
44124 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
44125 /* allocate and initialise a cookie */
44126 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
44127 if (!cookie) {
44128 - fscache_stat(&fscache_n_acquires_oom);
44129 + fscache_stat_unchecked(&fscache_n_acquires_oom);
44130 _leave(" [ENOMEM]");
44131 return NULL;
44132 }
44133 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
44134
44135 switch (cookie->def->type) {
44136 case FSCACHE_COOKIE_TYPE_INDEX:
44137 - fscache_stat(&fscache_n_cookie_index);
44138 + fscache_stat_unchecked(&fscache_n_cookie_index);
44139 break;
44140 case FSCACHE_COOKIE_TYPE_DATAFILE:
44141 - fscache_stat(&fscache_n_cookie_data);
44142 + fscache_stat_unchecked(&fscache_n_cookie_data);
44143 break;
44144 default:
44145 - fscache_stat(&fscache_n_cookie_special);
44146 + fscache_stat_unchecked(&fscache_n_cookie_special);
44147 break;
44148 }
44149
44150 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
44151 if (fscache_acquire_non_index_cookie(cookie) < 0) {
44152 atomic_dec(&parent->n_children);
44153 __fscache_cookie_put(cookie);
44154 - fscache_stat(&fscache_n_acquires_nobufs);
44155 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
44156 _leave(" = NULL");
44157 return NULL;
44158 }
44159 }
44160
44161 - fscache_stat(&fscache_n_acquires_ok);
44162 + fscache_stat_unchecked(&fscache_n_acquires_ok);
44163 _leave(" = %p", cookie);
44164 return cookie;
44165 }
44166 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
44167 cache = fscache_select_cache_for_object(cookie->parent);
44168 if (!cache) {
44169 up_read(&fscache_addremove_sem);
44170 - fscache_stat(&fscache_n_acquires_no_cache);
44171 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
44172 _leave(" = -ENOMEDIUM [no cache]");
44173 return -ENOMEDIUM;
44174 }
44175 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
44176 object = cache->ops->alloc_object(cache, cookie);
44177 fscache_stat_d(&fscache_n_cop_alloc_object);
44178 if (IS_ERR(object)) {
44179 - fscache_stat(&fscache_n_object_no_alloc);
44180 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
44181 ret = PTR_ERR(object);
44182 goto error;
44183 }
44184
44185 - fscache_stat(&fscache_n_object_alloc);
44186 + fscache_stat_unchecked(&fscache_n_object_alloc);
44187
44188 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
44189
44190 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
44191 struct fscache_object *object;
44192 struct hlist_node *_p;
44193
44194 - fscache_stat(&fscache_n_updates);
44195 + fscache_stat_unchecked(&fscache_n_updates);
44196
44197 if (!cookie) {
44198 - fscache_stat(&fscache_n_updates_null);
44199 + fscache_stat_unchecked(&fscache_n_updates_null);
44200 _leave(" [no cookie]");
44201 return;
44202 }
44203 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
44204 struct fscache_object *object;
44205 unsigned long event;
44206
44207 - fscache_stat(&fscache_n_relinquishes);
44208 + fscache_stat_unchecked(&fscache_n_relinquishes);
44209 if (retire)
44210 - fscache_stat(&fscache_n_relinquishes_retire);
44211 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
44212
44213 if (!cookie) {
44214 - fscache_stat(&fscache_n_relinquishes_null);
44215 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
44216 _leave(" [no cookie]");
44217 return;
44218 }
44219 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
44220
44221 /* wait for the cookie to finish being instantiated (or to fail) */
44222 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
44223 - fscache_stat(&fscache_n_relinquishes_waitcrt);
44224 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
44225 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
44226 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
44227 }
44228 diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
44229 index f6aad48..88dcf26 100644
44230 --- a/fs/fscache/internal.h
44231 +++ b/fs/fscache/internal.h
44232 @@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
44233 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
44234 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
44235
44236 -extern atomic_t fscache_n_op_pend;
44237 -extern atomic_t fscache_n_op_run;
44238 -extern atomic_t fscache_n_op_enqueue;
44239 -extern atomic_t fscache_n_op_deferred_release;
44240 -extern atomic_t fscache_n_op_release;
44241 -extern atomic_t fscache_n_op_gc;
44242 -extern atomic_t fscache_n_op_cancelled;
44243 -extern atomic_t fscache_n_op_rejected;
44244 +extern atomic_unchecked_t fscache_n_op_pend;
44245 +extern atomic_unchecked_t fscache_n_op_run;
44246 +extern atomic_unchecked_t fscache_n_op_enqueue;
44247 +extern atomic_unchecked_t fscache_n_op_deferred_release;
44248 +extern atomic_unchecked_t fscache_n_op_release;
44249 +extern atomic_unchecked_t fscache_n_op_gc;
44250 +extern atomic_unchecked_t fscache_n_op_cancelled;
44251 +extern atomic_unchecked_t fscache_n_op_rejected;
44252
44253 -extern atomic_t fscache_n_attr_changed;
44254 -extern atomic_t fscache_n_attr_changed_ok;
44255 -extern atomic_t fscache_n_attr_changed_nobufs;
44256 -extern atomic_t fscache_n_attr_changed_nomem;
44257 -extern atomic_t fscache_n_attr_changed_calls;
44258 +extern atomic_unchecked_t fscache_n_attr_changed;
44259 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
44260 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
44261 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
44262 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
44263
44264 -extern atomic_t fscache_n_allocs;
44265 -extern atomic_t fscache_n_allocs_ok;
44266 -extern atomic_t fscache_n_allocs_wait;
44267 -extern atomic_t fscache_n_allocs_nobufs;
44268 -extern atomic_t fscache_n_allocs_intr;
44269 -extern atomic_t fscache_n_allocs_object_dead;
44270 -extern atomic_t fscache_n_alloc_ops;
44271 -extern atomic_t fscache_n_alloc_op_waits;
44272 +extern atomic_unchecked_t fscache_n_allocs;
44273 +extern atomic_unchecked_t fscache_n_allocs_ok;
44274 +extern atomic_unchecked_t fscache_n_allocs_wait;
44275 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
44276 +extern atomic_unchecked_t fscache_n_allocs_intr;
44277 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
44278 +extern atomic_unchecked_t fscache_n_alloc_ops;
44279 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
44280
44281 -extern atomic_t fscache_n_retrievals;
44282 -extern atomic_t fscache_n_retrievals_ok;
44283 -extern atomic_t fscache_n_retrievals_wait;
44284 -extern atomic_t fscache_n_retrievals_nodata;
44285 -extern atomic_t fscache_n_retrievals_nobufs;
44286 -extern atomic_t fscache_n_retrievals_intr;
44287 -extern atomic_t fscache_n_retrievals_nomem;
44288 -extern atomic_t fscache_n_retrievals_object_dead;
44289 -extern atomic_t fscache_n_retrieval_ops;
44290 -extern atomic_t fscache_n_retrieval_op_waits;
44291 +extern atomic_unchecked_t fscache_n_retrievals;
44292 +extern atomic_unchecked_t fscache_n_retrievals_ok;
44293 +extern atomic_unchecked_t fscache_n_retrievals_wait;
44294 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
44295 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
44296 +extern atomic_unchecked_t fscache_n_retrievals_intr;
44297 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
44298 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
44299 +extern atomic_unchecked_t fscache_n_retrieval_ops;
44300 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
44301
44302 -extern atomic_t fscache_n_stores;
44303 -extern atomic_t fscache_n_stores_ok;
44304 -extern atomic_t fscache_n_stores_again;
44305 -extern atomic_t fscache_n_stores_nobufs;
44306 -extern atomic_t fscache_n_stores_oom;
44307 -extern atomic_t fscache_n_store_ops;
44308 -extern atomic_t fscache_n_store_calls;
44309 -extern atomic_t fscache_n_store_pages;
44310 -extern atomic_t fscache_n_store_radix_deletes;
44311 -extern atomic_t fscache_n_store_pages_over_limit;
44312 +extern atomic_unchecked_t fscache_n_stores;
44313 +extern atomic_unchecked_t fscache_n_stores_ok;
44314 +extern atomic_unchecked_t fscache_n_stores_again;
44315 +extern atomic_unchecked_t fscache_n_stores_nobufs;
44316 +extern atomic_unchecked_t fscache_n_stores_oom;
44317 +extern atomic_unchecked_t fscache_n_store_ops;
44318 +extern atomic_unchecked_t fscache_n_store_calls;
44319 +extern atomic_unchecked_t fscache_n_store_pages;
44320 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
44321 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
44322
44323 -extern atomic_t fscache_n_store_vmscan_not_storing;
44324 -extern atomic_t fscache_n_store_vmscan_gone;
44325 -extern atomic_t fscache_n_store_vmscan_busy;
44326 -extern atomic_t fscache_n_store_vmscan_cancelled;
44327 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
44328 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
44329 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
44330 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
44331
44332 -extern atomic_t fscache_n_marks;
44333 -extern atomic_t fscache_n_uncaches;
44334 +extern atomic_unchecked_t fscache_n_marks;
44335 +extern atomic_unchecked_t fscache_n_uncaches;
44336
44337 -extern atomic_t fscache_n_acquires;
44338 -extern atomic_t fscache_n_acquires_null;
44339 -extern atomic_t fscache_n_acquires_no_cache;
44340 -extern atomic_t fscache_n_acquires_ok;
44341 -extern atomic_t fscache_n_acquires_nobufs;
44342 -extern atomic_t fscache_n_acquires_oom;
44343 +extern atomic_unchecked_t fscache_n_acquires;
44344 +extern atomic_unchecked_t fscache_n_acquires_null;
44345 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
44346 +extern atomic_unchecked_t fscache_n_acquires_ok;
44347 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
44348 +extern atomic_unchecked_t fscache_n_acquires_oom;
44349
44350 -extern atomic_t fscache_n_updates;
44351 -extern atomic_t fscache_n_updates_null;
44352 -extern atomic_t fscache_n_updates_run;
44353 +extern atomic_unchecked_t fscache_n_updates;
44354 +extern atomic_unchecked_t fscache_n_updates_null;
44355 +extern atomic_unchecked_t fscache_n_updates_run;
44356
44357 -extern atomic_t fscache_n_relinquishes;
44358 -extern atomic_t fscache_n_relinquishes_null;
44359 -extern atomic_t fscache_n_relinquishes_waitcrt;
44360 -extern atomic_t fscache_n_relinquishes_retire;
44361 +extern atomic_unchecked_t fscache_n_relinquishes;
44362 +extern atomic_unchecked_t fscache_n_relinquishes_null;
44363 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
44364 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
44365
44366 -extern atomic_t fscache_n_cookie_index;
44367 -extern atomic_t fscache_n_cookie_data;
44368 -extern atomic_t fscache_n_cookie_special;
44369 +extern atomic_unchecked_t fscache_n_cookie_index;
44370 +extern atomic_unchecked_t fscache_n_cookie_data;
44371 +extern atomic_unchecked_t fscache_n_cookie_special;
44372
44373 -extern atomic_t fscache_n_object_alloc;
44374 -extern atomic_t fscache_n_object_no_alloc;
44375 -extern atomic_t fscache_n_object_lookups;
44376 -extern atomic_t fscache_n_object_lookups_negative;
44377 -extern atomic_t fscache_n_object_lookups_positive;
44378 -extern atomic_t fscache_n_object_lookups_timed_out;
44379 -extern atomic_t fscache_n_object_created;
44380 -extern atomic_t fscache_n_object_avail;
44381 -extern atomic_t fscache_n_object_dead;
44382 +extern atomic_unchecked_t fscache_n_object_alloc;
44383 +extern atomic_unchecked_t fscache_n_object_no_alloc;
44384 +extern atomic_unchecked_t fscache_n_object_lookups;
44385 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
44386 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
44387 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
44388 +extern atomic_unchecked_t fscache_n_object_created;
44389 +extern atomic_unchecked_t fscache_n_object_avail;
44390 +extern atomic_unchecked_t fscache_n_object_dead;
44391
44392 -extern atomic_t fscache_n_checkaux_none;
44393 -extern atomic_t fscache_n_checkaux_okay;
44394 -extern atomic_t fscache_n_checkaux_update;
44395 -extern atomic_t fscache_n_checkaux_obsolete;
44396 +extern atomic_unchecked_t fscache_n_checkaux_none;
44397 +extern atomic_unchecked_t fscache_n_checkaux_okay;
44398 +extern atomic_unchecked_t fscache_n_checkaux_update;
44399 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
44400
44401 extern atomic_t fscache_n_cop_alloc_object;
44402 extern atomic_t fscache_n_cop_lookup_object;
44403 @@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t *stat)
44404 atomic_inc(stat);
44405 }
44406
44407 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
44408 +{
44409 + atomic_inc_unchecked(stat);
44410 +}
44411 +
44412 static inline void fscache_stat_d(atomic_t *stat)
44413 {
44414 atomic_dec(stat);
44415 @@ -267,6 +272,7 @@ extern const struct file_operations fscache_stats_fops;
44416
44417 #define __fscache_stat(stat) (NULL)
44418 #define fscache_stat(stat) do {} while (0)
44419 +#define fscache_stat_unchecked(stat) do {} while (0)
44420 #define fscache_stat_d(stat) do {} while (0)
44421 #endif
44422
44423 diff --git a/fs/fscache/object.c b/fs/fscache/object.c
44424 index b6b897c..0ffff9c 100644
44425 --- a/fs/fscache/object.c
44426 +++ b/fs/fscache/object.c
44427 @@ -128,7 +128,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
44428 /* update the object metadata on disk */
44429 case FSCACHE_OBJECT_UPDATING:
44430 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
44431 - fscache_stat(&fscache_n_updates_run);
44432 + fscache_stat_unchecked(&fscache_n_updates_run);
44433 fscache_stat(&fscache_n_cop_update_object);
44434 object->cache->ops->update_object(object);
44435 fscache_stat_d(&fscache_n_cop_update_object);
44436 @@ -217,7 +217,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
44437 spin_lock(&object->lock);
44438 object->state = FSCACHE_OBJECT_DEAD;
44439 spin_unlock(&object->lock);
44440 - fscache_stat(&fscache_n_object_dead);
44441 + fscache_stat_unchecked(&fscache_n_object_dead);
44442 goto terminal_transit;
44443
44444 /* handle the parent cache of this object being withdrawn from
44445 @@ -232,7 +232,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
44446 spin_lock(&object->lock);
44447 object->state = FSCACHE_OBJECT_DEAD;
44448 spin_unlock(&object->lock);
44449 - fscache_stat(&fscache_n_object_dead);
44450 + fscache_stat_unchecked(&fscache_n_object_dead);
44451 goto terminal_transit;
44452
44453 /* complain about the object being woken up once it is
44454 @@ -461,7 +461,7 @@ static void fscache_lookup_object(struct fscache_object *object)
44455 parent->cookie->def->name, cookie->def->name,
44456 object->cache->tag->name);
44457
44458 - fscache_stat(&fscache_n_object_lookups);
44459 + fscache_stat_unchecked(&fscache_n_object_lookups);
44460 fscache_stat(&fscache_n_cop_lookup_object);
44461 ret = object->cache->ops->lookup_object(object);
44462 fscache_stat_d(&fscache_n_cop_lookup_object);
44463 @@ -472,7 +472,7 @@ static void fscache_lookup_object(struct fscache_object *object)
44464 if (ret == -ETIMEDOUT) {
44465 /* probably stuck behind another object, so move this one to
44466 * the back of the queue */
44467 - fscache_stat(&fscache_n_object_lookups_timed_out);
44468 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
44469 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
44470 }
44471
44472 @@ -495,7 +495,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
44473
44474 spin_lock(&object->lock);
44475 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
44476 - fscache_stat(&fscache_n_object_lookups_negative);
44477 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
44478
44479 /* transit here to allow write requests to begin stacking up
44480 * and read requests to begin returning ENODATA */
44481 @@ -541,7 +541,7 @@ void fscache_obtained_object(struct fscache_object *object)
44482 * result, in which case there may be data available */
44483 spin_lock(&object->lock);
44484 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
44485 - fscache_stat(&fscache_n_object_lookups_positive);
44486 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
44487
44488 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
44489
44490 @@ -555,7 +555,7 @@ void fscache_obtained_object(struct fscache_object *object)
44491 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
44492 } else {
44493 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
44494 - fscache_stat(&fscache_n_object_created);
44495 + fscache_stat_unchecked(&fscache_n_object_created);
44496
44497 object->state = FSCACHE_OBJECT_AVAILABLE;
44498 spin_unlock(&object->lock);
44499 @@ -602,7 +602,7 @@ static void fscache_object_available(struct fscache_object *object)
44500 fscache_enqueue_dependents(object);
44501
44502 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
44503 - fscache_stat(&fscache_n_object_avail);
44504 + fscache_stat_unchecked(&fscache_n_object_avail);
44505
44506 _leave("");
44507 }
44508 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
44509 enum fscache_checkaux result;
44510
44511 if (!object->cookie->def->check_aux) {
44512 - fscache_stat(&fscache_n_checkaux_none);
44513 + fscache_stat_unchecked(&fscache_n_checkaux_none);
44514 return FSCACHE_CHECKAUX_OKAY;
44515 }
44516
44517 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
44518 switch (result) {
44519 /* entry okay as is */
44520 case FSCACHE_CHECKAUX_OKAY:
44521 - fscache_stat(&fscache_n_checkaux_okay);
44522 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
44523 break;
44524
44525 /* entry requires update */
44526 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
44527 - fscache_stat(&fscache_n_checkaux_update);
44528 + fscache_stat_unchecked(&fscache_n_checkaux_update);
44529 break;
44530
44531 /* entry requires deletion */
44532 case FSCACHE_CHECKAUX_OBSOLETE:
44533 - fscache_stat(&fscache_n_checkaux_obsolete);
44534 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
44535 break;
44536
44537 default:
44538 diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
44539 index 30afdfa..2256596 100644
44540 --- a/fs/fscache/operation.c
44541 +++ b/fs/fscache/operation.c
44542 @@ -17,7 +17,7 @@
44543 #include <linux/slab.h>
44544 #include "internal.h"
44545
44546 -atomic_t fscache_op_debug_id;
44547 +atomic_unchecked_t fscache_op_debug_id;
44548 EXPORT_SYMBOL(fscache_op_debug_id);
44549
44550 /**
44551 @@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
44552 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
44553 ASSERTCMP(atomic_read(&op->usage), >, 0);
44554
44555 - fscache_stat(&fscache_n_op_enqueue);
44556 + fscache_stat_unchecked(&fscache_n_op_enqueue);
44557 switch (op->flags & FSCACHE_OP_TYPE) {
44558 case FSCACHE_OP_ASYNC:
44559 _debug("queue async");
44560 @@ -69,7 +69,7 @@ static void fscache_run_op(struct fscache_object *object,
44561 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
44562 if (op->processor)
44563 fscache_enqueue_operation(op);
44564 - fscache_stat(&fscache_n_op_run);
44565 + fscache_stat_unchecked(&fscache_n_op_run);
44566 }
44567
44568 /*
44569 @@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
44570 if (object->n_ops > 1) {
44571 atomic_inc(&op->usage);
44572 list_add_tail(&op->pend_link, &object->pending_ops);
44573 - fscache_stat(&fscache_n_op_pend);
44574 + fscache_stat_unchecked(&fscache_n_op_pend);
44575 } else if (!list_empty(&object->pending_ops)) {
44576 atomic_inc(&op->usage);
44577 list_add_tail(&op->pend_link, &object->pending_ops);
44578 - fscache_stat(&fscache_n_op_pend);
44579 + fscache_stat_unchecked(&fscache_n_op_pend);
44580 fscache_start_operations(object);
44581 } else {
44582 ASSERTCMP(object->n_in_progress, ==, 0);
44583 @@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
44584 object->n_exclusive++; /* reads and writes must wait */
44585 atomic_inc(&op->usage);
44586 list_add_tail(&op->pend_link, &object->pending_ops);
44587 - fscache_stat(&fscache_n_op_pend);
44588 + fscache_stat_unchecked(&fscache_n_op_pend);
44589 ret = 0;
44590 } else {
44591 /* not allowed to submit ops in any other state */
44592 @@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_object *object,
44593 if (object->n_exclusive > 0) {
44594 atomic_inc(&op->usage);
44595 list_add_tail(&op->pend_link, &object->pending_ops);
44596 - fscache_stat(&fscache_n_op_pend);
44597 + fscache_stat_unchecked(&fscache_n_op_pend);
44598 } else if (!list_empty(&object->pending_ops)) {
44599 atomic_inc(&op->usage);
44600 list_add_tail(&op->pend_link, &object->pending_ops);
44601 - fscache_stat(&fscache_n_op_pend);
44602 + fscache_stat_unchecked(&fscache_n_op_pend);
44603 fscache_start_operations(object);
44604 } else {
44605 ASSERTCMP(object->n_exclusive, ==, 0);
44606 @@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_object *object,
44607 object->n_ops++;
44608 atomic_inc(&op->usage);
44609 list_add_tail(&op->pend_link, &object->pending_ops);
44610 - fscache_stat(&fscache_n_op_pend);
44611 + fscache_stat_unchecked(&fscache_n_op_pend);
44612 ret = 0;
44613 } else if (object->state == FSCACHE_OBJECT_DYING ||
44614 object->state == FSCACHE_OBJECT_LC_DYING ||
44615 object->state == FSCACHE_OBJECT_WITHDRAWING) {
44616 - fscache_stat(&fscache_n_op_rejected);
44617 + fscache_stat_unchecked(&fscache_n_op_rejected);
44618 ret = -ENOBUFS;
44619 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
44620 fscache_report_unexpected_submission(object, op, ostate);
44621 @@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_operation *op)
44622
44623 ret = -EBUSY;
44624 if (!list_empty(&op->pend_link)) {
44625 - fscache_stat(&fscache_n_op_cancelled);
44626 + fscache_stat_unchecked(&fscache_n_op_cancelled);
44627 list_del_init(&op->pend_link);
44628 object->n_ops--;
44629 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
44630 @@ -331,7 +331,7 @@ void fscache_put_operation(struct fscache_operation *op)
44631 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
44632 BUG();
44633
44634 - fscache_stat(&fscache_n_op_release);
44635 + fscache_stat_unchecked(&fscache_n_op_release);
44636
44637 if (op->release) {
44638 op->release(op);
44639 @@ -348,7 +348,7 @@ void fscache_put_operation(struct fscache_operation *op)
44640 * lock, and defer it otherwise */
44641 if (!spin_trylock(&object->lock)) {
44642 _debug("defer put");
44643 - fscache_stat(&fscache_n_op_deferred_release);
44644 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
44645
44646 cache = object->cache;
44647 spin_lock(&cache->op_gc_list_lock);
44648 @@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_struct *work)
44649
44650 _debug("GC DEFERRED REL OBJ%x OP%x",
44651 object->debug_id, op->debug_id);
44652 - fscache_stat(&fscache_n_op_gc);
44653 + fscache_stat_unchecked(&fscache_n_op_gc);
44654
44655 ASSERTCMP(atomic_read(&op->usage), ==, 0);
44656
44657 diff --git a/fs/fscache/page.c b/fs/fscache/page.c
44658 index 3f7a59b..cf196cc 100644
44659 --- a/fs/fscache/page.c
44660 +++ b/fs/fscache/page.c
44661 @@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
44662 val = radix_tree_lookup(&cookie->stores, page->index);
44663 if (!val) {
44664 rcu_read_unlock();
44665 - fscache_stat(&fscache_n_store_vmscan_not_storing);
44666 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
44667 __fscache_uncache_page(cookie, page);
44668 return true;
44669 }
44670 @@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
44671 spin_unlock(&cookie->stores_lock);
44672
44673 if (xpage) {
44674 - fscache_stat(&fscache_n_store_vmscan_cancelled);
44675 - fscache_stat(&fscache_n_store_radix_deletes);
44676 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
44677 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
44678 ASSERTCMP(xpage, ==, page);
44679 } else {
44680 - fscache_stat(&fscache_n_store_vmscan_gone);
44681 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
44682 }
44683
44684 wake_up_bit(&cookie->flags, 0);
44685 @@ -107,7 +107,7 @@ page_busy:
44686 /* we might want to wait here, but that could deadlock the allocator as
44687 * the work threads writing to the cache may all end up sleeping
44688 * on memory allocation */
44689 - fscache_stat(&fscache_n_store_vmscan_busy);
44690 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
44691 return false;
44692 }
44693 EXPORT_SYMBOL(__fscache_maybe_release_page);
44694 @@ -131,7 +131,7 @@ static void fscache_end_page_write(struct fscache_object *object,
44695 FSCACHE_COOKIE_STORING_TAG);
44696 if (!radix_tree_tag_get(&cookie->stores, page->index,
44697 FSCACHE_COOKIE_PENDING_TAG)) {
44698 - fscache_stat(&fscache_n_store_radix_deletes);
44699 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
44700 xpage = radix_tree_delete(&cookie->stores, page->index);
44701 }
44702 spin_unlock(&cookie->stores_lock);
44703 @@ -152,7 +152,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
44704
44705 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
44706
44707 - fscache_stat(&fscache_n_attr_changed_calls);
44708 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
44709
44710 if (fscache_object_is_active(object)) {
44711 fscache_stat(&fscache_n_cop_attr_changed);
44712 @@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
44713
44714 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
44715
44716 - fscache_stat(&fscache_n_attr_changed);
44717 + fscache_stat_unchecked(&fscache_n_attr_changed);
44718
44719 op = kzalloc(sizeof(*op), GFP_KERNEL);
44720 if (!op) {
44721 - fscache_stat(&fscache_n_attr_changed_nomem);
44722 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
44723 _leave(" = -ENOMEM");
44724 return -ENOMEM;
44725 }
44726 @@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
44727 if (fscache_submit_exclusive_op(object, op) < 0)
44728 goto nobufs;
44729 spin_unlock(&cookie->lock);
44730 - fscache_stat(&fscache_n_attr_changed_ok);
44731 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
44732 fscache_put_operation(op);
44733 _leave(" = 0");
44734 return 0;
44735 @@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
44736 nobufs:
44737 spin_unlock(&cookie->lock);
44738 kfree(op);
44739 - fscache_stat(&fscache_n_attr_changed_nobufs);
44740 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
44741 _leave(" = %d", -ENOBUFS);
44742 return -ENOBUFS;
44743 }
44744 @@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
44745 /* allocate a retrieval operation and attempt to submit it */
44746 op = kzalloc(sizeof(*op), GFP_NOIO);
44747 if (!op) {
44748 - fscache_stat(&fscache_n_retrievals_nomem);
44749 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
44750 return NULL;
44751 }
44752
44753 @@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
44754 return 0;
44755 }
44756
44757 - fscache_stat(&fscache_n_retrievals_wait);
44758 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
44759
44760 jif = jiffies;
44761 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
44762 fscache_wait_bit_interruptible,
44763 TASK_INTERRUPTIBLE) != 0) {
44764 - fscache_stat(&fscache_n_retrievals_intr);
44765 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
44766 _leave(" = -ERESTARTSYS");
44767 return -ERESTARTSYS;
44768 }
44769 @@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
44770 */
44771 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
44772 struct fscache_retrieval *op,
44773 - atomic_t *stat_op_waits,
44774 - atomic_t *stat_object_dead)
44775 + atomic_unchecked_t *stat_op_waits,
44776 + atomic_unchecked_t *stat_object_dead)
44777 {
44778 int ret;
44779
44780 @@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
44781 goto check_if_dead;
44782
44783 _debug(">>> WT");
44784 - fscache_stat(stat_op_waits);
44785 + fscache_stat_unchecked(stat_op_waits);
44786 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
44787 fscache_wait_bit_interruptible,
44788 TASK_INTERRUPTIBLE) < 0) {
44789 @@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
44790
44791 check_if_dead:
44792 if (unlikely(fscache_object_is_dead(object))) {
44793 - fscache_stat(stat_object_dead);
44794 + fscache_stat_unchecked(stat_object_dead);
44795 return -ENOBUFS;
44796 }
44797 return 0;
44798 @@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
44799
44800 _enter("%p,%p,,,", cookie, page);
44801
44802 - fscache_stat(&fscache_n_retrievals);
44803 + fscache_stat_unchecked(&fscache_n_retrievals);
44804
44805 if (hlist_empty(&cookie->backing_objects))
44806 goto nobufs;
44807 @@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
44808 goto nobufs_unlock;
44809 spin_unlock(&cookie->lock);
44810
44811 - fscache_stat(&fscache_n_retrieval_ops);
44812 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
44813
44814 /* pin the netfs read context in case we need to do the actual netfs
44815 * read because we've encountered a cache read failure */
44816 @@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
44817
44818 error:
44819 if (ret == -ENOMEM)
44820 - fscache_stat(&fscache_n_retrievals_nomem);
44821 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
44822 else if (ret == -ERESTARTSYS)
44823 - fscache_stat(&fscache_n_retrievals_intr);
44824 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
44825 else if (ret == -ENODATA)
44826 - fscache_stat(&fscache_n_retrievals_nodata);
44827 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
44828 else if (ret < 0)
44829 - fscache_stat(&fscache_n_retrievals_nobufs);
44830 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
44831 else
44832 - fscache_stat(&fscache_n_retrievals_ok);
44833 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
44834
44835 fscache_put_retrieval(op);
44836 _leave(" = %d", ret);
44837 @@ -429,7 +429,7 @@ nobufs_unlock:
44838 spin_unlock(&cookie->lock);
44839 kfree(op);
44840 nobufs:
44841 - fscache_stat(&fscache_n_retrievals_nobufs);
44842 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
44843 _leave(" = -ENOBUFS");
44844 return -ENOBUFS;
44845 }
44846 @@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
44847
44848 _enter("%p,,%d,,,", cookie, *nr_pages);
44849
44850 - fscache_stat(&fscache_n_retrievals);
44851 + fscache_stat_unchecked(&fscache_n_retrievals);
44852
44853 if (hlist_empty(&cookie->backing_objects))
44854 goto nobufs;
44855 @@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
44856 goto nobufs_unlock;
44857 spin_unlock(&cookie->lock);
44858
44859 - fscache_stat(&fscache_n_retrieval_ops);
44860 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
44861
44862 /* pin the netfs read context in case we need to do the actual netfs
44863 * read because we've encountered a cache read failure */
44864 @@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
44865
44866 error:
44867 if (ret == -ENOMEM)
44868 - fscache_stat(&fscache_n_retrievals_nomem);
44869 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
44870 else if (ret == -ERESTARTSYS)
44871 - fscache_stat(&fscache_n_retrievals_intr);
44872 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
44873 else if (ret == -ENODATA)
44874 - fscache_stat(&fscache_n_retrievals_nodata);
44875 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
44876 else if (ret < 0)
44877 - fscache_stat(&fscache_n_retrievals_nobufs);
44878 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
44879 else
44880 - fscache_stat(&fscache_n_retrievals_ok);
44881 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
44882
44883 fscache_put_retrieval(op);
44884 _leave(" = %d", ret);
44885 @@ -545,7 +545,7 @@ nobufs_unlock:
44886 spin_unlock(&cookie->lock);
44887 kfree(op);
44888 nobufs:
44889 - fscache_stat(&fscache_n_retrievals_nobufs);
44890 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
44891 _leave(" = -ENOBUFS");
44892 return -ENOBUFS;
44893 }
44894 @@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
44895
44896 _enter("%p,%p,,,", cookie, page);
44897
44898 - fscache_stat(&fscache_n_allocs);
44899 + fscache_stat_unchecked(&fscache_n_allocs);
44900
44901 if (hlist_empty(&cookie->backing_objects))
44902 goto nobufs;
44903 @@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
44904 goto nobufs_unlock;
44905 spin_unlock(&cookie->lock);
44906
44907 - fscache_stat(&fscache_n_alloc_ops);
44908 + fscache_stat_unchecked(&fscache_n_alloc_ops);
44909
44910 ret = fscache_wait_for_retrieval_activation(
44911 object, op,
44912 @@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
44913
44914 error:
44915 if (ret == -ERESTARTSYS)
44916 - fscache_stat(&fscache_n_allocs_intr);
44917 + fscache_stat_unchecked(&fscache_n_allocs_intr);
44918 else if (ret < 0)
44919 - fscache_stat(&fscache_n_allocs_nobufs);
44920 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
44921 else
44922 - fscache_stat(&fscache_n_allocs_ok);
44923 + fscache_stat_unchecked(&fscache_n_allocs_ok);
44924
44925 fscache_put_retrieval(op);
44926 _leave(" = %d", ret);
44927 @@ -625,7 +625,7 @@ nobufs_unlock:
44928 spin_unlock(&cookie->lock);
44929 kfree(op);
44930 nobufs:
44931 - fscache_stat(&fscache_n_allocs_nobufs);
44932 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
44933 _leave(" = -ENOBUFS");
44934 return -ENOBUFS;
44935 }
44936 @@ -666,7 +666,7 @@ static void fscache_write_op(struct fscache_operation *_op)
44937
44938 spin_lock(&cookie->stores_lock);
44939
44940 - fscache_stat(&fscache_n_store_calls);
44941 + fscache_stat_unchecked(&fscache_n_store_calls);
44942
44943 /* find a page to store */
44944 page = NULL;
44945 @@ -677,7 +677,7 @@ static void fscache_write_op(struct fscache_operation *_op)
44946 page = results[0];
44947 _debug("gang %d [%lx]", n, page->index);
44948 if (page->index > op->store_limit) {
44949 - fscache_stat(&fscache_n_store_pages_over_limit);
44950 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
44951 goto superseded;
44952 }
44953
44954 @@ -689,7 +689,7 @@ static void fscache_write_op(struct fscache_operation *_op)
44955 spin_unlock(&cookie->stores_lock);
44956 spin_unlock(&object->lock);
44957
44958 - fscache_stat(&fscache_n_store_pages);
44959 + fscache_stat_unchecked(&fscache_n_store_pages);
44960 fscache_stat(&fscache_n_cop_write_page);
44961 ret = object->cache->ops->write_page(op, page);
44962 fscache_stat_d(&fscache_n_cop_write_page);
44963 @@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
44964 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
44965 ASSERT(PageFsCache(page));
44966
44967 - fscache_stat(&fscache_n_stores);
44968 + fscache_stat_unchecked(&fscache_n_stores);
44969
44970 op = kzalloc(sizeof(*op), GFP_NOIO);
44971 if (!op)
44972 @@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
44973 spin_unlock(&cookie->stores_lock);
44974 spin_unlock(&object->lock);
44975
44976 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
44977 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
44978 op->store_limit = object->store_limit;
44979
44980 if (fscache_submit_op(object, &op->op) < 0)
44981 @@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
44982
44983 spin_unlock(&cookie->lock);
44984 radix_tree_preload_end();
44985 - fscache_stat(&fscache_n_store_ops);
44986 - fscache_stat(&fscache_n_stores_ok);
44987 + fscache_stat_unchecked(&fscache_n_store_ops);
44988 + fscache_stat_unchecked(&fscache_n_stores_ok);
44989
44990 /* the work queue now carries its own ref on the object */
44991 fscache_put_operation(&op->op);
44992 @@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
44993 return 0;
44994
44995 already_queued:
44996 - fscache_stat(&fscache_n_stores_again);
44997 + fscache_stat_unchecked(&fscache_n_stores_again);
44998 already_pending:
44999 spin_unlock(&cookie->stores_lock);
45000 spin_unlock(&object->lock);
45001 spin_unlock(&cookie->lock);
45002 radix_tree_preload_end();
45003 kfree(op);
45004 - fscache_stat(&fscache_n_stores_ok);
45005 + fscache_stat_unchecked(&fscache_n_stores_ok);
45006 _leave(" = 0");
45007 return 0;
45008
45009 @@ -851,14 +851,14 @@ nobufs:
45010 spin_unlock(&cookie->lock);
45011 radix_tree_preload_end();
45012 kfree(op);
45013 - fscache_stat(&fscache_n_stores_nobufs);
45014 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
45015 _leave(" = -ENOBUFS");
45016 return -ENOBUFS;
45017
45018 nomem_free:
45019 kfree(op);
45020 nomem:
45021 - fscache_stat(&fscache_n_stores_oom);
45022 + fscache_stat_unchecked(&fscache_n_stores_oom);
45023 _leave(" = -ENOMEM");
45024 return -ENOMEM;
45025 }
45026 @@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
45027 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
45028 ASSERTCMP(page, !=, NULL);
45029
45030 - fscache_stat(&fscache_n_uncaches);
45031 + fscache_stat_unchecked(&fscache_n_uncaches);
45032
45033 /* cache withdrawal may beat us to it */
45034 if (!PageFsCache(page))
45035 @@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
45036 unsigned long loop;
45037
45038 #ifdef CONFIG_FSCACHE_STATS
45039 - atomic_add(pagevec->nr, &fscache_n_marks);
45040 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
45041 #endif
45042
45043 for (loop = 0; loop < pagevec->nr; loop++) {
45044 diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
45045 index 4765190..2a067f2 100644
45046 --- a/fs/fscache/stats.c
45047 +++ b/fs/fscache/stats.c
45048 @@ -18,95 +18,95 @@
45049 /*
45050 * operation counters
45051 */
45052 -atomic_t fscache_n_op_pend;
45053 -atomic_t fscache_n_op_run;
45054 -atomic_t fscache_n_op_enqueue;
45055 -atomic_t fscache_n_op_requeue;
45056 -atomic_t fscache_n_op_deferred_release;
45057 -atomic_t fscache_n_op_release;
45058 -atomic_t fscache_n_op_gc;
45059 -atomic_t fscache_n_op_cancelled;
45060 -atomic_t fscache_n_op_rejected;
45061 +atomic_unchecked_t fscache_n_op_pend;
45062 +atomic_unchecked_t fscache_n_op_run;
45063 +atomic_unchecked_t fscache_n_op_enqueue;
45064 +atomic_unchecked_t fscache_n_op_requeue;
45065 +atomic_unchecked_t fscache_n_op_deferred_release;
45066 +atomic_unchecked_t fscache_n_op_release;
45067 +atomic_unchecked_t fscache_n_op_gc;
45068 +atomic_unchecked_t fscache_n_op_cancelled;
45069 +atomic_unchecked_t fscache_n_op_rejected;
45070
45071 -atomic_t fscache_n_attr_changed;
45072 -atomic_t fscache_n_attr_changed_ok;
45073 -atomic_t fscache_n_attr_changed_nobufs;
45074 -atomic_t fscache_n_attr_changed_nomem;
45075 -atomic_t fscache_n_attr_changed_calls;
45076 +atomic_unchecked_t fscache_n_attr_changed;
45077 +atomic_unchecked_t fscache_n_attr_changed_ok;
45078 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
45079 +atomic_unchecked_t fscache_n_attr_changed_nomem;
45080 +atomic_unchecked_t fscache_n_attr_changed_calls;
45081
45082 -atomic_t fscache_n_allocs;
45083 -atomic_t fscache_n_allocs_ok;
45084 -atomic_t fscache_n_allocs_wait;
45085 -atomic_t fscache_n_allocs_nobufs;
45086 -atomic_t fscache_n_allocs_intr;
45087 -atomic_t fscache_n_allocs_object_dead;
45088 -atomic_t fscache_n_alloc_ops;
45089 -atomic_t fscache_n_alloc_op_waits;
45090 +atomic_unchecked_t fscache_n_allocs;
45091 +atomic_unchecked_t fscache_n_allocs_ok;
45092 +atomic_unchecked_t fscache_n_allocs_wait;
45093 +atomic_unchecked_t fscache_n_allocs_nobufs;
45094 +atomic_unchecked_t fscache_n_allocs_intr;
45095 +atomic_unchecked_t fscache_n_allocs_object_dead;
45096 +atomic_unchecked_t fscache_n_alloc_ops;
45097 +atomic_unchecked_t fscache_n_alloc_op_waits;
45098
45099 -atomic_t fscache_n_retrievals;
45100 -atomic_t fscache_n_retrievals_ok;
45101 -atomic_t fscache_n_retrievals_wait;
45102 -atomic_t fscache_n_retrievals_nodata;
45103 -atomic_t fscache_n_retrievals_nobufs;
45104 -atomic_t fscache_n_retrievals_intr;
45105 -atomic_t fscache_n_retrievals_nomem;
45106 -atomic_t fscache_n_retrievals_object_dead;
45107 -atomic_t fscache_n_retrieval_ops;
45108 -atomic_t fscache_n_retrieval_op_waits;
45109 +atomic_unchecked_t fscache_n_retrievals;
45110 +atomic_unchecked_t fscache_n_retrievals_ok;
45111 +atomic_unchecked_t fscache_n_retrievals_wait;
45112 +atomic_unchecked_t fscache_n_retrievals_nodata;
45113 +atomic_unchecked_t fscache_n_retrievals_nobufs;
45114 +atomic_unchecked_t fscache_n_retrievals_intr;
45115 +atomic_unchecked_t fscache_n_retrievals_nomem;
45116 +atomic_unchecked_t fscache_n_retrievals_object_dead;
45117 +atomic_unchecked_t fscache_n_retrieval_ops;
45118 +atomic_unchecked_t fscache_n_retrieval_op_waits;
45119
45120 -atomic_t fscache_n_stores;
45121 -atomic_t fscache_n_stores_ok;
45122 -atomic_t fscache_n_stores_again;
45123 -atomic_t fscache_n_stores_nobufs;
45124 -atomic_t fscache_n_stores_oom;
45125 -atomic_t fscache_n_store_ops;
45126 -atomic_t fscache_n_store_calls;
45127 -atomic_t fscache_n_store_pages;
45128 -atomic_t fscache_n_store_radix_deletes;
45129 -atomic_t fscache_n_store_pages_over_limit;
45130 +atomic_unchecked_t fscache_n_stores;
45131 +atomic_unchecked_t fscache_n_stores_ok;
45132 +atomic_unchecked_t fscache_n_stores_again;
45133 +atomic_unchecked_t fscache_n_stores_nobufs;
45134 +atomic_unchecked_t fscache_n_stores_oom;
45135 +atomic_unchecked_t fscache_n_store_ops;
45136 +atomic_unchecked_t fscache_n_store_calls;
45137 +atomic_unchecked_t fscache_n_store_pages;
45138 +atomic_unchecked_t fscache_n_store_radix_deletes;
45139 +atomic_unchecked_t fscache_n_store_pages_over_limit;
45140
45141 -atomic_t fscache_n_store_vmscan_not_storing;
45142 -atomic_t fscache_n_store_vmscan_gone;
45143 -atomic_t fscache_n_store_vmscan_busy;
45144 -atomic_t fscache_n_store_vmscan_cancelled;
45145 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
45146 +atomic_unchecked_t fscache_n_store_vmscan_gone;
45147 +atomic_unchecked_t fscache_n_store_vmscan_busy;
45148 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
45149
45150 -atomic_t fscache_n_marks;
45151 -atomic_t fscache_n_uncaches;
45152 +atomic_unchecked_t fscache_n_marks;
45153 +atomic_unchecked_t fscache_n_uncaches;
45154
45155 -atomic_t fscache_n_acquires;
45156 -atomic_t fscache_n_acquires_null;
45157 -atomic_t fscache_n_acquires_no_cache;
45158 -atomic_t fscache_n_acquires_ok;
45159 -atomic_t fscache_n_acquires_nobufs;
45160 -atomic_t fscache_n_acquires_oom;
45161 +atomic_unchecked_t fscache_n_acquires;
45162 +atomic_unchecked_t fscache_n_acquires_null;
45163 +atomic_unchecked_t fscache_n_acquires_no_cache;
45164 +atomic_unchecked_t fscache_n_acquires_ok;
45165 +atomic_unchecked_t fscache_n_acquires_nobufs;
45166 +atomic_unchecked_t fscache_n_acquires_oom;
45167
45168 -atomic_t fscache_n_updates;
45169 -atomic_t fscache_n_updates_null;
45170 -atomic_t fscache_n_updates_run;
45171 +atomic_unchecked_t fscache_n_updates;
45172 +atomic_unchecked_t fscache_n_updates_null;
45173 +atomic_unchecked_t fscache_n_updates_run;
45174
45175 -atomic_t fscache_n_relinquishes;
45176 -atomic_t fscache_n_relinquishes_null;
45177 -atomic_t fscache_n_relinquishes_waitcrt;
45178 -atomic_t fscache_n_relinquishes_retire;
45179 +atomic_unchecked_t fscache_n_relinquishes;
45180 +atomic_unchecked_t fscache_n_relinquishes_null;
45181 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
45182 +atomic_unchecked_t fscache_n_relinquishes_retire;
45183
45184 -atomic_t fscache_n_cookie_index;
45185 -atomic_t fscache_n_cookie_data;
45186 -atomic_t fscache_n_cookie_special;
45187 +atomic_unchecked_t fscache_n_cookie_index;
45188 +atomic_unchecked_t fscache_n_cookie_data;
45189 +atomic_unchecked_t fscache_n_cookie_special;
45190
45191 -atomic_t fscache_n_object_alloc;
45192 -atomic_t fscache_n_object_no_alloc;
45193 -atomic_t fscache_n_object_lookups;
45194 -atomic_t fscache_n_object_lookups_negative;
45195 -atomic_t fscache_n_object_lookups_positive;
45196 -atomic_t fscache_n_object_lookups_timed_out;
45197 -atomic_t fscache_n_object_created;
45198 -atomic_t fscache_n_object_avail;
45199 -atomic_t fscache_n_object_dead;
45200 +atomic_unchecked_t fscache_n_object_alloc;
45201 +atomic_unchecked_t fscache_n_object_no_alloc;
45202 +atomic_unchecked_t fscache_n_object_lookups;
45203 +atomic_unchecked_t fscache_n_object_lookups_negative;
45204 +atomic_unchecked_t fscache_n_object_lookups_positive;
45205 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
45206 +atomic_unchecked_t fscache_n_object_created;
45207 +atomic_unchecked_t fscache_n_object_avail;
45208 +atomic_unchecked_t fscache_n_object_dead;
45209
45210 -atomic_t fscache_n_checkaux_none;
45211 -atomic_t fscache_n_checkaux_okay;
45212 -atomic_t fscache_n_checkaux_update;
45213 -atomic_t fscache_n_checkaux_obsolete;
45214 +atomic_unchecked_t fscache_n_checkaux_none;
45215 +atomic_unchecked_t fscache_n_checkaux_okay;
45216 +atomic_unchecked_t fscache_n_checkaux_update;
45217 +atomic_unchecked_t fscache_n_checkaux_obsolete;
45218
45219 atomic_t fscache_n_cop_alloc_object;
45220 atomic_t fscache_n_cop_lookup_object;
45221 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
45222 seq_puts(m, "FS-Cache statistics\n");
45223
45224 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
45225 - atomic_read(&fscache_n_cookie_index),
45226 - atomic_read(&fscache_n_cookie_data),
45227 - atomic_read(&fscache_n_cookie_special));
45228 + atomic_read_unchecked(&fscache_n_cookie_index),
45229 + atomic_read_unchecked(&fscache_n_cookie_data),
45230 + atomic_read_unchecked(&fscache_n_cookie_special));
45231
45232 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
45233 - atomic_read(&fscache_n_object_alloc),
45234 - atomic_read(&fscache_n_object_no_alloc),
45235 - atomic_read(&fscache_n_object_avail),
45236 - atomic_read(&fscache_n_object_dead));
45237 + atomic_read_unchecked(&fscache_n_object_alloc),
45238 + atomic_read_unchecked(&fscache_n_object_no_alloc),
45239 + atomic_read_unchecked(&fscache_n_object_avail),
45240 + atomic_read_unchecked(&fscache_n_object_dead));
45241 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
45242 - atomic_read(&fscache_n_checkaux_none),
45243 - atomic_read(&fscache_n_checkaux_okay),
45244 - atomic_read(&fscache_n_checkaux_update),
45245 - atomic_read(&fscache_n_checkaux_obsolete));
45246 + atomic_read_unchecked(&fscache_n_checkaux_none),
45247 + atomic_read_unchecked(&fscache_n_checkaux_okay),
45248 + atomic_read_unchecked(&fscache_n_checkaux_update),
45249 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
45250
45251 seq_printf(m, "Pages : mrk=%u unc=%u\n",
45252 - atomic_read(&fscache_n_marks),
45253 - atomic_read(&fscache_n_uncaches));
45254 + atomic_read_unchecked(&fscache_n_marks),
45255 + atomic_read_unchecked(&fscache_n_uncaches));
45256
45257 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
45258 " oom=%u\n",
45259 - atomic_read(&fscache_n_acquires),
45260 - atomic_read(&fscache_n_acquires_null),
45261 - atomic_read(&fscache_n_acquires_no_cache),
45262 - atomic_read(&fscache_n_acquires_ok),
45263 - atomic_read(&fscache_n_acquires_nobufs),
45264 - atomic_read(&fscache_n_acquires_oom));
45265 + atomic_read_unchecked(&fscache_n_acquires),
45266 + atomic_read_unchecked(&fscache_n_acquires_null),
45267 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
45268 + atomic_read_unchecked(&fscache_n_acquires_ok),
45269 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
45270 + atomic_read_unchecked(&fscache_n_acquires_oom));
45271
45272 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
45273 - atomic_read(&fscache_n_object_lookups),
45274 - atomic_read(&fscache_n_object_lookups_negative),
45275 - atomic_read(&fscache_n_object_lookups_positive),
45276 - atomic_read(&fscache_n_object_created),
45277 - atomic_read(&fscache_n_object_lookups_timed_out));
45278 + atomic_read_unchecked(&fscache_n_object_lookups),
45279 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
45280 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
45281 + atomic_read_unchecked(&fscache_n_object_created),
45282 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
45283
45284 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
45285 - atomic_read(&fscache_n_updates),
45286 - atomic_read(&fscache_n_updates_null),
45287 - atomic_read(&fscache_n_updates_run));
45288 + atomic_read_unchecked(&fscache_n_updates),
45289 + atomic_read_unchecked(&fscache_n_updates_null),
45290 + atomic_read_unchecked(&fscache_n_updates_run));
45291
45292 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
45293 - atomic_read(&fscache_n_relinquishes),
45294 - atomic_read(&fscache_n_relinquishes_null),
45295 - atomic_read(&fscache_n_relinquishes_waitcrt),
45296 - atomic_read(&fscache_n_relinquishes_retire));
45297 + atomic_read_unchecked(&fscache_n_relinquishes),
45298 + atomic_read_unchecked(&fscache_n_relinquishes_null),
45299 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
45300 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
45301
45302 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
45303 - atomic_read(&fscache_n_attr_changed),
45304 - atomic_read(&fscache_n_attr_changed_ok),
45305 - atomic_read(&fscache_n_attr_changed_nobufs),
45306 - atomic_read(&fscache_n_attr_changed_nomem),
45307 - atomic_read(&fscache_n_attr_changed_calls));
45308 + atomic_read_unchecked(&fscache_n_attr_changed),
45309 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
45310 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
45311 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
45312 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
45313
45314 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
45315 - atomic_read(&fscache_n_allocs),
45316 - atomic_read(&fscache_n_allocs_ok),
45317 - atomic_read(&fscache_n_allocs_wait),
45318 - atomic_read(&fscache_n_allocs_nobufs),
45319 - atomic_read(&fscache_n_allocs_intr));
45320 + atomic_read_unchecked(&fscache_n_allocs),
45321 + atomic_read_unchecked(&fscache_n_allocs_ok),
45322 + atomic_read_unchecked(&fscache_n_allocs_wait),
45323 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
45324 + atomic_read_unchecked(&fscache_n_allocs_intr));
45325 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
45326 - atomic_read(&fscache_n_alloc_ops),
45327 - atomic_read(&fscache_n_alloc_op_waits),
45328 - atomic_read(&fscache_n_allocs_object_dead));
45329 + atomic_read_unchecked(&fscache_n_alloc_ops),
45330 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
45331 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
45332
45333 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
45334 " int=%u oom=%u\n",
45335 - atomic_read(&fscache_n_retrievals),
45336 - atomic_read(&fscache_n_retrievals_ok),
45337 - atomic_read(&fscache_n_retrievals_wait),
45338 - atomic_read(&fscache_n_retrievals_nodata),
45339 - atomic_read(&fscache_n_retrievals_nobufs),
45340 - atomic_read(&fscache_n_retrievals_intr),
45341 - atomic_read(&fscache_n_retrievals_nomem));
45342 + atomic_read_unchecked(&fscache_n_retrievals),
45343 + atomic_read_unchecked(&fscache_n_retrievals_ok),
45344 + atomic_read_unchecked(&fscache_n_retrievals_wait),
45345 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
45346 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
45347 + atomic_read_unchecked(&fscache_n_retrievals_intr),
45348 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
45349 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
45350 - atomic_read(&fscache_n_retrieval_ops),
45351 - atomic_read(&fscache_n_retrieval_op_waits),
45352 - atomic_read(&fscache_n_retrievals_object_dead));
45353 + atomic_read_unchecked(&fscache_n_retrieval_ops),
45354 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
45355 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
45356
45357 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
45358 - atomic_read(&fscache_n_stores),
45359 - atomic_read(&fscache_n_stores_ok),
45360 - atomic_read(&fscache_n_stores_again),
45361 - atomic_read(&fscache_n_stores_nobufs),
45362 - atomic_read(&fscache_n_stores_oom));
45363 + atomic_read_unchecked(&fscache_n_stores),
45364 + atomic_read_unchecked(&fscache_n_stores_ok),
45365 + atomic_read_unchecked(&fscache_n_stores_again),
45366 + atomic_read_unchecked(&fscache_n_stores_nobufs),
45367 + atomic_read_unchecked(&fscache_n_stores_oom));
45368 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
45369 - atomic_read(&fscache_n_store_ops),
45370 - atomic_read(&fscache_n_store_calls),
45371 - atomic_read(&fscache_n_store_pages),
45372 - atomic_read(&fscache_n_store_radix_deletes),
45373 - atomic_read(&fscache_n_store_pages_over_limit));
45374 + atomic_read_unchecked(&fscache_n_store_ops),
45375 + atomic_read_unchecked(&fscache_n_store_calls),
45376 + atomic_read_unchecked(&fscache_n_store_pages),
45377 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
45378 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
45379
45380 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
45381 - atomic_read(&fscache_n_store_vmscan_not_storing),
45382 - atomic_read(&fscache_n_store_vmscan_gone),
45383 - atomic_read(&fscache_n_store_vmscan_busy),
45384 - atomic_read(&fscache_n_store_vmscan_cancelled));
45385 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
45386 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
45387 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
45388 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
45389
45390 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
45391 - atomic_read(&fscache_n_op_pend),
45392 - atomic_read(&fscache_n_op_run),
45393 - atomic_read(&fscache_n_op_enqueue),
45394 - atomic_read(&fscache_n_op_cancelled),
45395 - atomic_read(&fscache_n_op_rejected));
45396 + atomic_read_unchecked(&fscache_n_op_pend),
45397 + atomic_read_unchecked(&fscache_n_op_run),
45398 + atomic_read_unchecked(&fscache_n_op_enqueue),
45399 + atomic_read_unchecked(&fscache_n_op_cancelled),
45400 + atomic_read_unchecked(&fscache_n_op_rejected));
45401 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
45402 - atomic_read(&fscache_n_op_deferred_release),
45403 - atomic_read(&fscache_n_op_release),
45404 - atomic_read(&fscache_n_op_gc));
45405 + atomic_read_unchecked(&fscache_n_op_deferred_release),
45406 + atomic_read_unchecked(&fscache_n_op_release),
45407 + atomic_read_unchecked(&fscache_n_op_gc));
45408
45409 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
45410 atomic_read(&fscache_n_cop_alloc_object),
45411 diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
45412 index 3426521..3b75162 100644
45413 --- a/fs/fuse/cuse.c
45414 +++ b/fs/fuse/cuse.c
45415 @@ -587,10 +587,12 @@ static int __init cuse_init(void)
45416 INIT_LIST_HEAD(&cuse_conntbl[i]);
45417
45418 /* inherit and extend fuse_dev_operations */
45419 - cuse_channel_fops = fuse_dev_operations;
45420 - cuse_channel_fops.owner = THIS_MODULE;
45421 - cuse_channel_fops.open = cuse_channel_open;
45422 - cuse_channel_fops.release = cuse_channel_release;
45423 + pax_open_kernel();
45424 + memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
45425 + *(void **)&cuse_channel_fops.owner = THIS_MODULE;
45426 + *(void **)&cuse_channel_fops.open = cuse_channel_open;
45427 + *(void **)&cuse_channel_fops.release = cuse_channel_release;
45428 + pax_close_kernel();
45429
45430 cuse_class = class_create(THIS_MODULE, "cuse");
45431 if (IS_ERR(cuse_class))
45432 diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
45433 index 5f3368a..8306426 100644
45434 --- a/fs/fuse/dev.c
45435 +++ b/fs/fuse/dev.c
45436 @@ -1242,7 +1242,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
45437 ret = 0;
45438 pipe_lock(pipe);
45439
45440 - if (!pipe->readers) {
45441 + if (!atomic_read(&pipe->readers)) {
45442 send_sig(SIGPIPE, current, 0);
45443 if (!ret)
45444 ret = -EPIPE;
45445 diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
45446 index 2066328..f5add3b 100644
45447 --- a/fs/fuse/dir.c
45448 +++ b/fs/fuse/dir.c
45449 @@ -1175,7 +1175,7 @@ static char *read_link(struct dentry *dentry)
45450 return link;
45451 }
45452
45453 -static void free_link(char *link)
45454 +static void free_link(const char *link)
45455 {
45456 if (!IS_ERR(link))
45457 free_page((unsigned long) link);
45458 diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
45459 index 5698746..6086012 100644
45460 --- a/fs/gfs2/inode.c
45461 +++ b/fs/gfs2/inode.c
45462 @@ -1487,7 +1487,7 @@ out:
45463
45464 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
45465 {
45466 - char *s = nd_get_link(nd);
45467 + const char *s = nd_get_link(nd);
45468 if (!IS_ERR(s))
45469 kfree(s);
45470 }
45471 diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
45472 index 1e85a7a..eb4218a 100644
45473 --- a/fs/hugetlbfs/inode.c
45474 +++ b/fs/hugetlbfs/inode.c
45475 @@ -921,7 +921,7 @@ static struct file_system_type hugetlbfs_fs_type = {
45476 .kill_sb = kill_litter_super,
45477 };
45478
45479 -static struct vfsmount *hugetlbfs_vfsmount;
45480 +struct vfsmount *hugetlbfs_vfsmount;
45481
45482 static int can_do_hugetlb_shm(void)
45483 {
45484 diff --git a/fs/inode.c b/fs/inode.c
45485 index 83ab215..8842101 100644
45486 --- a/fs/inode.c
45487 +++ b/fs/inode.c
45488 @@ -870,8 +870,8 @@ unsigned int get_next_ino(void)
45489
45490 #ifdef CONFIG_SMP
45491 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
45492 - static atomic_t shared_last_ino;
45493 - int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
45494 + static atomic_unchecked_t shared_last_ino;
45495 + int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
45496
45497 res = next - LAST_INO_BATCH;
45498 }
45499 diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
45500 index eafb8d3..f423d37 100644
45501 --- a/fs/jffs2/erase.c
45502 +++ b/fs/jffs2/erase.c
45503 @@ -438,7 +438,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
45504 struct jffs2_unknown_node marker = {
45505 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
45506 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
45507 - .totlen = cpu_to_je32(c->cleanmarker_size)
45508 + .totlen = cpu_to_je32(c->cleanmarker_size),
45509 + .hdr_crc = cpu_to_je32(0)
45510 };
45511
45512 jffs2_prealloc_raw_node_refs(c, jeb, 1);
45513 diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
45514 index 30e8f47..21f600c 100644
45515 --- a/fs/jffs2/wbuf.c
45516 +++ b/fs/jffs2/wbuf.c
45517 @@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
45518 {
45519 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
45520 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
45521 - .totlen = constant_cpu_to_je32(8)
45522 + .totlen = constant_cpu_to_je32(8),
45523 + .hdr_crc = constant_cpu_to_je32(0)
45524 };
45525
45526 /*
45527 diff --git a/fs/jfs/super.c b/fs/jfs/super.c
45528 index 682bca6..86b8e6e 100644
45529 --- a/fs/jfs/super.c
45530 +++ b/fs/jfs/super.c
45531 @@ -801,7 +801,7 @@ static int __init init_jfs_fs(void)
45532
45533 jfs_inode_cachep =
45534 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
45535 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
45536 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
45537 init_once);
45538 if (jfs_inode_cachep == NULL)
45539 return -ENOMEM;
45540 diff --git a/fs/libfs.c b/fs/libfs.c
45541 index 5b2dbb3..7442d54 100644
45542 --- a/fs/libfs.c
45543 +++ b/fs/libfs.c
45544 @@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
45545
45546 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
45547 struct dentry *next;
45548 + char d_name[sizeof(next->d_iname)];
45549 + const unsigned char *name;
45550 +
45551 next = list_entry(p, struct dentry, d_u.d_child);
45552 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
45553 if (!simple_positive(next)) {
45554 @@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
45555
45556 spin_unlock(&next->d_lock);
45557 spin_unlock(&dentry->d_lock);
45558 - if (filldir(dirent, next->d_name.name,
45559 + name = next->d_name.name;
45560 + if (name == next->d_iname) {
45561 + memcpy(d_name, name, next->d_name.len);
45562 + name = d_name;
45563 + }
45564 + if (filldir(dirent, name,
45565 next->d_name.len, filp->f_pos,
45566 next->d_inode->i_ino,
45567 dt_type(next->d_inode)) < 0)
45568 diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
45569 index 8392cb8..80d6193 100644
45570 --- a/fs/lockd/clntproc.c
45571 +++ b/fs/lockd/clntproc.c
45572 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
45573 /*
45574 * Cookie counter for NLM requests
45575 */
45576 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
45577 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
45578
45579 void nlmclnt_next_cookie(struct nlm_cookie *c)
45580 {
45581 - u32 cookie = atomic_inc_return(&nlm_cookie);
45582 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
45583
45584 memcpy(c->data, &cookie, 4);
45585 c->len=4;
45586 diff --git a/fs/locks.c b/fs/locks.c
45587 index 637694b..f84a121 100644
45588 --- a/fs/locks.c
45589 +++ b/fs/locks.c
45590 @@ -2074,16 +2074,16 @@ void locks_remove_flock(struct file *filp)
45591 return;
45592
45593 if (filp->f_op && filp->f_op->flock) {
45594 - struct file_lock fl = {
45595 + struct file_lock flock = {
45596 .fl_pid = current->tgid,
45597 .fl_file = filp,
45598 .fl_flags = FL_FLOCK,
45599 .fl_type = F_UNLCK,
45600 .fl_end = OFFSET_MAX,
45601 };
45602 - filp->f_op->flock(filp, F_SETLKW, &fl);
45603 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
45604 - fl.fl_ops->fl_release_private(&fl);
45605 + filp->f_op->flock(filp, F_SETLKW, &flock);
45606 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
45607 + flock.fl_ops->fl_release_private(&flock);
45608 }
45609
45610 lock_flocks();
45611 diff --git a/fs/namei.c b/fs/namei.c
45612 index 46ea9cc..c7cf3a3 100644
45613 --- a/fs/namei.c
45614 +++ b/fs/namei.c
45615 @@ -278,16 +278,32 @@ int generic_permission(struct inode *inode, int mask)
45616 if (ret != -EACCES)
45617 return ret;
45618
45619 +#ifdef CONFIG_GRKERNSEC
45620 + /* we'll block if we have to log due to a denied capability use */
45621 + if (mask & MAY_NOT_BLOCK)
45622 + return -ECHILD;
45623 +#endif
45624 +
45625 if (S_ISDIR(inode->i_mode)) {
45626 /* DACs are overridable for directories */
45627 - if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
45628 - return 0;
45629 if (!(mask & MAY_WRITE))
45630 - if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
45631 + if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
45632 + ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
45633 return 0;
45634 + if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
45635 + return 0;
45636 return -EACCES;
45637 }
45638 /*
45639 + * Searching includes executable on directories, else just read.
45640 + */
45641 + mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
45642 + if (mask == MAY_READ)
45643 + if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
45644 + ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
45645 + return 0;
45646 +
45647 + /*
45648 * Read/write DACs are always overridable.
45649 * Executable DACs are overridable when there is
45650 * at least one exec bit set.
45651 @@ -296,14 +312,6 @@ int generic_permission(struct inode *inode, int mask)
45652 if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
45653 return 0;
45654
45655 - /*
45656 - * Searching includes executable on directories, else just read.
45657 - */
45658 - mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
45659 - if (mask == MAY_READ)
45660 - if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
45661 - return 0;
45662 -
45663 return -EACCES;
45664 }
45665
45666 @@ -652,11 +660,19 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
45667 return error;
45668 }
45669
45670 + if (gr_handle_follow_link(dentry->d_parent->d_inode,
45671 + dentry->d_inode, dentry, nd->path.mnt)) {
45672 + error = -EACCES;
45673 + *p = ERR_PTR(error); /* no ->put_link(), please */
45674 + path_put(&nd->path);
45675 + return error;
45676 + }
45677 +
45678 nd->last_type = LAST_BIND;
45679 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
45680 error = PTR_ERR(*p);
45681 if (!IS_ERR(*p)) {
45682 - char *s = nd_get_link(nd);
45683 + const char *s = nd_get_link(nd);
45684 error = 0;
45685 if (s)
45686 error = __vfs_follow_link(nd, s);
45687 @@ -1650,6 +1666,21 @@ static int path_lookupat(int dfd, const char *name,
45688 if (!err)
45689 err = complete_walk(nd);
45690
45691 + if (!(nd->flags & LOOKUP_PARENT)) {
45692 +#ifdef CONFIG_GRKERNSEC
45693 + if (flags & LOOKUP_RCU) {
45694 + if (!err)
45695 + path_put(&nd->path);
45696 + err = -ECHILD;
45697 + } else
45698 +#endif
45699 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
45700 + if (!err)
45701 + path_put(&nd->path);
45702 + err = -ENOENT;
45703 + }
45704 + }
45705 +
45706 if (!err && nd->flags & LOOKUP_DIRECTORY) {
45707 if (!nd->inode->i_op->lookup) {
45708 path_put(&nd->path);
45709 @@ -1677,6 +1708,15 @@ static int do_path_lookup(int dfd, const char *name,
45710 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
45711
45712 if (likely(!retval)) {
45713 + if (*name != '/' && nd->path.dentry && nd->inode) {
45714 +#ifdef CONFIG_GRKERNSEC
45715 + if (flags & LOOKUP_RCU)
45716 + return -ECHILD;
45717 +#endif
45718 + if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
45719 + return -ENOENT;
45720 + }
45721 +
45722 if (unlikely(!audit_dummy_context())) {
45723 if (nd->path.dentry && nd->inode)
45724 audit_inode(name, nd->path.dentry);
45725 @@ -2071,6 +2111,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
45726 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
45727 return -EPERM;
45728
45729 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
45730 + return -EPERM;
45731 + if (gr_handle_rawio(inode))
45732 + return -EPERM;
45733 + if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
45734 + return -EACCES;
45735 +
45736 return 0;
45737 }
45738
45739 @@ -2132,6 +2179,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
45740 error = complete_walk(nd);
45741 if (error)
45742 return ERR_PTR(error);
45743 +#ifdef CONFIG_GRKERNSEC
45744 + if (nd->flags & LOOKUP_RCU) {
45745 + error = -ECHILD;
45746 + goto exit;
45747 + }
45748 +#endif
45749 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
45750 + error = -ENOENT;
45751 + goto exit;
45752 + }
45753 audit_inode(pathname, nd->path.dentry);
45754 if (open_flag & O_CREAT) {
45755 error = -EISDIR;
45756 @@ -2142,6 +2199,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
45757 error = complete_walk(nd);
45758 if (error)
45759 return ERR_PTR(error);
45760 +#ifdef CONFIG_GRKERNSEC
45761 + if (nd->flags & LOOKUP_RCU) {
45762 + error = -ECHILD;
45763 + goto exit;
45764 + }
45765 +#endif
45766 + if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
45767 + error = -ENOENT;
45768 + goto exit;
45769 + }
45770 audit_inode(pathname, dir);
45771 goto ok;
45772 }
45773 @@ -2163,6 +2230,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
45774 error = complete_walk(nd);
45775 if (error)
45776 return ERR_PTR(error);
45777 +#ifdef CONFIG_GRKERNSEC
45778 + if (nd->flags & LOOKUP_RCU) {
45779 + error = -ECHILD;
45780 + goto exit;
45781 + }
45782 +#endif
45783 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
45784 + error = -ENOENT;
45785 + goto exit;
45786 + }
45787
45788 error = -ENOTDIR;
45789 if (nd->flags & LOOKUP_DIRECTORY) {
45790 @@ -2203,6 +2280,12 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
45791 /* Negative dentry, just create the file */
45792 if (!dentry->d_inode) {
45793 umode_t mode = op->mode;
45794 +
45795 + if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, open_flag, acc_mode, mode)) {
45796 + error = -EACCES;
45797 + goto exit_mutex_unlock;
45798 + }
45799 +
45800 if (!IS_POSIXACL(dir->d_inode))
45801 mode &= ~current_umask();
45802 /*
45803 @@ -2226,6 +2309,8 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
45804 error = vfs_create(dir->d_inode, dentry, mode, nd);
45805 if (error)
45806 goto exit_mutex_unlock;
45807 + else
45808 + gr_handle_create(path->dentry, path->mnt);
45809 mutex_unlock(&dir->d_inode->i_mutex);
45810 dput(nd->path.dentry);
45811 nd->path.dentry = dentry;
45812 @@ -2235,6 +2320,19 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
45813 /*
45814 * It already exists.
45815 */
45816 +
45817 + if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
45818 + error = -ENOENT;
45819 + goto exit_mutex_unlock;
45820 + }
45821 +
45822 + /* only check if O_CREAT is specified, all other checks need to go
45823 + into may_open */
45824 + if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
45825 + error = -EACCES;
45826 + goto exit_mutex_unlock;
45827 + }
45828 +
45829 mutex_unlock(&dir->d_inode->i_mutex);
45830 audit_inode(pathname, path->dentry);
45831
45832 @@ -2447,6 +2545,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path
45833 *path = nd.path;
45834 return dentry;
45835 eexist:
45836 + if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
45837 + dput(dentry);
45838 + dentry = ERR_PTR(-ENOENT);
45839 + goto fail;
45840 + }
45841 dput(dentry);
45842 dentry = ERR_PTR(-EEXIST);
45843 fail:
45844 @@ -2469,6 +2572,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname, struct pat
45845 }
45846 EXPORT_SYMBOL(user_path_create);
45847
45848 +static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, char **to, int is_dir)
45849 +{
45850 + char *tmp = getname(pathname);
45851 + struct dentry *res;
45852 + if (IS_ERR(tmp))
45853 + return ERR_CAST(tmp);
45854 + res = kern_path_create(dfd, tmp, path, is_dir);
45855 + if (IS_ERR(res))
45856 + putname(tmp);
45857 + else
45858 + *to = tmp;
45859 + return res;
45860 +}
45861 +
45862 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
45863 {
45864 int error = may_create(dir, dentry);
45865 @@ -2536,6 +2653,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
45866 error = mnt_want_write(path.mnt);
45867 if (error)
45868 goto out_dput;
45869 +
45870 + if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
45871 + error = -EPERM;
45872 + goto out_drop_write;
45873 + }
45874 +
45875 + if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
45876 + error = -EACCES;
45877 + goto out_drop_write;
45878 + }
45879 +
45880 error = security_path_mknod(&path, dentry, mode, dev);
45881 if (error)
45882 goto out_drop_write;
45883 @@ -2553,6 +2681,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
45884 }
45885 out_drop_write:
45886 mnt_drop_write(path.mnt);
45887 +
45888 + if (!error)
45889 + gr_handle_create(dentry, path.mnt);
45890 out_dput:
45891 dput(dentry);
45892 mutex_unlock(&path.dentry->d_inode->i_mutex);
45893 @@ -2602,12 +2733,21 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode)
45894 error = mnt_want_write(path.mnt);
45895 if (error)
45896 goto out_dput;
45897 +
45898 + if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
45899 + error = -EACCES;
45900 + goto out_drop_write;
45901 + }
45902 +
45903 error = security_path_mkdir(&path, dentry, mode);
45904 if (error)
45905 goto out_drop_write;
45906 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
45907 out_drop_write:
45908 mnt_drop_write(path.mnt);
45909 +
45910 + if (!error)
45911 + gr_handle_create(dentry, path.mnt);
45912 out_dput:
45913 dput(dentry);
45914 mutex_unlock(&path.dentry->d_inode->i_mutex);
45915 @@ -2687,6 +2827,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
45916 char * name;
45917 struct dentry *dentry;
45918 struct nameidata nd;
45919 + ino_t saved_ino = 0;
45920 + dev_t saved_dev = 0;
45921
45922 error = user_path_parent(dfd, pathname, &nd, &name);
45923 if (error)
45924 @@ -2715,6 +2857,15 @@ static long do_rmdir(int dfd, const char __user *pathname)
45925 error = -ENOENT;
45926 goto exit3;
45927 }
45928 +
45929 + saved_ino = dentry->d_inode->i_ino;
45930 + saved_dev = gr_get_dev_from_dentry(dentry);
45931 +
45932 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
45933 + error = -EACCES;
45934 + goto exit3;
45935 + }
45936 +
45937 error = mnt_want_write(nd.path.mnt);
45938 if (error)
45939 goto exit3;
45940 @@ -2722,6 +2873,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
45941 if (error)
45942 goto exit4;
45943 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
45944 + if (!error && (saved_dev || saved_ino))
45945 + gr_handle_delete(saved_ino, saved_dev);
45946 exit4:
45947 mnt_drop_write(nd.path.mnt);
45948 exit3:
45949 @@ -2784,6 +2937,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
45950 struct dentry *dentry;
45951 struct nameidata nd;
45952 struct inode *inode = NULL;
45953 + ino_t saved_ino = 0;
45954 + dev_t saved_dev = 0;
45955
45956 error = user_path_parent(dfd, pathname, &nd, &name);
45957 if (error)
45958 @@ -2806,6 +2961,16 @@ static long do_unlinkat(int dfd, const char __user *pathname)
45959 if (!inode)
45960 goto slashes;
45961 ihold(inode);
45962 +
45963 + if (inode->i_nlink <= 1) {
45964 + saved_ino = inode->i_ino;
45965 + saved_dev = gr_get_dev_from_dentry(dentry);
45966 + }
45967 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
45968 + error = -EACCES;
45969 + goto exit2;
45970 + }
45971 +
45972 error = mnt_want_write(nd.path.mnt);
45973 if (error)
45974 goto exit2;
45975 @@ -2813,6 +2978,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
45976 if (error)
45977 goto exit3;
45978 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
45979 + if (!error && (saved_ino || saved_dev))
45980 + gr_handle_delete(saved_ino, saved_dev);
45981 exit3:
45982 mnt_drop_write(nd.path.mnt);
45983 exit2:
45984 @@ -2888,10 +3055,18 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
45985 error = mnt_want_write(path.mnt);
45986 if (error)
45987 goto out_dput;
45988 +
45989 + if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
45990 + error = -EACCES;
45991 + goto out_drop_write;
45992 + }
45993 +
45994 error = security_path_symlink(&path, dentry, from);
45995 if (error)
45996 goto out_drop_write;
45997 error = vfs_symlink(path.dentry->d_inode, dentry, from);
45998 + if (!error)
45999 + gr_handle_create(dentry, path.mnt);
46000 out_drop_write:
46001 mnt_drop_write(path.mnt);
46002 out_dput:
46003 @@ -2963,6 +3138,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
46004 {
46005 struct dentry *new_dentry;
46006 struct path old_path, new_path;
46007 + char *to = NULL;
46008 int how = 0;
46009 int error;
46010
46011 @@ -2986,7 +3162,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
46012 if (error)
46013 return error;
46014
46015 - new_dentry = user_path_create(newdfd, newname, &new_path, 0);
46016 + new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to, 0);
46017 error = PTR_ERR(new_dentry);
46018 if (IS_ERR(new_dentry))
46019 goto out;
46020 @@ -2997,13 +3173,30 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
46021 error = mnt_want_write(new_path.mnt);
46022 if (error)
46023 goto out_dput;
46024 +
46025 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
46026 + old_path.dentry->d_inode,
46027 + old_path.dentry->d_inode->i_mode, to)) {
46028 + error = -EACCES;
46029 + goto out_drop_write;
46030 + }
46031 +
46032 + if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
46033 + old_path.dentry, old_path.mnt, to)) {
46034 + error = -EACCES;
46035 + goto out_drop_write;
46036 + }
46037 +
46038 error = security_path_link(old_path.dentry, &new_path, new_dentry);
46039 if (error)
46040 goto out_drop_write;
46041 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
46042 + if (!error)
46043 + gr_handle_create(new_dentry, new_path.mnt);
46044 out_drop_write:
46045 mnt_drop_write(new_path.mnt);
46046 out_dput:
46047 + putname(to);
46048 dput(new_dentry);
46049 mutex_unlock(&new_path.dentry->d_inode->i_mutex);
46050 path_put(&new_path);
46051 @@ -3231,6 +3424,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
46052 if (new_dentry == trap)
46053 goto exit5;
46054
46055 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
46056 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
46057 + to);
46058 + if (error)
46059 + goto exit5;
46060 +
46061 error = mnt_want_write(oldnd.path.mnt);
46062 if (error)
46063 goto exit5;
46064 @@ -3240,6 +3439,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
46065 goto exit6;
46066 error = vfs_rename(old_dir->d_inode, old_dentry,
46067 new_dir->d_inode, new_dentry);
46068 + if (!error)
46069 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
46070 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
46071 exit6:
46072 mnt_drop_write(oldnd.path.mnt);
46073 exit5:
46074 @@ -3265,6 +3467,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
46075
46076 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
46077 {
46078 + char tmpbuf[64];
46079 + const char *newlink;
46080 int len;
46081
46082 len = PTR_ERR(link);
46083 @@ -3274,7 +3478,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
46084 len = strlen(link);
46085 if (len > (unsigned) buflen)
46086 len = buflen;
46087 - if (copy_to_user(buffer, link, len))
46088 +
46089 + if (len < sizeof(tmpbuf)) {
46090 + memcpy(tmpbuf, link, len);
46091 + newlink = tmpbuf;
46092 + } else
46093 + newlink = link;
46094 +
46095 + if (copy_to_user(buffer, newlink, len))
46096 len = -EFAULT;
46097 out:
46098 return len;
46099 diff --git a/fs/namespace.c b/fs/namespace.c
46100 index e608199..9609cb9 100644
46101 --- a/fs/namespace.c
46102 +++ b/fs/namespace.c
46103 @@ -1155,6 +1155,9 @@ static int do_umount(struct mount *mnt, int flags)
46104 if (!(sb->s_flags & MS_RDONLY))
46105 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
46106 up_write(&sb->s_umount);
46107 +
46108 + gr_log_remount(mnt->mnt_devname, retval);
46109 +
46110 return retval;
46111 }
46112
46113 @@ -1174,6 +1177,9 @@ static int do_umount(struct mount *mnt, int flags)
46114 br_write_unlock(vfsmount_lock);
46115 up_write(&namespace_sem);
46116 release_mounts(&umount_list);
46117 +
46118 + gr_log_unmount(mnt->mnt_devname, retval);
46119 +
46120 return retval;
46121 }
46122
46123 @@ -2175,6 +2181,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
46124 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
46125 MS_STRICTATIME);
46126
46127 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
46128 + retval = -EPERM;
46129 + goto dput_out;
46130 + }
46131 +
46132 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
46133 + retval = -EPERM;
46134 + goto dput_out;
46135 + }
46136 +
46137 if (flags & MS_REMOUNT)
46138 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
46139 data_page);
46140 @@ -2189,6 +2205,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
46141 dev_name, data_page);
46142 dput_out:
46143 path_put(&path);
46144 +
46145 + gr_log_mount(dev_name, dir_name, retval);
46146 +
46147 return retval;
46148 }
46149
46150 @@ -2470,6 +2489,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
46151 if (error)
46152 goto out2;
46153
46154 + if (gr_handle_chroot_pivot()) {
46155 + error = -EPERM;
46156 + goto out2;
46157 + }
46158 +
46159 get_fs_root(current->fs, &root);
46160 error = lock_mount(&old);
46161 if (error)
46162 diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
46163 index f649fba..236bf92 100644
46164 --- a/fs/nfs/inode.c
46165 +++ b/fs/nfs/inode.c
46166 @@ -151,7 +151,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
46167 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
46168 nfsi->attrtimeo_timestamp = jiffies;
46169
46170 - memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
46171 + memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
46172 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
46173 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
46174 else
46175 @@ -1003,16 +1003,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
46176 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
46177 }
46178
46179 -static atomic_long_t nfs_attr_generation_counter;
46180 +static atomic_long_unchecked_t nfs_attr_generation_counter;
46181
46182 static unsigned long nfs_read_attr_generation_counter(void)
46183 {
46184 - return atomic_long_read(&nfs_attr_generation_counter);
46185 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
46186 }
46187
46188 unsigned long nfs_inc_attr_generation_counter(void)
46189 {
46190 - return atomic_long_inc_return(&nfs_attr_generation_counter);
46191 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
46192 }
46193
46194 void nfs_fattr_init(struct nfs_fattr *fattr)
46195 diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
46196 index edf6d3e..bdd1da7 100644
46197 --- a/fs/nfsd/vfs.c
46198 +++ b/fs/nfsd/vfs.c
46199 @@ -925,7 +925,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
46200 } else {
46201 oldfs = get_fs();
46202 set_fs(KERNEL_DS);
46203 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
46204 + host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
46205 set_fs(oldfs);
46206 }
46207
46208 @@ -1029,7 +1029,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
46209
46210 /* Write the data. */
46211 oldfs = get_fs(); set_fs(KERNEL_DS);
46212 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
46213 + host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
46214 set_fs(oldfs);
46215 if (host_err < 0)
46216 goto out_nfserr;
46217 @@ -1564,7 +1564,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
46218 */
46219
46220 oldfs = get_fs(); set_fs(KERNEL_DS);
46221 - host_err = inode->i_op->readlink(dentry, buf, *lenp);
46222 + host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
46223 set_fs(oldfs);
46224
46225 if (host_err < 0)
46226 diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
46227 index 3568c8a..e0240d8 100644
46228 --- a/fs/notify/fanotify/fanotify_user.c
46229 +++ b/fs/notify/fanotify/fanotify_user.c
46230 @@ -278,7 +278,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
46231 goto out_close_fd;
46232
46233 ret = -EFAULT;
46234 - if (copy_to_user(buf, &fanotify_event_metadata,
46235 + if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
46236 + copy_to_user(buf, &fanotify_event_metadata,
46237 fanotify_event_metadata.event_len))
46238 goto out_kill_access_response;
46239
46240 diff --git a/fs/notify/notification.c b/fs/notify/notification.c
46241 index ee18815..7aa5d01 100644
46242 --- a/fs/notify/notification.c
46243 +++ b/fs/notify/notification.c
46244 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
46245 * get set to 0 so it will never get 'freed'
46246 */
46247 static struct fsnotify_event *q_overflow_event;
46248 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
46249 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
46250
46251 /**
46252 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
46253 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
46254 */
46255 u32 fsnotify_get_cookie(void)
46256 {
46257 - return atomic_inc_return(&fsnotify_sync_cookie);
46258 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
46259 }
46260 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
46261
46262 diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
46263 index 99e3610..02c1068 100644
46264 --- a/fs/ntfs/dir.c
46265 +++ b/fs/ntfs/dir.c
46266 @@ -1329,7 +1329,7 @@ find_next_index_buffer:
46267 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
46268 ~(s64)(ndir->itype.index.block_size - 1)));
46269 /* Bounds checks. */
46270 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
46271 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
46272 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
46273 "inode 0x%lx or driver bug.", vdir->i_ino);
46274 goto err_out;
46275 diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
46276 index c587e2d..3641eaa 100644
46277 --- a/fs/ntfs/file.c
46278 +++ b/fs/ntfs/file.c
46279 @@ -2229,6 +2229,6 @@ const struct inode_operations ntfs_file_inode_ops = {
46280 #endif /* NTFS_RW */
46281 };
46282
46283 -const struct file_operations ntfs_empty_file_ops = {};
46284 +const struct file_operations ntfs_empty_file_ops __read_only;
46285
46286 -const struct inode_operations ntfs_empty_inode_ops = {};
46287 +const struct inode_operations ntfs_empty_inode_ops __read_only;
46288 diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
46289 index 210c352..a174f83 100644
46290 --- a/fs/ocfs2/localalloc.c
46291 +++ b/fs/ocfs2/localalloc.c
46292 @@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
46293 goto bail;
46294 }
46295
46296 - atomic_inc(&osb->alloc_stats.moves);
46297 + atomic_inc_unchecked(&osb->alloc_stats.moves);
46298
46299 bail:
46300 if (handle)
46301 diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
46302 index d355e6e..578d905 100644
46303 --- a/fs/ocfs2/ocfs2.h
46304 +++ b/fs/ocfs2/ocfs2.h
46305 @@ -235,11 +235,11 @@ enum ocfs2_vol_state
46306
46307 struct ocfs2_alloc_stats
46308 {
46309 - atomic_t moves;
46310 - atomic_t local_data;
46311 - atomic_t bitmap_data;
46312 - atomic_t bg_allocs;
46313 - atomic_t bg_extends;
46314 + atomic_unchecked_t moves;
46315 + atomic_unchecked_t local_data;
46316 + atomic_unchecked_t bitmap_data;
46317 + atomic_unchecked_t bg_allocs;
46318 + atomic_unchecked_t bg_extends;
46319 };
46320
46321 enum ocfs2_local_alloc_state
46322 diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
46323 index ba5d97e..c77db25 100644
46324 --- a/fs/ocfs2/suballoc.c
46325 +++ b/fs/ocfs2/suballoc.c
46326 @@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
46327 mlog_errno(status);
46328 goto bail;
46329 }
46330 - atomic_inc(&osb->alloc_stats.bg_extends);
46331 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
46332
46333 /* You should never ask for this much metadata */
46334 BUG_ON(bits_wanted >
46335 @@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handle,
46336 mlog_errno(status);
46337 goto bail;
46338 }
46339 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
46340 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
46341
46342 *suballoc_loc = res.sr_bg_blkno;
46343 *suballoc_bit_start = res.sr_bit_offset;
46344 @@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
46345 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
46346 res->sr_bits);
46347
46348 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
46349 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
46350
46351 BUG_ON(res->sr_bits != 1);
46352
46353 @@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
46354 mlog_errno(status);
46355 goto bail;
46356 }
46357 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
46358 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
46359
46360 BUG_ON(res.sr_bits != 1);
46361
46362 @@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
46363 cluster_start,
46364 num_clusters);
46365 if (!status)
46366 - atomic_inc(&osb->alloc_stats.local_data);
46367 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
46368 } else {
46369 if (min_clusters > (osb->bitmap_cpg - 1)) {
46370 /* The only paths asking for contiguousness
46371 @@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
46372 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
46373 res.sr_bg_blkno,
46374 res.sr_bit_offset);
46375 - atomic_inc(&osb->alloc_stats.bitmap_data);
46376 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
46377 *num_clusters = res.sr_bits;
46378 }
46379 }
46380 diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
46381 index 604e12c..8426483 100644
46382 --- a/fs/ocfs2/super.c
46383 +++ b/fs/ocfs2/super.c
46384 @@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
46385 "%10s => GlobalAllocs: %d LocalAllocs: %d "
46386 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
46387 "Stats",
46388 - atomic_read(&osb->alloc_stats.bitmap_data),
46389 - atomic_read(&osb->alloc_stats.local_data),
46390 - atomic_read(&osb->alloc_stats.bg_allocs),
46391 - atomic_read(&osb->alloc_stats.moves),
46392 - atomic_read(&osb->alloc_stats.bg_extends));
46393 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
46394 + atomic_read_unchecked(&osb->alloc_stats.local_data),
46395 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
46396 + atomic_read_unchecked(&osb->alloc_stats.moves),
46397 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
46398
46399 out += snprintf(buf + out, len - out,
46400 "%10s => State: %u Descriptor: %llu Size: %u bits "
46401 @@ -2117,11 +2117,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
46402 spin_lock_init(&osb->osb_xattr_lock);
46403 ocfs2_init_steal_slots(osb);
46404
46405 - atomic_set(&osb->alloc_stats.moves, 0);
46406 - atomic_set(&osb->alloc_stats.local_data, 0);
46407 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
46408 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
46409 - atomic_set(&osb->alloc_stats.bg_extends, 0);
46410 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
46411 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
46412 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
46413 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
46414 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
46415
46416 /* Copy the blockcheck stats from the superblock probe */
46417 osb->osb_ecc_stats = *stats;
46418 diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
46419 index 5d22872..523db20 100644
46420 --- a/fs/ocfs2/symlink.c
46421 +++ b/fs/ocfs2/symlink.c
46422 @@ -142,7 +142,7 @@ bail:
46423
46424 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
46425 {
46426 - char *link = nd_get_link(nd);
46427 + const char *link = nd_get_link(nd);
46428 if (!IS_ERR(link))
46429 kfree(link);
46430 }
46431 diff --git a/fs/open.c b/fs/open.c
46432 index 77becc0..aad7bd9 100644
46433 --- a/fs/open.c
46434 +++ b/fs/open.c
46435 @@ -112,6 +112,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
46436 error = locks_verify_truncate(inode, NULL, length);
46437 if (!error)
46438 error = security_path_truncate(&path);
46439 +
46440 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
46441 + error = -EACCES;
46442 +
46443 if (!error)
46444 error = do_truncate(path.dentry, length, 0, NULL);
46445
46446 @@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
46447 if (__mnt_is_readonly(path.mnt))
46448 res = -EROFS;
46449
46450 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
46451 + res = -EACCES;
46452 +
46453 out_path_release:
46454 path_put(&path);
46455 out:
46456 @@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
46457 if (error)
46458 goto dput_and_out;
46459
46460 + gr_log_chdir(path.dentry, path.mnt);
46461 +
46462 set_fs_pwd(current->fs, &path);
46463
46464 dput_and_out:
46465 @@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
46466 goto out_putf;
46467
46468 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
46469 +
46470 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
46471 + error = -EPERM;
46472 +
46473 + if (!error)
46474 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
46475 +
46476 if (!error)
46477 set_fs_pwd(current->fs, &file->f_path);
46478 out_putf:
46479 @@ -438,7 +454,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
46480 if (error)
46481 goto dput_and_out;
46482
46483 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
46484 + goto dput_and_out;
46485 +
46486 set_fs_root(current->fs, &path);
46487 +
46488 + gr_handle_chroot_chdir(&path);
46489 +
46490 error = 0;
46491 dput_and_out:
46492 path_put(&path);
46493 @@ -456,6 +478,16 @@ static int chmod_common(struct path *path, umode_t mode)
46494 if (error)
46495 return error;
46496 mutex_lock(&inode->i_mutex);
46497 +
46498 + if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
46499 + error = -EACCES;
46500 + goto out_unlock;
46501 + }
46502 + if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
46503 + error = -EACCES;
46504 + goto out_unlock;
46505 + }
46506 +
46507 error = security_path_chmod(path, mode);
46508 if (error)
46509 goto out_unlock;
46510 @@ -506,6 +538,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
46511 int error;
46512 struct iattr newattrs;
46513
46514 + if (!gr_acl_handle_chown(path->dentry, path->mnt))
46515 + return -EACCES;
46516 +
46517 newattrs.ia_valid = ATTR_CTIME;
46518 if (user != (uid_t) -1) {
46519 newattrs.ia_valid |= ATTR_UID;
46520 diff --git a/fs/pipe.c b/fs/pipe.c
46521 index a932ced..6495412 100644
46522 --- a/fs/pipe.c
46523 +++ b/fs/pipe.c
46524 @@ -420,9 +420,9 @@ redo:
46525 }
46526 if (bufs) /* More to do? */
46527 continue;
46528 - if (!pipe->writers)
46529 + if (!atomic_read(&pipe->writers))
46530 break;
46531 - if (!pipe->waiting_writers) {
46532 + if (!atomic_read(&pipe->waiting_writers)) {
46533 /* syscall merging: Usually we must not sleep
46534 * if O_NONBLOCK is set, or if we got some data.
46535 * But if a writer sleeps in kernel space, then
46536 @@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
46537 mutex_lock(&inode->i_mutex);
46538 pipe = inode->i_pipe;
46539
46540 - if (!pipe->readers) {
46541 + if (!atomic_read(&pipe->readers)) {
46542 send_sig(SIGPIPE, current, 0);
46543 ret = -EPIPE;
46544 goto out;
46545 @@ -530,7 +530,7 @@ redo1:
46546 for (;;) {
46547 int bufs;
46548
46549 - if (!pipe->readers) {
46550 + if (!atomic_read(&pipe->readers)) {
46551 send_sig(SIGPIPE, current, 0);
46552 if (!ret)
46553 ret = -EPIPE;
46554 @@ -616,9 +616,9 @@ redo2:
46555 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
46556 do_wakeup = 0;
46557 }
46558 - pipe->waiting_writers++;
46559 + atomic_inc(&pipe->waiting_writers);
46560 pipe_wait(pipe);
46561 - pipe->waiting_writers--;
46562 + atomic_dec(&pipe->waiting_writers);
46563 }
46564 out:
46565 mutex_unlock(&inode->i_mutex);
46566 @@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table *wait)
46567 mask = 0;
46568 if (filp->f_mode & FMODE_READ) {
46569 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
46570 - if (!pipe->writers && filp->f_version != pipe->w_counter)
46571 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
46572 mask |= POLLHUP;
46573 }
46574
46575 @@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table *wait)
46576 * Most Unices do not set POLLERR for FIFOs but on Linux they
46577 * behave exactly like pipes for poll().
46578 */
46579 - if (!pipe->readers)
46580 + if (!atomic_read(&pipe->readers))
46581 mask |= POLLERR;
46582 }
46583
46584 @@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int decr, int decw)
46585
46586 mutex_lock(&inode->i_mutex);
46587 pipe = inode->i_pipe;
46588 - pipe->readers -= decr;
46589 - pipe->writers -= decw;
46590 + atomic_sub(decr, &pipe->readers);
46591 + atomic_sub(decw, &pipe->writers);
46592
46593 - if (!pipe->readers && !pipe->writers) {
46594 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
46595 free_pipe_info(inode);
46596 } else {
46597 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
46598 @@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
46599
46600 if (inode->i_pipe) {
46601 ret = 0;
46602 - inode->i_pipe->readers++;
46603 + atomic_inc(&inode->i_pipe->readers);
46604 }
46605
46606 mutex_unlock(&inode->i_mutex);
46607 @@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
46608
46609 if (inode->i_pipe) {
46610 ret = 0;
46611 - inode->i_pipe->writers++;
46612 + atomic_inc(&inode->i_pipe->writers);
46613 }
46614
46615 mutex_unlock(&inode->i_mutex);
46616 @@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
46617 if (inode->i_pipe) {
46618 ret = 0;
46619 if (filp->f_mode & FMODE_READ)
46620 - inode->i_pipe->readers++;
46621 + atomic_inc(&inode->i_pipe->readers);
46622 if (filp->f_mode & FMODE_WRITE)
46623 - inode->i_pipe->writers++;
46624 + atomic_inc(&inode->i_pipe->writers);
46625 }
46626
46627 mutex_unlock(&inode->i_mutex);
46628 @@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode)
46629 inode->i_pipe = NULL;
46630 }
46631
46632 -static struct vfsmount *pipe_mnt __read_mostly;
46633 +struct vfsmount *pipe_mnt __read_mostly;
46634
46635 /*
46636 * pipefs_dname() is called from d_path().
46637 @@ -961,7 +961,8 @@ static struct inode * get_pipe_inode(void)
46638 goto fail_iput;
46639 inode->i_pipe = pipe;
46640
46641 - pipe->readers = pipe->writers = 1;
46642 + atomic_set(&pipe->readers, 1);
46643 + atomic_set(&pipe->writers, 1);
46644 inode->i_fop = &rdwr_pipefifo_fops;
46645
46646 /*
46647 diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
46648 index 15af622..0e9f4467 100644
46649 --- a/fs/proc/Kconfig
46650 +++ b/fs/proc/Kconfig
46651 @@ -30,12 +30,12 @@ config PROC_FS
46652
46653 config PROC_KCORE
46654 bool "/proc/kcore support" if !ARM
46655 - depends on PROC_FS && MMU
46656 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
46657
46658 config PROC_VMCORE
46659 bool "/proc/vmcore support"
46660 - depends on PROC_FS && CRASH_DUMP
46661 - default y
46662 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
46663 + default n
46664 help
46665 Exports the dump image of crashed kernel in ELF format.
46666
46667 @@ -59,8 +59,8 @@ config PROC_SYSCTL
46668 limited in memory.
46669
46670 config PROC_PAGE_MONITOR
46671 - default y
46672 - depends on PROC_FS && MMU
46673 + default n
46674 + depends on PROC_FS && MMU && !GRKERNSEC
46675 bool "Enable /proc page monitoring" if EXPERT
46676 help
46677 Various /proc files exist to monitor process memory utilization:
46678 diff --git a/fs/proc/array.c b/fs/proc/array.c
46679 index c602b8d..a7de642 100644
46680 --- a/fs/proc/array.c
46681 +++ b/fs/proc/array.c
46682 @@ -60,6 +60,7 @@
46683 #include <linux/tty.h>
46684 #include <linux/string.h>
46685 #include <linux/mman.h>
46686 +#include <linux/grsecurity.h>
46687 #include <linux/proc_fs.h>
46688 #include <linux/ioport.h>
46689 #include <linux/uaccess.h>
46690 @@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
46691 seq_putc(m, '\n');
46692 }
46693
46694 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
46695 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
46696 +{
46697 + if (p->mm)
46698 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
46699 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
46700 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
46701 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
46702 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
46703 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
46704 + else
46705 + seq_printf(m, "PaX:\t-----\n");
46706 +}
46707 +#endif
46708 +
46709 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
46710 struct pid *pid, struct task_struct *task)
46711 {
46712 @@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
46713 task_cpus_allowed(m, task);
46714 cpuset_task_status_allowed(m, task);
46715 task_context_switch_counts(m, task);
46716 +
46717 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
46718 + task_pax(m, task);
46719 +#endif
46720 +
46721 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
46722 + task_grsec_rbac(m, task);
46723 +#endif
46724 +
46725 return 0;
46726 }
46727
46728 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46729 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
46730 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
46731 + _mm->pax_flags & MF_PAX_SEGMEXEC))
46732 +#endif
46733 +
46734 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
46735 struct pid *pid, struct task_struct *task, int whole)
46736 {
46737 @@ -378,6 +409,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
46738 char tcomm[sizeof(task->comm)];
46739 unsigned long flags;
46740
46741 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46742 + if (current->exec_id != m->exec_id) {
46743 + gr_log_badprocpid("stat");
46744 + return 0;
46745 + }
46746 +#endif
46747 +
46748 state = *get_task_state(task);
46749 vsize = eip = esp = 0;
46750 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
46751 @@ -449,6 +487,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
46752 gtime = task->gtime;
46753 }
46754
46755 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46756 + if (PAX_RAND_FLAGS(mm)) {
46757 + eip = 0;
46758 + esp = 0;
46759 + wchan = 0;
46760 + }
46761 +#endif
46762 +#ifdef CONFIG_GRKERNSEC_HIDESYM
46763 + wchan = 0;
46764 + eip =0;
46765 + esp =0;
46766 +#endif
46767 +
46768 /* scale priority and nice values from timeslices to -20..20 */
46769 /* to make it look like a "normal" Unix priority/nice value */
46770 priority = task_prio(task);
46771 @@ -489,9 +540,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
46772 vsize,
46773 mm ? get_mm_rss(mm) : 0,
46774 rsslim,
46775 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46776 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
46777 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
46778 + PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
46779 +#else
46780 mm ? (permitted ? mm->start_code : 1) : 0,
46781 mm ? (permitted ? mm->end_code : 1) : 0,
46782 (permitted && mm) ? mm->start_stack : 0,
46783 +#endif
46784 esp,
46785 eip,
46786 /* The signal information here is obsolete.
46787 @@ -536,8 +593,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
46788 struct pid *pid, struct task_struct *task)
46789 {
46790 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
46791 - struct mm_struct *mm = get_task_mm(task);
46792 + struct mm_struct *mm;
46793
46794 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46795 + if (current->exec_id != m->exec_id) {
46796 + gr_log_badprocpid("statm");
46797 + return 0;
46798 + }
46799 +#endif
46800 + mm = get_task_mm(task);
46801 if (mm) {
46802 size = task_statm(mm, &shared, &text, &data, &resident);
46803 mmput(mm);
46804 @@ -547,3 +611,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
46805
46806 return 0;
46807 }
46808 +
46809 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
46810 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
46811 +{
46812 + u32 curr_ip = 0;
46813 + unsigned long flags;
46814 +
46815 + if (lock_task_sighand(task, &flags)) {
46816 + curr_ip = task->signal->curr_ip;
46817 + unlock_task_sighand(task, &flags);
46818 + }
46819 +
46820 + return sprintf(buffer, "%pI4\n", &curr_ip);
46821 +}
46822 +#endif
46823 diff --git a/fs/proc/base.c b/fs/proc/base.c
46824 index d4548dd..d101f84 100644
46825 --- a/fs/proc/base.c
46826 +++ b/fs/proc/base.c
46827 @@ -109,6 +109,14 @@ struct pid_entry {
46828 union proc_op op;
46829 };
46830
46831 +struct getdents_callback {
46832 + struct linux_dirent __user * current_dir;
46833 + struct linux_dirent __user * previous;
46834 + struct file * file;
46835 + int count;
46836 + int error;
46837 +};
46838 +
46839 #define NOD(NAME, MODE, IOP, FOP, OP) { \
46840 .name = (NAME), \
46841 .len = sizeof(NAME) - 1, \
46842 @@ -213,6 +221,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
46843 if (!mm->arg_end)
46844 goto out_mm; /* Shh! No looking before we're done */
46845
46846 + if (gr_acl_handle_procpidmem(task))
46847 + goto out_mm;
46848 +
46849 len = mm->arg_end - mm->arg_start;
46850
46851 if (len > PAGE_SIZE)
46852 @@ -240,12 +251,28 @@ out:
46853 return res;
46854 }
46855
46856 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46857 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
46858 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
46859 + _mm->pax_flags & MF_PAX_SEGMEXEC))
46860 +#endif
46861 +
46862 static int proc_pid_auxv(struct task_struct *task, char *buffer)
46863 {
46864 struct mm_struct *mm = mm_for_maps(task);
46865 int res = PTR_ERR(mm);
46866 if (mm && !IS_ERR(mm)) {
46867 unsigned int nwords = 0;
46868 +
46869 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46870 + /* allow if we're currently ptracing this task */
46871 + if (PAX_RAND_FLAGS(mm) &&
46872 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
46873 + mmput(mm);
46874 + return 0;
46875 + }
46876 +#endif
46877 +
46878 do {
46879 nwords += 2;
46880 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
46881 @@ -259,7 +286,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
46882 }
46883
46884
46885 -#ifdef CONFIG_KALLSYMS
46886 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
46887 /*
46888 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
46889 * Returns the resolved symbol. If that fails, simply return the address.
46890 @@ -298,7 +325,7 @@ static void unlock_trace(struct task_struct *task)
46891 mutex_unlock(&task->signal->cred_guard_mutex);
46892 }
46893
46894 -#ifdef CONFIG_STACKTRACE
46895 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
46896
46897 #define MAX_STACK_TRACE_DEPTH 64
46898
46899 @@ -489,7 +516,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
46900 return count;
46901 }
46902
46903 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
46904 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
46905 static int proc_pid_syscall(struct task_struct *task, char *buffer)
46906 {
46907 long nr;
46908 @@ -518,7 +545,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
46909 /************************************************************************/
46910
46911 /* permission checks */
46912 -static int proc_fd_access_allowed(struct inode *inode)
46913 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
46914 {
46915 struct task_struct *task;
46916 int allowed = 0;
46917 @@ -528,7 +555,10 @@ static int proc_fd_access_allowed(struct inode *inode)
46918 */
46919 task = get_proc_task(inode);
46920 if (task) {
46921 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
46922 + if (log)
46923 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
46924 + else
46925 + allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
46926 put_task_struct(task);
46927 }
46928 return allowed;
46929 @@ -566,10 +596,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
46930 struct task_struct *task,
46931 int hide_pid_min)
46932 {
46933 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
46934 + return false;
46935 +
46936 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46937 + rcu_read_lock();
46938 + {
46939 + const struct cred *tmpcred = current_cred();
46940 + const struct cred *cred = __task_cred(task);
46941 +
46942 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
46943 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
46944 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
46945 +#endif
46946 + ) {
46947 + rcu_read_unlock();
46948 + return true;
46949 + }
46950 + }
46951 + rcu_read_unlock();
46952 +
46953 + if (!pid->hide_pid)
46954 + return false;
46955 +#endif
46956 +
46957 if (pid->hide_pid < hide_pid_min)
46958 return true;
46959 if (in_group_p(pid->pid_gid))
46960 return true;
46961 +
46962 return ptrace_may_access(task, PTRACE_MODE_READ);
46963 }
46964
46965 @@ -587,7 +642,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
46966 put_task_struct(task);
46967
46968 if (!has_perms) {
46969 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46970 + {
46971 +#else
46972 if (pid->hide_pid == 2) {
46973 +#endif
46974 /*
46975 * Let's make getdents(), stat(), and open()
46976 * consistent with each other. If a process
46977 @@ -702,6 +761,10 @@ static int mem_open(struct inode* inode, struct file* file)
46978 file->f_mode |= FMODE_UNSIGNED_OFFSET;
46979 file->private_data = mm;
46980
46981 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46982 + file->f_version = current->exec_id;
46983 +#endif
46984 +
46985 return 0;
46986 }
46987
46988 @@ -713,6 +776,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
46989 ssize_t copied;
46990 char *page;
46991
46992 +#ifdef CONFIG_GRKERNSEC
46993 + if (write)
46994 + return -EPERM;
46995 +#endif
46996 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46997 + if (file->f_version != current->exec_id) {
46998 + gr_log_badprocpid("mem");
46999 + return 0;
47000 + }
47001 +#endif
47002 +
47003 if (!mm)
47004 return 0;
47005
47006 @@ -813,6 +887,9 @@ static ssize_t environ_read(struct file *file, char __user *buf,
47007 if (!task)
47008 goto out_no_task;
47009
47010 + if (gr_acl_handle_procpidmem(task))
47011 + goto out;
47012 +
47013 ret = -ENOMEM;
47014 page = (char *)__get_free_page(GFP_TEMPORARY);
47015 if (!page)
47016 @@ -1434,7 +1511,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
47017 path_put(&nd->path);
47018
47019 /* Are we allowed to snoop on the tasks file descriptors? */
47020 - if (!proc_fd_access_allowed(inode))
47021 + if (!proc_fd_access_allowed(inode, 0))
47022 goto out;
47023
47024 error = PROC_I(inode)->op.proc_get_link(dentry, &nd->path);
47025 @@ -1473,8 +1550,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
47026 struct path path;
47027
47028 /* Are we allowed to snoop on the tasks file descriptors? */
47029 - if (!proc_fd_access_allowed(inode))
47030 - goto out;
47031 + /* logging this is needed for learning on chromium to work properly,
47032 + but we don't want to flood the logs from 'ps' which does a readlink
47033 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
47034 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
47035 + */
47036 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
47037 + if (!proc_fd_access_allowed(inode,0))
47038 + goto out;
47039 + } else {
47040 + if (!proc_fd_access_allowed(inode,1))
47041 + goto out;
47042 + }
47043
47044 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
47045 if (error)
47046 @@ -1539,7 +1626,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
47047 rcu_read_lock();
47048 cred = __task_cred(task);
47049 inode->i_uid = cred->euid;
47050 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47051 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
47052 +#else
47053 inode->i_gid = cred->egid;
47054 +#endif
47055 rcu_read_unlock();
47056 }
47057 security_task_to_inode(task, inode);
47058 @@ -1575,10 +1666,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
47059 return -ENOENT;
47060 }
47061 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
47062 +#ifdef CONFIG_GRKERNSEC_PROC_USER
47063 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
47064 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47065 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
47066 +#endif
47067 task_dumpable(task)) {
47068 cred = __task_cred(task);
47069 stat->uid = cred->euid;
47070 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47071 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
47072 +#else
47073 stat->gid = cred->egid;
47074 +#endif
47075 }
47076 }
47077 rcu_read_unlock();
47078 @@ -1616,11 +1716,20 @@ int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
47079
47080 if (task) {
47081 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
47082 +#ifdef CONFIG_GRKERNSEC_PROC_USER
47083 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
47084 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47085 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
47086 +#endif
47087 task_dumpable(task)) {
47088 rcu_read_lock();
47089 cred = __task_cred(task);
47090 inode->i_uid = cred->euid;
47091 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47092 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
47093 +#else
47094 inode->i_gid = cred->egid;
47095 +#endif
47096 rcu_read_unlock();
47097 } else {
47098 inode->i_uid = 0;
47099 @@ -1738,7 +1847,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
47100 int fd = proc_fd(inode);
47101
47102 if (task) {
47103 - files = get_files_struct(task);
47104 + if (!gr_acl_handle_procpidmem(task))
47105 + files = get_files_struct(task);
47106 put_task_struct(task);
47107 }
47108 if (files) {
47109 @@ -2355,11 +2465,21 @@ static const struct file_operations proc_map_files_operations = {
47110 */
47111 static int proc_fd_permission(struct inode *inode, int mask)
47112 {
47113 + struct task_struct *task;
47114 int rv = generic_permission(inode, mask);
47115 - if (rv == 0)
47116 - return 0;
47117 +
47118 if (task_pid(current) == proc_pid(inode))
47119 rv = 0;
47120 +
47121 + task = get_proc_task(inode);
47122 + if (task == NULL)
47123 + return rv;
47124 +
47125 + if (gr_acl_handle_procpidmem(task))
47126 + rv = -EACCES;
47127 +
47128 + put_task_struct(task);
47129 +
47130 return rv;
47131 }
47132
47133 @@ -2469,6 +2589,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
47134 if (!task)
47135 goto out_no_task;
47136
47137 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
47138 + goto out;
47139 +
47140 /*
47141 * Yes, it does not scale. And it should not. Don't add
47142 * new entries into /proc/<tgid>/ without very good reasons.
47143 @@ -2513,6 +2636,9 @@ static int proc_pident_readdir(struct file *filp,
47144 if (!task)
47145 goto out_no_task;
47146
47147 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
47148 + goto out;
47149 +
47150 ret = 0;
47151 i = filp->f_pos;
47152 switch (i) {
47153 @@ -2783,7 +2909,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
47154 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
47155 void *cookie)
47156 {
47157 - char *s = nd_get_link(nd);
47158 + const char *s = nd_get_link(nd);
47159 if (!IS_ERR(s))
47160 __putname(s);
47161 }
47162 @@ -2984,7 +3110,7 @@ static const struct pid_entry tgid_base_stuff[] = {
47163 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
47164 #endif
47165 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
47166 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
47167 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
47168 INF("syscall", S_IRUGO, proc_pid_syscall),
47169 #endif
47170 INF("cmdline", S_IRUGO, proc_pid_cmdline),
47171 @@ -3009,10 +3135,10 @@ static const struct pid_entry tgid_base_stuff[] = {
47172 #ifdef CONFIG_SECURITY
47173 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
47174 #endif
47175 -#ifdef CONFIG_KALLSYMS
47176 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47177 INF("wchan", S_IRUGO, proc_pid_wchan),
47178 #endif
47179 -#ifdef CONFIG_STACKTRACE
47180 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47181 ONE("stack", S_IRUGO, proc_pid_stack),
47182 #endif
47183 #ifdef CONFIG_SCHEDSTATS
47184 @@ -3046,6 +3172,9 @@ static const struct pid_entry tgid_base_stuff[] = {
47185 #ifdef CONFIG_HARDWALL
47186 INF("hardwall", S_IRUGO, proc_pid_hardwall),
47187 #endif
47188 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
47189 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
47190 +#endif
47191 };
47192
47193 static int proc_tgid_base_readdir(struct file * filp,
47194 @@ -3172,7 +3301,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
47195 if (!inode)
47196 goto out;
47197
47198 +#ifdef CONFIG_GRKERNSEC_PROC_USER
47199 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
47200 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47201 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
47202 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
47203 +#else
47204 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
47205 +#endif
47206 inode->i_op = &proc_tgid_base_inode_operations;
47207 inode->i_fop = &proc_tgid_base_operations;
47208 inode->i_flags|=S_IMMUTABLE;
47209 @@ -3214,7 +3350,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
47210 if (!task)
47211 goto out;
47212
47213 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
47214 + goto out_put_task;
47215 +
47216 result = proc_pid_instantiate(dir, dentry, task, NULL);
47217 +out_put_task:
47218 put_task_struct(task);
47219 out:
47220 return result;
47221 @@ -3277,6 +3417,8 @@ static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldi
47222 static int fake_filldir(void *buf, const char *name, int namelen,
47223 loff_t offset, u64 ino, unsigned d_type)
47224 {
47225 + struct getdents_callback * __buf = (struct getdents_callback *) buf;
47226 + __buf->error = -EINVAL;
47227 return 0;
47228 }
47229
47230 @@ -3343,7 +3485,7 @@ static const struct pid_entry tid_base_stuff[] = {
47231 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
47232 #endif
47233 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
47234 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
47235 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
47236 INF("syscall", S_IRUGO, proc_pid_syscall),
47237 #endif
47238 INF("cmdline", S_IRUGO, proc_pid_cmdline),
47239 @@ -3367,10 +3509,10 @@ static const struct pid_entry tid_base_stuff[] = {
47240 #ifdef CONFIG_SECURITY
47241 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
47242 #endif
47243 -#ifdef CONFIG_KALLSYMS
47244 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47245 INF("wchan", S_IRUGO, proc_pid_wchan),
47246 #endif
47247 -#ifdef CONFIG_STACKTRACE
47248 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47249 ONE("stack", S_IRUGO, proc_pid_stack),
47250 #endif
47251 #ifdef CONFIG_SCHEDSTATS
47252 diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
47253 index 82676e3..5f8518a 100644
47254 --- a/fs/proc/cmdline.c
47255 +++ b/fs/proc/cmdline.c
47256 @@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
47257
47258 static int __init proc_cmdline_init(void)
47259 {
47260 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
47261 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
47262 +#else
47263 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
47264 +#endif
47265 return 0;
47266 }
47267 module_init(proc_cmdline_init);
47268 diff --git a/fs/proc/devices.c b/fs/proc/devices.c
47269 index b143471..bb105e5 100644
47270 --- a/fs/proc/devices.c
47271 +++ b/fs/proc/devices.c
47272 @@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
47273
47274 static int __init proc_devices_init(void)
47275 {
47276 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
47277 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
47278 +#else
47279 proc_create("devices", 0, NULL, &proc_devinfo_operations);
47280 +#endif
47281 return 0;
47282 }
47283 module_init(proc_devices_init);
47284 diff --git a/fs/proc/inode.c b/fs/proc/inode.c
47285 index 84fd323..f698a32 100644
47286 --- a/fs/proc/inode.c
47287 +++ b/fs/proc/inode.c
47288 @@ -21,12 +21,18 @@
47289 #include <linux/seq_file.h>
47290 #include <linux/slab.h>
47291 #include <linux/mount.h>
47292 +#include <linux/grsecurity.h>
47293
47294 #include <asm/system.h>
47295 #include <asm/uaccess.h>
47296
47297 #include "internal.h"
47298
47299 +#ifdef CONFIG_PROC_SYSCTL
47300 +extern const struct inode_operations proc_sys_inode_operations;
47301 +extern const struct inode_operations proc_sys_dir_operations;
47302 +#endif
47303 +
47304 static void proc_evict_inode(struct inode *inode)
47305 {
47306 struct proc_dir_entry *de;
47307 @@ -52,6 +58,13 @@ static void proc_evict_inode(struct inode *inode)
47308 ns_ops = PROC_I(inode)->ns_ops;
47309 if (ns_ops && ns_ops->put)
47310 ns_ops->put(PROC_I(inode)->ns);
47311 +
47312 +#ifdef CONFIG_PROC_SYSCTL
47313 + if (inode->i_op == &proc_sys_inode_operations ||
47314 + inode->i_op == &proc_sys_dir_operations)
47315 + gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
47316 +#endif
47317 +
47318 }
47319
47320 static struct kmem_cache * proc_inode_cachep;
47321 @@ -457,7 +470,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
47322 if (de->mode) {
47323 inode->i_mode = de->mode;
47324 inode->i_uid = de->uid;
47325 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47326 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
47327 +#else
47328 inode->i_gid = de->gid;
47329 +#endif
47330 }
47331 if (de->size)
47332 inode->i_size = de->size;
47333 diff --git a/fs/proc/internal.h b/fs/proc/internal.h
47334 index 2925775..4f08fae 100644
47335 --- a/fs/proc/internal.h
47336 +++ b/fs/proc/internal.h
47337 @@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
47338 struct pid *pid, struct task_struct *task);
47339 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
47340 struct pid *pid, struct task_struct *task);
47341 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
47342 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
47343 +#endif
47344 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
47345
47346 extern const struct file_operations proc_maps_operations;
47347 diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
47348 index d245cb2..f4e8498 100644
47349 --- a/fs/proc/kcore.c
47350 +++ b/fs/proc/kcore.c
47351 @@ -478,9 +478,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
47352 * the addresses in the elf_phdr on our list.
47353 */
47354 start = kc_offset_to_vaddr(*fpos - elf_buflen);
47355 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
47356 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
47357 + if (tsz > buflen)
47358 tsz = buflen;
47359 -
47360 +
47361 while (buflen) {
47362 struct kcore_list *m;
47363
47364 @@ -509,20 +510,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
47365 kfree(elf_buf);
47366 } else {
47367 if (kern_addr_valid(start)) {
47368 - unsigned long n;
47369 + char *elf_buf;
47370 + mm_segment_t oldfs;
47371
47372 - n = copy_to_user(buffer, (char *)start, tsz);
47373 - /*
47374 - * We cannot distingush between fault on source
47375 - * and fault on destination. When this happens
47376 - * we clear too and hope it will trigger the
47377 - * EFAULT again.
47378 - */
47379 - if (n) {
47380 - if (clear_user(buffer + tsz - n,
47381 - n))
47382 + elf_buf = kmalloc(tsz, GFP_KERNEL);
47383 + if (!elf_buf)
47384 + return -ENOMEM;
47385 + oldfs = get_fs();
47386 + set_fs(KERNEL_DS);
47387 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
47388 + set_fs(oldfs);
47389 + if (copy_to_user(buffer, elf_buf, tsz)) {
47390 + kfree(elf_buf);
47391 return -EFAULT;
47392 + }
47393 }
47394 + set_fs(oldfs);
47395 + kfree(elf_buf);
47396 } else {
47397 if (clear_user(buffer, tsz))
47398 return -EFAULT;
47399 @@ -542,6 +546,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
47400
47401 static int open_kcore(struct inode *inode, struct file *filp)
47402 {
47403 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
47404 + return -EPERM;
47405 +#endif
47406 if (!capable(CAP_SYS_RAWIO))
47407 return -EPERM;
47408 if (kcore_need_update)
47409 diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
47410 index 80e4645..53e5fcf 100644
47411 --- a/fs/proc/meminfo.c
47412 +++ b/fs/proc/meminfo.c
47413 @@ -158,7 +158,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
47414 vmi.used >> 10,
47415 vmi.largest_chunk >> 10
47416 #ifdef CONFIG_MEMORY_FAILURE
47417 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
47418 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
47419 #endif
47420 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
47421 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
47422 diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
47423 index b1822dd..df622cb 100644
47424 --- a/fs/proc/nommu.c
47425 +++ b/fs/proc/nommu.c
47426 @@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
47427 if (len < 1)
47428 len = 1;
47429 seq_printf(m, "%*c", len, ' ');
47430 - seq_path(m, &file->f_path, "");
47431 + seq_path(m, &file->f_path, "\n\\");
47432 }
47433
47434 seq_putc(m, '\n');
47435 diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
47436 index 06e1cc1..177cd98 100644
47437 --- a/fs/proc/proc_net.c
47438 +++ b/fs/proc/proc_net.c
47439 @@ -105,6 +105,17 @@ static struct net *get_proc_task_net(struct inode *dir)
47440 struct task_struct *task;
47441 struct nsproxy *ns;
47442 struct net *net = NULL;
47443 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47444 + const struct cred *cred = current_cred();
47445 +#endif
47446 +
47447 +#ifdef CONFIG_GRKERNSEC_PROC_USER
47448 + if (cred->fsuid)
47449 + return net;
47450 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47451 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
47452 + return net;
47453 +#endif
47454
47455 rcu_read_lock();
47456 task = pid_task(proc_pid(dir), PIDTYPE_PID);
47457 diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
47458 index a6b6217..1e0579d 100644
47459 --- a/fs/proc/proc_sysctl.c
47460 +++ b/fs/proc/proc_sysctl.c
47461 @@ -9,11 +9,13 @@
47462 #include <linux/namei.h>
47463 #include "internal.h"
47464
47465 +extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
47466 +
47467 static const struct dentry_operations proc_sys_dentry_operations;
47468 static const struct file_operations proc_sys_file_operations;
47469 -static const struct inode_operations proc_sys_inode_operations;
47470 +const struct inode_operations proc_sys_inode_operations;
47471 static const struct file_operations proc_sys_dir_file_operations;
47472 -static const struct inode_operations proc_sys_dir_operations;
47473 +const struct inode_operations proc_sys_dir_operations;
47474
47475 void proc_sys_poll_notify(struct ctl_table_poll *poll)
47476 {
47477 @@ -131,8 +133,14 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
47478
47479 err = NULL;
47480 d_set_d_op(dentry, &proc_sys_dentry_operations);
47481 +
47482 + gr_handle_proc_create(dentry, inode);
47483 +
47484 d_add(dentry, inode);
47485
47486 + if (gr_handle_sysctl(p, MAY_EXEC))
47487 + err = ERR_PTR(-ENOENT);
47488 +
47489 out:
47490 sysctl_head_finish(head);
47491 return err;
47492 @@ -163,6 +171,12 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
47493 if (!table->proc_handler)
47494 goto out;
47495
47496 +#ifdef CONFIG_GRKERNSEC
47497 + error = -EPERM;
47498 + if (write && !capable(CAP_SYS_ADMIN))
47499 + goto out;
47500 +#endif
47501 +
47502 /* careful: calling conventions are nasty here */
47503 res = count;
47504 error = table->proc_handler(table, write, buf, &res, ppos);
47505 @@ -245,6 +259,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
47506 return -ENOMEM;
47507 } else {
47508 d_set_d_op(child, &proc_sys_dentry_operations);
47509 +
47510 + gr_handle_proc_create(child, inode);
47511 +
47512 d_add(child, inode);
47513 }
47514 } else {
47515 @@ -273,6 +290,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
47516 if (*pos < file->f_pos)
47517 continue;
47518
47519 + if (gr_handle_sysctl(table, 0))
47520 + continue;
47521 +
47522 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
47523 if (res)
47524 return res;
47525 @@ -398,6 +418,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
47526 if (IS_ERR(head))
47527 return PTR_ERR(head);
47528
47529 + if (table && gr_handle_sysctl(table, MAY_EXEC))
47530 + return -ENOENT;
47531 +
47532 generic_fillattr(inode, stat);
47533 if (table)
47534 stat->mode = (stat->mode & S_IFMT) | table->mode;
47535 @@ -420,13 +443,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
47536 .llseek = generic_file_llseek,
47537 };
47538
47539 -static const struct inode_operations proc_sys_inode_operations = {
47540 +const struct inode_operations proc_sys_inode_operations = {
47541 .permission = proc_sys_permission,
47542 .setattr = proc_sys_setattr,
47543 .getattr = proc_sys_getattr,
47544 };
47545
47546 -static const struct inode_operations proc_sys_dir_operations = {
47547 +const struct inode_operations proc_sys_dir_operations = {
47548 .lookup = proc_sys_lookup,
47549 .permission = proc_sys_permission,
47550 .setattr = proc_sys_setattr,
47551 diff --git a/fs/proc/root.c b/fs/proc/root.c
47552 index 46a15d8..335631a 100644
47553 --- a/fs/proc/root.c
47554 +++ b/fs/proc/root.c
47555 @@ -187,7 +187,15 @@ void __init proc_root_init(void)
47556 #ifdef CONFIG_PROC_DEVICETREE
47557 proc_device_tree_init();
47558 #endif
47559 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
47560 +#ifdef CONFIG_GRKERNSEC_PROC_USER
47561 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
47562 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47563 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
47564 +#endif
47565 +#else
47566 proc_mkdir("bus", NULL);
47567 +#endif
47568 proc_sys_init();
47569 }
47570
47571 diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
47572 index 7dcd2a2..b2f410e 100644
47573 --- a/fs/proc/task_mmu.c
47574 +++ b/fs/proc/task_mmu.c
47575 @@ -11,6 +11,7 @@
47576 #include <linux/rmap.h>
47577 #include <linux/swap.h>
47578 #include <linux/swapops.h>
47579 +#include <linux/grsecurity.h>
47580
47581 #include <asm/elf.h>
47582 #include <asm/uaccess.h>
47583 @@ -52,8 +53,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
47584 "VmExe:\t%8lu kB\n"
47585 "VmLib:\t%8lu kB\n"
47586 "VmPTE:\t%8lu kB\n"
47587 - "VmSwap:\t%8lu kB\n",
47588 - hiwater_vm << (PAGE_SHIFT-10),
47589 + "VmSwap:\t%8lu kB\n"
47590 +
47591 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
47592 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
47593 +#endif
47594 +
47595 + ,hiwater_vm << (PAGE_SHIFT-10),
47596 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
47597 mm->locked_vm << (PAGE_SHIFT-10),
47598 mm->pinned_vm << (PAGE_SHIFT-10),
47599 @@ -62,7 +68,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
47600 data << (PAGE_SHIFT-10),
47601 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
47602 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
47603 - swap << (PAGE_SHIFT-10));
47604 + swap << (PAGE_SHIFT-10)
47605 +
47606 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
47607 + , mm->context.user_cs_base, mm->context.user_cs_limit
47608 +#endif
47609 +
47610 + );
47611 }
47612
47613 unsigned long task_vsize(struct mm_struct *mm)
47614 @@ -209,6 +221,12 @@ static int do_maps_open(struct inode *inode, struct file *file,
47615 return ret;
47616 }
47617
47618 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47619 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
47620 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
47621 + _mm->pax_flags & MF_PAX_SEGMEXEC))
47622 +#endif
47623 +
47624 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
47625 {
47626 struct mm_struct *mm = vma->vm_mm;
47627 @@ -227,13 +245,13 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
47628 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
47629 }
47630
47631 - /* We don't show the stack guard page in /proc/maps */
47632 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47633 + start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
47634 + end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
47635 +#else
47636 start = vma->vm_start;
47637 - if (stack_guard_page_start(vma, start))
47638 - start += PAGE_SIZE;
47639 end = vma->vm_end;
47640 - if (stack_guard_page_end(vma, end))
47641 - end -= PAGE_SIZE;
47642 +#endif
47643
47644 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
47645 start,
47646 @@ -242,7 +260,11 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
47647 flags & VM_WRITE ? 'w' : '-',
47648 flags & VM_EXEC ? 'x' : '-',
47649 flags & VM_MAYSHARE ? 's' : 'p',
47650 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47651 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
47652 +#else
47653 pgoff,
47654 +#endif
47655 MAJOR(dev), MINOR(dev), ino, &len);
47656
47657 /*
47658 @@ -251,7 +273,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
47659 */
47660 if (file) {
47661 pad_len_spaces(m, len);
47662 - seq_path(m, &file->f_path, "\n");
47663 + seq_path(m, &file->f_path, "\n\\");
47664 } else {
47665 const char *name = arch_vma_name(vma);
47666 if (!name) {
47667 @@ -259,8 +281,9 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
47668 if (vma->vm_start <= mm->brk &&
47669 vma->vm_end >= mm->start_brk) {
47670 name = "[heap]";
47671 - } else if (vma->vm_start <= mm->start_stack &&
47672 - vma->vm_end >= mm->start_stack) {
47673 + } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
47674 + (vma->vm_start <= mm->start_stack &&
47675 + vma->vm_end >= mm->start_stack)) {
47676 name = "[stack]";
47677 }
47678 } else {
47679 @@ -281,6 +304,13 @@ static int show_map(struct seq_file *m, void *v)
47680 struct proc_maps_private *priv = m->private;
47681 struct task_struct *task = priv->task;
47682
47683 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47684 + if (current->exec_id != m->exec_id) {
47685 + gr_log_badprocpid("maps");
47686 + return 0;
47687 + }
47688 +#endif
47689 +
47690 show_map_vma(m, vma);
47691
47692 if (m->count < m->size) /* vma is copied successfully */
47693 @@ -434,12 +464,23 @@ static int show_smap(struct seq_file *m, void *v)
47694 .private = &mss,
47695 };
47696
47697 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47698 + if (current->exec_id != m->exec_id) {
47699 + gr_log_badprocpid("smaps");
47700 + return 0;
47701 + }
47702 +#endif
47703 memset(&mss, 0, sizeof mss);
47704 - mss.vma = vma;
47705 - /* mmap_sem is held in m_start */
47706 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
47707 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
47708 -
47709 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47710 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
47711 +#endif
47712 + mss.vma = vma;
47713 + /* mmap_sem is held in m_start */
47714 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
47715 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
47716 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47717 + }
47718 +#endif
47719 show_map_vma(m, vma);
47720
47721 seq_printf(m,
47722 @@ -457,7 +498,11 @@ static int show_smap(struct seq_file *m, void *v)
47723 "KernelPageSize: %8lu kB\n"
47724 "MMUPageSize: %8lu kB\n"
47725 "Locked: %8lu kB\n",
47726 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47727 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
47728 +#else
47729 (vma->vm_end - vma->vm_start) >> 10,
47730 +#endif
47731 mss.resident >> 10,
47732 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
47733 mss.shared_clean >> 10,
47734 @@ -1015,6 +1060,13 @@ static int show_numa_map(struct seq_file *m, void *v)
47735 int n;
47736 char buffer[50];
47737
47738 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47739 + if (current->exec_id != m->exec_id) {
47740 + gr_log_badprocpid("numa_maps");
47741 + return 0;
47742 + }
47743 +#endif
47744 +
47745 if (!mm)
47746 return 0;
47747
47748 @@ -1032,11 +1084,15 @@ static int show_numa_map(struct seq_file *m, void *v)
47749 mpol_to_str(buffer, sizeof(buffer), pol, 0);
47750 mpol_cond_put(pol);
47751
47752 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47753 + seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
47754 +#else
47755 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
47756 +#endif
47757
47758 if (file) {
47759 seq_printf(m, " file=");
47760 - seq_path(m, &file->f_path, "\n\t= ");
47761 + seq_path(m, &file->f_path, "\n\t\\= ");
47762 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
47763 seq_printf(m, " heap");
47764 } else if (vma->vm_start <= mm->start_stack &&
47765 diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
47766 index 980de54..2a4db5f 100644
47767 --- a/fs/proc/task_nommu.c
47768 +++ b/fs/proc/task_nommu.c
47769 @@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
47770 else
47771 bytes += kobjsize(mm);
47772
47773 - if (current->fs && current->fs->users > 1)
47774 + if (current->fs && atomic_read(&current->fs->users) > 1)
47775 sbytes += kobjsize(current->fs);
47776 else
47777 bytes += kobjsize(current->fs);
47778 @@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
47779
47780 if (file) {
47781 pad_len_spaces(m, len);
47782 - seq_path(m, &file->f_path, "");
47783 + seq_path(m, &file->f_path, "\n\\");
47784 } else if (mm) {
47785 if (vma->vm_start <= mm->start_stack &&
47786 vma->vm_end >= mm->start_stack) {
47787 diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
47788 index d67908b..d13f6a6 100644
47789 --- a/fs/quota/netlink.c
47790 +++ b/fs/quota/netlink.c
47791 @@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
47792 void quota_send_warning(short type, unsigned int id, dev_t dev,
47793 const char warntype)
47794 {
47795 - static atomic_t seq;
47796 + static atomic_unchecked_t seq;
47797 struct sk_buff *skb;
47798 void *msg_head;
47799 int ret;
47800 @@ -49,7 +49,7 @@ void quota_send_warning(short type, unsigned int id, dev_t dev,
47801 "VFS: Not enough memory to send quota warning.\n");
47802 return;
47803 }
47804 - msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
47805 + msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
47806 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
47807 if (!msg_head) {
47808 printk(KERN_ERR
47809 diff --git a/fs/readdir.c b/fs/readdir.c
47810 index 356f715..c918d38 100644
47811 --- a/fs/readdir.c
47812 +++ b/fs/readdir.c
47813 @@ -17,6 +17,7 @@
47814 #include <linux/security.h>
47815 #include <linux/syscalls.h>
47816 #include <linux/unistd.h>
47817 +#include <linux/namei.h>
47818
47819 #include <asm/uaccess.h>
47820
47821 @@ -67,6 +68,7 @@ struct old_linux_dirent {
47822
47823 struct readdir_callback {
47824 struct old_linux_dirent __user * dirent;
47825 + struct file * file;
47826 int result;
47827 };
47828
47829 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
47830 buf->result = -EOVERFLOW;
47831 return -EOVERFLOW;
47832 }
47833 +
47834 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
47835 + return 0;
47836 +
47837 buf->result++;
47838 dirent = buf->dirent;
47839 if (!access_ok(VERIFY_WRITE, dirent,
47840 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
47841
47842 buf.result = 0;
47843 buf.dirent = dirent;
47844 + buf.file = file;
47845
47846 error = vfs_readdir(file, fillonedir, &buf);
47847 if (buf.result)
47848 @@ -142,6 +149,7 @@ struct linux_dirent {
47849 struct getdents_callback {
47850 struct linux_dirent __user * current_dir;
47851 struct linux_dirent __user * previous;
47852 + struct file * file;
47853 int count;
47854 int error;
47855 };
47856 @@ -163,6 +171,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
47857 buf->error = -EOVERFLOW;
47858 return -EOVERFLOW;
47859 }
47860 +
47861 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
47862 + return 0;
47863 +
47864 dirent = buf->previous;
47865 if (dirent) {
47866 if (__put_user(offset, &dirent->d_off))
47867 @@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
47868 buf.previous = NULL;
47869 buf.count = count;
47870 buf.error = 0;
47871 + buf.file = file;
47872
47873 error = vfs_readdir(file, filldir, &buf);
47874 if (error >= 0)
47875 @@ -229,6 +242,7 @@ out:
47876 struct getdents_callback64 {
47877 struct linux_dirent64 __user * current_dir;
47878 struct linux_dirent64 __user * previous;
47879 + struct file *file;
47880 int count;
47881 int error;
47882 };
47883 @@ -244,6 +258,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
47884 buf->error = -EINVAL; /* only used if we fail.. */
47885 if (reclen > buf->count)
47886 return -EINVAL;
47887 +
47888 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
47889 + return 0;
47890 +
47891 dirent = buf->previous;
47892 if (dirent) {
47893 if (__put_user(offset, &dirent->d_off))
47894 @@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
47895
47896 buf.current_dir = dirent;
47897 buf.previous = NULL;
47898 + buf.file = file;
47899 buf.count = count;
47900 buf.error = 0;
47901
47902 @@ -299,7 +318,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
47903 error = buf.error;
47904 lastdirent = buf.previous;
47905 if (lastdirent) {
47906 - typeof(lastdirent->d_off) d_off = file->f_pos;
47907 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
47908 if (__put_user(d_off, &lastdirent->d_off))
47909 error = -EFAULT;
47910 else
47911 diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
47912 index 60c0804..d814f98 100644
47913 --- a/fs/reiserfs/do_balan.c
47914 +++ b/fs/reiserfs/do_balan.c
47915 @@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
47916 return;
47917 }
47918
47919 - atomic_inc(&(fs_generation(tb->tb_sb)));
47920 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
47921 do_balance_starts(tb);
47922
47923 /* balance leaf returns 0 except if combining L R and S into
47924 diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
47925 index 7a99811..a7c96c4 100644
47926 --- a/fs/reiserfs/procfs.c
47927 +++ b/fs/reiserfs/procfs.c
47928 @@ -113,7 +113,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
47929 "SMALL_TAILS " : "NO_TAILS ",
47930 replay_only(sb) ? "REPLAY_ONLY " : "",
47931 convert_reiserfs(sb) ? "CONV " : "",
47932 - atomic_read(&r->s_generation_counter),
47933 + atomic_read_unchecked(&r->s_generation_counter),
47934 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
47935 SF(s_do_balance), SF(s_unneeded_left_neighbor),
47936 SF(s_good_search_by_key_reada), SF(s_bmaps),
47937 diff --git a/fs/select.c b/fs/select.c
47938 index e782258..3b4b44c 100644
47939 --- a/fs/select.c
47940 +++ b/fs/select.c
47941 @@ -20,6 +20,7 @@
47942 #include <linux/module.h>
47943 #include <linux/slab.h>
47944 #include <linux/poll.h>
47945 +#include <linux/security.h>
47946 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
47947 #include <linux/file.h>
47948 #include <linux/fdtable.h>
47949 @@ -837,6 +838,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
47950 struct poll_list *walk = head;
47951 unsigned long todo = nfds;
47952
47953 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
47954 if (nfds > rlimit(RLIMIT_NOFILE))
47955 return -EINVAL;
47956
47957 diff --git a/fs/seq_file.c b/fs/seq_file.c
47958 index 4023d6b..53b39c5 100644
47959 --- a/fs/seq_file.c
47960 +++ b/fs/seq_file.c
47961 @@ -9,6 +9,7 @@
47962 #include <linux/module.h>
47963 #include <linux/seq_file.h>
47964 #include <linux/slab.h>
47965 +#include <linux/sched.h>
47966
47967 #include <asm/uaccess.h>
47968 #include <asm/page.h>
47969 @@ -40,6 +41,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
47970 memset(p, 0, sizeof(*p));
47971 mutex_init(&p->lock);
47972 p->op = op;
47973 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47974 + p->exec_id = current->exec_id;
47975 +#endif
47976
47977 /*
47978 * Wrappers around seq_open(e.g. swaps_open) need to be
47979 @@ -76,7 +80,8 @@ static int traverse(struct seq_file *m, loff_t offset)
47980 return 0;
47981 }
47982 if (!m->buf) {
47983 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
47984 + m->size = PAGE_SIZE;
47985 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
47986 if (!m->buf)
47987 return -ENOMEM;
47988 }
47989 @@ -116,7 +121,8 @@ static int traverse(struct seq_file *m, loff_t offset)
47990 Eoverflow:
47991 m->op->stop(m, p);
47992 kfree(m->buf);
47993 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
47994 + m->size <<= 1;
47995 + m->buf = kmalloc(m->size, GFP_KERNEL);
47996 return !m->buf ? -ENOMEM : -EAGAIN;
47997 }
47998
47999 @@ -169,7 +175,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
48000 m->version = file->f_version;
48001 /* grab buffer if we didn't have one */
48002 if (!m->buf) {
48003 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
48004 + m->size = PAGE_SIZE;
48005 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
48006 if (!m->buf)
48007 goto Enomem;
48008 }
48009 @@ -210,7 +217,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
48010 goto Fill;
48011 m->op->stop(m, p);
48012 kfree(m->buf);
48013 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
48014 + m->size <<= 1;
48015 + m->buf = kmalloc(m->size, GFP_KERNEL);
48016 if (!m->buf)
48017 goto Enomem;
48018 m->count = 0;
48019 @@ -549,7 +557,7 @@ static void single_stop(struct seq_file *p, void *v)
48020 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
48021 void *data)
48022 {
48023 - struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
48024 + seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
48025 int res = -ENOMEM;
48026
48027 if (op) {
48028 diff --git a/fs/splice.c b/fs/splice.c
48029 index 1ec0493..d6ab5c2 100644
48030 --- a/fs/splice.c
48031 +++ b/fs/splice.c
48032 @@ -193,7 +193,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
48033 pipe_lock(pipe);
48034
48035 for (;;) {
48036 - if (!pipe->readers) {
48037 + if (!atomic_read(&pipe->readers)) {
48038 send_sig(SIGPIPE, current, 0);
48039 if (!ret)
48040 ret = -EPIPE;
48041 @@ -247,9 +247,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
48042 do_wakeup = 0;
48043 }
48044
48045 - pipe->waiting_writers++;
48046 + atomic_inc(&pipe->waiting_writers);
48047 pipe_wait(pipe);
48048 - pipe->waiting_writers--;
48049 + atomic_dec(&pipe->waiting_writers);
48050 }
48051
48052 pipe_unlock(pipe);
48053 @@ -559,7 +559,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
48054 old_fs = get_fs();
48055 set_fs(get_ds());
48056 /* The cast to a user pointer is valid due to the set_fs() */
48057 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
48058 + res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
48059 set_fs(old_fs);
48060
48061 return res;
48062 @@ -574,7 +574,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
48063 old_fs = get_fs();
48064 set_fs(get_ds());
48065 /* The cast to a user pointer is valid due to the set_fs() */
48066 - res = vfs_write(file, (const char __user *)buf, count, &pos);
48067 + res = vfs_write(file, (const char __force_user *)buf, count, &pos);
48068 set_fs(old_fs);
48069
48070 return res;
48071 @@ -625,7 +625,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
48072 goto err;
48073
48074 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
48075 - vec[i].iov_base = (void __user *) page_address(page);
48076 + vec[i].iov_base = (void __force_user *) page_address(page);
48077 vec[i].iov_len = this_len;
48078 spd.pages[i] = page;
48079 spd.nr_pages++;
48080 @@ -845,10 +845,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
48081 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
48082 {
48083 while (!pipe->nrbufs) {
48084 - if (!pipe->writers)
48085 + if (!atomic_read(&pipe->writers))
48086 return 0;
48087
48088 - if (!pipe->waiting_writers && sd->num_spliced)
48089 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
48090 return 0;
48091
48092 if (sd->flags & SPLICE_F_NONBLOCK)
48093 @@ -1181,7 +1181,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
48094 * out of the pipe right after the splice_to_pipe(). So set
48095 * PIPE_READERS appropriately.
48096 */
48097 - pipe->readers = 1;
48098 + atomic_set(&pipe->readers, 1);
48099
48100 current->splice_pipe = pipe;
48101 }
48102 @@ -1733,9 +1733,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
48103 ret = -ERESTARTSYS;
48104 break;
48105 }
48106 - if (!pipe->writers)
48107 + if (!atomic_read(&pipe->writers))
48108 break;
48109 - if (!pipe->waiting_writers) {
48110 + if (!atomic_read(&pipe->waiting_writers)) {
48111 if (flags & SPLICE_F_NONBLOCK) {
48112 ret = -EAGAIN;
48113 break;
48114 @@ -1767,7 +1767,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
48115 pipe_lock(pipe);
48116
48117 while (pipe->nrbufs >= pipe->buffers) {
48118 - if (!pipe->readers) {
48119 + if (!atomic_read(&pipe->readers)) {
48120 send_sig(SIGPIPE, current, 0);
48121 ret = -EPIPE;
48122 break;
48123 @@ -1780,9 +1780,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
48124 ret = -ERESTARTSYS;
48125 break;
48126 }
48127 - pipe->waiting_writers++;
48128 + atomic_inc(&pipe->waiting_writers);
48129 pipe_wait(pipe);
48130 - pipe->waiting_writers--;
48131 + atomic_dec(&pipe->waiting_writers);
48132 }
48133
48134 pipe_unlock(pipe);
48135 @@ -1818,14 +1818,14 @@ retry:
48136 pipe_double_lock(ipipe, opipe);
48137
48138 do {
48139 - if (!opipe->readers) {
48140 + if (!atomic_read(&opipe->readers)) {
48141 send_sig(SIGPIPE, current, 0);
48142 if (!ret)
48143 ret = -EPIPE;
48144 break;
48145 }
48146
48147 - if (!ipipe->nrbufs && !ipipe->writers)
48148 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
48149 break;
48150
48151 /*
48152 @@ -1922,7 +1922,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
48153 pipe_double_lock(ipipe, opipe);
48154
48155 do {
48156 - if (!opipe->readers) {
48157 + if (!atomic_read(&opipe->readers)) {
48158 send_sig(SIGPIPE, current, 0);
48159 if (!ret)
48160 ret = -EPIPE;
48161 @@ -1967,7 +1967,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
48162 * return EAGAIN if we have the potential of some data in the
48163 * future, otherwise just return 0
48164 */
48165 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
48166 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
48167 ret = -EAGAIN;
48168
48169 pipe_unlock(ipipe);
48170 diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
48171 index 7fdf6a7..e6cd8ad 100644
48172 --- a/fs/sysfs/dir.c
48173 +++ b/fs/sysfs/dir.c
48174 @@ -642,6 +642,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
48175 struct sysfs_dirent *sd;
48176 int rc;
48177
48178 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
48179 + const char *parent_name = parent_sd->s_name;
48180 +
48181 + mode = S_IFDIR | S_IRWXU;
48182 +
48183 + if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
48184 + (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
48185 + (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
48186 + (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
48187 + mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
48188 +#endif
48189 +
48190 /* allocate */
48191 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
48192 if (!sd)
48193 diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
48194 index 00012e3..8392349 100644
48195 --- a/fs/sysfs/file.c
48196 +++ b/fs/sysfs/file.c
48197 @@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
48198
48199 struct sysfs_open_dirent {
48200 atomic_t refcnt;
48201 - atomic_t event;
48202 + atomic_unchecked_t event;
48203 wait_queue_head_t poll;
48204 struct list_head buffers; /* goes through sysfs_buffer.list */
48205 };
48206 @@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
48207 if (!sysfs_get_active(attr_sd))
48208 return -ENODEV;
48209
48210 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
48211 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
48212 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
48213
48214 sysfs_put_active(attr_sd);
48215 @@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
48216 return -ENOMEM;
48217
48218 atomic_set(&new_od->refcnt, 0);
48219 - atomic_set(&new_od->event, 1);
48220 + atomic_set_unchecked(&new_od->event, 1);
48221 init_waitqueue_head(&new_od->poll);
48222 INIT_LIST_HEAD(&new_od->buffers);
48223 goto retry;
48224 @@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
48225
48226 sysfs_put_active(attr_sd);
48227
48228 - if (buffer->event != atomic_read(&od->event))
48229 + if (buffer->event != atomic_read_unchecked(&od->event))
48230 goto trigger;
48231
48232 return DEFAULT_POLLMASK;
48233 @@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
48234
48235 od = sd->s_attr.open;
48236 if (od) {
48237 - atomic_inc(&od->event);
48238 + atomic_inc_unchecked(&od->event);
48239 wake_up_interruptible(&od->poll);
48240 }
48241
48242 diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
48243 index a7ac78f..02158e1 100644
48244 --- a/fs/sysfs/symlink.c
48245 +++ b/fs/sysfs/symlink.c
48246 @@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
48247
48248 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
48249 {
48250 - char *page = nd_get_link(nd);
48251 + const char *page = nd_get_link(nd);
48252 if (!IS_ERR(page))
48253 free_page((unsigned long)page);
48254 }
48255 diff --git a/fs/udf/misc.c b/fs/udf/misc.c
48256 index c175b4d..8f36a16 100644
48257 --- a/fs/udf/misc.c
48258 +++ b/fs/udf/misc.c
48259 @@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
48260
48261 u8 udf_tag_checksum(const struct tag *t)
48262 {
48263 - u8 *data = (u8 *)t;
48264 + const u8 *data = (const u8 *)t;
48265 u8 checksum = 0;
48266 int i;
48267 for (i = 0; i < sizeof(struct tag); ++i)
48268 diff --git a/fs/utimes.c b/fs/utimes.c
48269 index ba653f3..06ea4b1 100644
48270 --- a/fs/utimes.c
48271 +++ b/fs/utimes.c
48272 @@ -1,6 +1,7 @@
48273 #include <linux/compiler.h>
48274 #include <linux/file.h>
48275 #include <linux/fs.h>
48276 +#include <linux/security.h>
48277 #include <linux/linkage.h>
48278 #include <linux/mount.h>
48279 #include <linux/namei.h>
48280 @@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
48281 goto mnt_drop_write_and_out;
48282 }
48283 }
48284 +
48285 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
48286 + error = -EACCES;
48287 + goto mnt_drop_write_and_out;
48288 + }
48289 +
48290 mutex_lock(&inode->i_mutex);
48291 error = notify_change(path->dentry, &newattrs);
48292 mutex_unlock(&inode->i_mutex);
48293 diff --git a/fs/xattr.c b/fs/xattr.c
48294 index 82f4337..236473c 100644
48295 --- a/fs/xattr.c
48296 +++ b/fs/xattr.c
48297 @@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
48298 * Extended attribute SET operations
48299 */
48300 static long
48301 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
48302 +setxattr(struct path *path, const char __user *name, const void __user *value,
48303 size_t size, int flags)
48304 {
48305 int error;
48306 @@ -339,7 +339,13 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
48307 return PTR_ERR(kvalue);
48308 }
48309
48310 - error = vfs_setxattr(d, kname, kvalue, size, flags);
48311 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
48312 + error = -EACCES;
48313 + goto out;
48314 + }
48315 +
48316 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
48317 +out:
48318 kfree(kvalue);
48319 return error;
48320 }
48321 @@ -356,7 +362,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
48322 return error;
48323 error = mnt_want_write(path.mnt);
48324 if (!error) {
48325 - error = setxattr(path.dentry, name, value, size, flags);
48326 + error = setxattr(&path, name, value, size, flags);
48327 mnt_drop_write(path.mnt);
48328 }
48329 path_put(&path);
48330 @@ -375,7 +381,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
48331 return error;
48332 error = mnt_want_write(path.mnt);
48333 if (!error) {
48334 - error = setxattr(path.dentry, name, value, size, flags);
48335 + error = setxattr(&path, name, value, size, flags);
48336 mnt_drop_write(path.mnt);
48337 }
48338 path_put(&path);
48339 @@ -386,17 +392,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
48340 const void __user *,value, size_t, size, int, flags)
48341 {
48342 struct file *f;
48343 - struct dentry *dentry;
48344 int error = -EBADF;
48345
48346 f = fget(fd);
48347 if (!f)
48348 return error;
48349 - dentry = f->f_path.dentry;
48350 - audit_inode(NULL, dentry);
48351 + audit_inode(NULL, f->f_path.dentry);
48352 error = mnt_want_write_file(f);
48353 if (!error) {
48354 - error = setxattr(dentry, name, value, size, flags);
48355 + error = setxattr(&f->f_path, name, value, size, flags);
48356 mnt_drop_write_file(f);
48357 }
48358 fput(f);
48359 diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
48360 index 8d5a506..7f62712 100644
48361 --- a/fs/xattr_acl.c
48362 +++ b/fs/xattr_acl.c
48363 @@ -17,8 +17,8 @@
48364 struct posix_acl *
48365 posix_acl_from_xattr(const void *value, size_t size)
48366 {
48367 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
48368 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
48369 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
48370 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
48371 int count;
48372 struct posix_acl *acl;
48373 struct posix_acl_entry *acl_e;
48374 diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
48375 index 188ef2f..adcf864 100644
48376 --- a/fs/xfs/xfs_bmap.c
48377 +++ b/fs/xfs/xfs_bmap.c
48378 @@ -190,7 +190,7 @@ xfs_bmap_validate_ret(
48379 int nmap,
48380 int ret_nmap);
48381 #else
48382 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
48383 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
48384 #endif /* DEBUG */
48385
48386 STATIC int
48387 diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
48388 index 79d05e8..e3e5861 100644
48389 --- a/fs/xfs/xfs_dir2_sf.c
48390 +++ b/fs/xfs/xfs_dir2_sf.c
48391 @@ -852,7 +852,15 @@ xfs_dir2_sf_getdents(
48392 }
48393
48394 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
48395 - if (filldir(dirent, (char *)sfep->name, sfep->namelen,
48396 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
48397 + char name[sfep->namelen];
48398 + memcpy(name, sfep->name, sfep->namelen);
48399 + if (filldir(dirent, name, sfep->namelen,
48400 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
48401 + *offset = off & 0x7fffffff;
48402 + return 0;
48403 + }
48404 + } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
48405 off & 0x7fffffff, ino, DT_UNKNOWN)) {
48406 *offset = off & 0x7fffffff;
48407 return 0;
48408 diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
48409 index 76f3ca5..f57f712 100644
48410 --- a/fs/xfs/xfs_ioctl.c
48411 +++ b/fs/xfs/xfs_ioctl.c
48412 @@ -128,7 +128,7 @@ xfs_find_handle(
48413 }
48414
48415 error = -EFAULT;
48416 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
48417 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
48418 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
48419 goto out_put;
48420
48421 diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
48422 index ab30253..4d86958 100644
48423 --- a/fs/xfs/xfs_iops.c
48424 +++ b/fs/xfs/xfs_iops.c
48425 @@ -447,7 +447,7 @@ xfs_vn_put_link(
48426 struct nameidata *nd,
48427 void *p)
48428 {
48429 - char *s = nd_get_link(nd);
48430 + const char *s = nd_get_link(nd);
48431
48432 if (!IS_ERR(s))
48433 kfree(s);
48434 diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
48435 new file mode 100644
48436 index 0000000..4089e05
48437 --- /dev/null
48438 +++ b/grsecurity/Kconfig
48439 @@ -0,0 +1,1078 @@
48440 +#
48441 +# grecurity configuration
48442 +#
48443 +
48444 +menu "Grsecurity"
48445 +
48446 +config GRKERNSEC
48447 + bool "Grsecurity"
48448 + select CRYPTO
48449 + select CRYPTO_SHA256
48450 + help
48451 + If you say Y here, you will be able to configure many features
48452 + that will enhance the security of your system. It is highly
48453 + recommended that you say Y here and read through the help
48454 + for each option so that you fully understand the features and
48455 + can evaluate their usefulness for your machine.
48456 +
48457 +choice
48458 + prompt "Security Level"
48459 + depends on GRKERNSEC
48460 + default GRKERNSEC_CUSTOM
48461 +
48462 +config GRKERNSEC_LOW
48463 + bool "Low"
48464 + select GRKERNSEC_LINK
48465 + select GRKERNSEC_FIFO
48466 + select GRKERNSEC_RANDNET
48467 + select GRKERNSEC_DMESG
48468 + select GRKERNSEC_CHROOT
48469 + select GRKERNSEC_CHROOT_CHDIR
48470 +
48471 + help
48472 + If you choose this option, several of the grsecurity options will
48473 + be enabled that will give you greater protection against a number
48474 + of attacks, while assuring that none of your software will have any
48475 + conflicts with the additional security measures. If you run a lot
48476 + of unusual software, or you are having problems with the higher
48477 + security levels, you should say Y here. With this option, the
48478 + following features are enabled:
48479 +
48480 + - Linking restrictions
48481 + - FIFO restrictions
48482 + - Restricted dmesg
48483 + - Enforced chdir("/") on chroot
48484 + - Runtime module disabling
48485 +
48486 +config GRKERNSEC_MEDIUM
48487 + bool "Medium"
48488 + select PAX
48489 + select PAX_EI_PAX
48490 + select PAX_PT_PAX_FLAGS
48491 + select PAX_HAVE_ACL_FLAGS
48492 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
48493 + select GRKERNSEC_CHROOT
48494 + select GRKERNSEC_CHROOT_SYSCTL
48495 + select GRKERNSEC_LINK
48496 + select GRKERNSEC_FIFO
48497 + select GRKERNSEC_DMESG
48498 + select GRKERNSEC_RANDNET
48499 + select GRKERNSEC_FORKFAIL
48500 + select GRKERNSEC_TIME
48501 + select GRKERNSEC_SIGNAL
48502 + select GRKERNSEC_CHROOT
48503 + select GRKERNSEC_CHROOT_UNIX
48504 + select GRKERNSEC_CHROOT_MOUNT
48505 + select GRKERNSEC_CHROOT_PIVOT
48506 + select GRKERNSEC_CHROOT_DOUBLE
48507 + select GRKERNSEC_CHROOT_CHDIR
48508 + select GRKERNSEC_CHROOT_MKNOD
48509 + select GRKERNSEC_PROC
48510 + select GRKERNSEC_PROC_USERGROUP
48511 + select PAX_RANDUSTACK
48512 + select PAX_ASLR
48513 + select PAX_RANDMMAP
48514 + select PAX_REFCOUNT if (X86 || SPARC64)
48515 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
48516 +
48517 + help
48518 + If you say Y here, several features in addition to those included
48519 + in the low additional security level will be enabled. These
48520 + features provide even more security to your system, though in rare
48521 + cases they may be incompatible with very old or poorly written
48522 + software. If you enable this option, make sure that your auth
48523 + service (identd) is running as gid 1001. With this option,
48524 + the following features (in addition to those provided in the
48525 + low additional security level) will be enabled:
48526 +
48527 + - Failed fork logging
48528 + - Time change logging
48529 + - Signal logging
48530 + - Deny mounts in chroot
48531 + - Deny double chrooting
48532 + - Deny sysctl writes in chroot
48533 + - Deny mknod in chroot
48534 + - Deny access to abstract AF_UNIX sockets out of chroot
48535 + - Deny pivot_root in chroot
48536 + - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
48537 + - /proc restrictions with special GID set to 10 (usually wheel)
48538 + - Address Space Layout Randomization (ASLR)
48539 + - Prevent exploitation of most refcount overflows
48540 + - Bounds checking of copying between the kernel and userland
48541 +
48542 +config GRKERNSEC_HIGH
48543 + bool "High"
48544 + select GRKERNSEC_LINK
48545 + select GRKERNSEC_FIFO
48546 + select GRKERNSEC_DMESG
48547 + select GRKERNSEC_FORKFAIL
48548 + select GRKERNSEC_TIME
48549 + select GRKERNSEC_SIGNAL
48550 + select GRKERNSEC_CHROOT
48551 + select GRKERNSEC_CHROOT_SHMAT
48552 + select GRKERNSEC_CHROOT_UNIX
48553 + select GRKERNSEC_CHROOT_MOUNT
48554 + select GRKERNSEC_CHROOT_FCHDIR
48555 + select GRKERNSEC_CHROOT_PIVOT
48556 + select GRKERNSEC_CHROOT_DOUBLE
48557 + select GRKERNSEC_CHROOT_CHDIR
48558 + select GRKERNSEC_CHROOT_MKNOD
48559 + select GRKERNSEC_CHROOT_CAPS
48560 + select GRKERNSEC_CHROOT_SYSCTL
48561 + select GRKERNSEC_CHROOT_FINDTASK
48562 + select GRKERNSEC_SYSFS_RESTRICT
48563 + select GRKERNSEC_PROC
48564 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
48565 + select GRKERNSEC_HIDESYM
48566 + select GRKERNSEC_BRUTE
48567 + select GRKERNSEC_PROC_USERGROUP
48568 + select GRKERNSEC_KMEM
48569 + select GRKERNSEC_RESLOG
48570 + select GRKERNSEC_RANDNET
48571 + select GRKERNSEC_PROC_ADD
48572 + select GRKERNSEC_CHROOT_CHMOD
48573 + select GRKERNSEC_CHROOT_NICE
48574 + select GRKERNSEC_SETXID
48575 + select GRKERNSEC_AUDIT_MOUNT
48576 + select GRKERNSEC_MODHARDEN if (MODULES)
48577 + select GRKERNSEC_HARDEN_PTRACE
48578 + select GRKERNSEC_PTRACE_READEXEC
48579 + select GRKERNSEC_VM86 if (X86_32)
48580 + select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
48581 + select PAX
48582 + select PAX_RANDUSTACK
48583 + select PAX_ASLR
48584 + select PAX_RANDMMAP
48585 + select PAX_NOEXEC
48586 + select PAX_MPROTECT
48587 + select PAX_EI_PAX
48588 + select PAX_PT_PAX_FLAGS
48589 + select PAX_HAVE_ACL_FLAGS
48590 + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
48591 + select PAX_MEMORY_UDEREF if (X86 && !XEN)
48592 + select PAX_RANDKSTACK if (X86_TSC && X86)
48593 + select PAX_SEGMEXEC if (X86_32)
48594 + select PAX_PAGEEXEC
48595 + select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
48596 + select PAX_EMUTRAMP if (PARISC)
48597 + select PAX_EMUSIGRT if (PARISC)
48598 + select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
48599 + select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
48600 + select PAX_REFCOUNT if (X86 || SPARC64)
48601 + select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
48602 + help
48603 + If you say Y here, many of the features of grsecurity will be
48604 + enabled, which will protect you against many kinds of attacks
48605 + against your system. The heightened security comes at a cost
48606 + of an increased chance of incompatibilities with rare software
48607 + on your machine. Since this security level enables PaX, you should
48608 + view <http://pax.grsecurity.net> and read about the PaX
48609 + project. While you are there, download chpax and run it on
48610 + binaries that cause problems with PaX. Also remember that
48611 + since the /proc restrictions are enabled, you must run your
48612 + identd as gid 1001. This security level enables the following
48613 + features in addition to those listed in the low and medium
48614 + security levels:
48615 +
48616 + - Additional /proc restrictions
48617 + - Chmod restrictions in chroot
48618 + - No signals, ptrace, or viewing of processes outside of chroot
48619 + - Capability restrictions in chroot
48620 + - Deny fchdir out of chroot
48621 + - Priority restrictions in chroot
48622 + - Segmentation-based implementation of PaX
48623 + - Mprotect restrictions
48624 + - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
48625 + - Kernel stack randomization
48626 + - Mount/unmount/remount logging
48627 + - Kernel symbol hiding
48628 + - Hardening of module auto-loading
48629 + - Ptrace restrictions
48630 + - Restricted vm86 mode
48631 + - Restricted sysfs/debugfs
48632 + - Active kernel exploit response
48633 +
48634 +config GRKERNSEC_CUSTOM
48635 + bool "Custom"
48636 + help
48637 + If you say Y here, you will be able to configure every grsecurity
48638 + option, which allows you to enable many more features that aren't
48639 + covered in the basic security levels. These additional features
48640 + include TPE, socket restrictions, and the sysctl system for
48641 + grsecurity. It is advised that you read through the help for
48642 + each option to determine its usefulness in your situation.
48643 +
48644 +endchoice
48645 +
48646 +menu "Memory Protections"
48647 +depends on GRKERNSEC
48648 +
48649 +config GRKERNSEC_KMEM
48650 + bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
48651 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
48652 + help
48653 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
48654 + be written to or read from to modify or leak the contents of the running
48655 + kernel. /dev/port will also not be allowed to be opened. If you have module
48656 + support disabled, enabling this will close up four ways that are
48657 + currently used to insert malicious code into the running kernel.
48658 + Even with all these features enabled, we still highly recommend that
48659 + you use the RBAC system, as it is still possible for an attacker to
48660 + modify the running kernel through privileged I/O granted by ioperm/iopl.
48661 + If you are not using XFree86, you may be able to stop this additional
48662 + case by enabling the 'Disable privileged I/O' option. Though nothing
48663 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
48664 + but only to video memory, which is the only writing we allow in this
48665 + case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
48666 + not be allowed to mprotect it with PROT_WRITE later.
48667 + It is highly recommended that you say Y here if you meet all the
48668 + conditions above.
48669 +
48670 +config GRKERNSEC_VM86
48671 + bool "Restrict VM86 mode"
48672 + depends on X86_32
48673 +
48674 + help
48675 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
48676 + make use of a special execution mode on 32bit x86 processors called
48677 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
48678 + video cards and will still work with this option enabled. The purpose
48679 + of the option is to prevent exploitation of emulation errors in
48680 + virtualization of vm86 mode like the one discovered in VMWare in 2009.
48681 + Nearly all users should be able to enable this option.
48682 +
48683 +config GRKERNSEC_IO
48684 + bool "Disable privileged I/O"
48685 + depends on X86
48686 + select RTC_CLASS
48687 + select RTC_INTF_DEV
48688 + select RTC_DRV_CMOS
48689 +
48690 + help
48691 + If you say Y here, all ioperm and iopl calls will return an error.
48692 + Ioperm and iopl can be used to modify the running kernel.
48693 + Unfortunately, some programs need this access to operate properly,
48694 + the most notable of which are XFree86 and hwclock. hwclock can be
48695 + remedied by having RTC support in the kernel, so real-time
48696 + clock support is enabled if this option is enabled, to ensure
48697 + that hwclock operates correctly. XFree86 still will not
48698 + operate correctly with this option enabled, so DO NOT CHOOSE Y
48699 + IF YOU USE XFree86. If you use XFree86 and you still want to
48700 + protect your kernel against modification, use the RBAC system.
48701 +
48702 +config GRKERNSEC_PROC_MEMMAP
48703 + bool "Harden ASLR against information leaks and entropy reduction"
48704 + default y if (PAX_NOEXEC || PAX_ASLR)
48705 + depends on PAX_NOEXEC || PAX_ASLR
48706 + help
48707 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
48708 + give no information about the addresses of its mappings if
48709 + PaX features that rely on random addresses are enabled on the task.
48710 + In addition to sanitizing this information and disabling other
48711 + dangerous sources of information, this option causes reads of sensitive
48712 + /proc/<pid> entries to be denied where the file descriptor was opened in
48713 + a different task than the one performing the read. Such attempts are logged.
48714 + This option also limits argv/env strings for suid/sgid binaries
48715 + to 512KB to prevent a complete exhaustion of the stack entropy provided
48716 + by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
48717 + binaries to prevent alternative mmap layouts from being abused.
48718 +
48719 + If you use PaX it is essential that you say Y here as it closes up
48720 + several holes that make full ASLR useless locally.
48721 +
48722 +config GRKERNSEC_BRUTE
48723 + bool "Deter exploit bruteforcing"
48724 + help
48725 + If you say Y here, attempts to bruteforce exploits against forking
48726 + daemons such as apache or sshd, as well as against suid/sgid binaries
48727 + will be deterred. When a child of a forking daemon is killed by PaX
48728 + or crashes due to an illegal instruction or other suspicious signal,
48729 + the parent process will be delayed 30 seconds upon every subsequent
48730 + fork until the administrator is able to assess the situation and
48731 + restart the daemon.
48732 + In the suid/sgid case, the attempt is logged, the user has all their
48733 + processes terminated, and they are prevented from executing any further
48734 + processes for 15 minutes.
48735 + It is recommended that you also enable signal logging in the auditing
48736 + section so that logs are generated when a process triggers a suspicious
48737 + signal.
48738 + If the sysctl option is enabled, a sysctl option with name
48739 + "deter_bruteforce" is created.
48740 +
48741 +
48742 +config GRKERNSEC_MODHARDEN
48743 + bool "Harden module auto-loading"
48744 + depends on MODULES
48745 + help
48746 + If you say Y here, module auto-loading in response to use of some
48747 + feature implemented by an unloaded module will be restricted to
48748 + root users. Enabling this option helps defend against attacks
48749 + by unprivileged users who abuse the auto-loading behavior to
48750 + cause a vulnerable module to load that is then exploited.
48751 +
48752 + If this option prevents a legitimate use of auto-loading for a
48753 + non-root user, the administrator can execute modprobe manually
48754 + with the exact name of the module mentioned in the alert log.
48755 + Alternatively, the administrator can add the module to the list
48756 + of modules loaded at boot by modifying init scripts.
48757 +
48758 + Modification of init scripts will most likely be needed on
48759 + Ubuntu servers with encrypted home directory support enabled,
48760 + as the first non-root user logging in will cause the ecb(aes),
48761 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
48762 +
48763 +config GRKERNSEC_HIDESYM
48764 + bool "Hide kernel symbols"
48765 + help
48766 + If you say Y here, getting information on loaded modules, and
48767 + displaying all kernel symbols through a syscall will be restricted
48768 + to users with CAP_SYS_MODULE. For software compatibility reasons,
48769 + /proc/kallsyms will be restricted to the root user. The RBAC
48770 + system can hide that entry even from root.
48771 +
48772 + This option also prevents leaking of kernel addresses through
48773 + several /proc entries.
48774 +
48775 + Note that this option is only effective provided the following
48776 + conditions are met:
48777 + 1) The kernel using grsecurity is not precompiled by some distribution
48778 + 2) You have also enabled GRKERNSEC_DMESG
48779 + 3) You are using the RBAC system and hiding other files such as your
48780 + kernel image and System.map. Alternatively, enabling this option
48781 + causes the permissions on /boot, /lib/modules, and the kernel
48782 + source directory to change at compile time to prevent
48783 + reading by non-root users.
48784 + If the above conditions are met, this option will aid in providing a
48785 + useful protection against local kernel exploitation of overflows
48786 + and arbitrary read/write vulnerabilities.
48787 +
48788 +config GRKERNSEC_KERN_LOCKOUT
48789 + bool "Active kernel exploit response"
48790 + depends on X86 || ARM || PPC || SPARC
48791 + help
48792 + If you say Y here, when a PaX alert is triggered due to suspicious
48793 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
48794 + or an OOPs occurs due to bad memory accesses, instead of just
48795 + terminating the offending process (and potentially allowing
48796 + a subsequent exploit from the same user), we will take one of two
48797 + actions:
48798 + If the user was root, we will panic the system
48799 + If the user was non-root, we will log the attempt, terminate
48800 + all processes owned by the user, then prevent them from creating
48801 + any new processes until the system is restarted
48802 + This deters repeated kernel exploitation/bruteforcing attempts
48803 + and is useful for later forensics.
48804 +
48805 +endmenu
48806 +menu "Role Based Access Control Options"
48807 +depends on GRKERNSEC
48808 +
48809 +config GRKERNSEC_RBAC_DEBUG
48810 + bool
48811 +
48812 +config GRKERNSEC_NO_RBAC
48813 + bool "Disable RBAC system"
48814 + help
48815 + If you say Y here, the /dev/grsec device will be removed from the kernel,
48816 + preventing the RBAC system from being enabled. You should only say Y
48817 + here if you have no intention of using the RBAC system, so as to prevent
48818 + an attacker with root access from misusing the RBAC system to hide files
48819 + and processes when loadable module support and /dev/[k]mem have been
48820 + locked down.
48821 +
48822 +config GRKERNSEC_ACL_HIDEKERN
48823 + bool "Hide kernel processes"
48824 + help
48825 + If you say Y here, all kernel threads will be hidden to all
48826 + processes but those whose subject has the "view hidden processes"
48827 + flag.
48828 +
48829 +config GRKERNSEC_ACL_MAXTRIES
48830 + int "Maximum tries before password lockout"
48831 + default 3
48832 + help
48833 + This option enforces the maximum number of times a user can attempt
48834 + to authorize themselves with the grsecurity RBAC system before being
48835 + denied the ability to attempt authorization again for a specified time.
48836 + The lower the number, the harder it will be to brute-force a password.
48837 +
48838 +config GRKERNSEC_ACL_TIMEOUT
48839 + int "Time to wait after max password tries, in seconds"
48840 + default 30
48841 + help
48842 + This option specifies the time the user must wait after attempting to
48843 + authorize to the RBAC system with the maximum number of invalid
48844 + passwords. The higher the number, the harder it will be to brute-force
48845 + a password.
48846 +
48847 +endmenu
48848 +menu "Filesystem Protections"
48849 +depends on GRKERNSEC
48850 +
48851 +config GRKERNSEC_PROC
48852 + bool "Proc restrictions"
48853 + help
48854 + If you say Y here, the permissions of the /proc filesystem
48855 + will be altered to enhance system security and privacy. You MUST
48856 + choose either a user only restriction or a user and group restriction.
48857 + Depending upon the option you choose, you can either restrict users to
48858 + see only the processes they themselves run, or choose a group that can
48859 + view all processes and files normally restricted to root if you choose
48860 + the "restrict to user only" option. NOTE: If you're running identd or
48861 + ntpd as a non-root user, you will have to run it as the group you
48862 + specify here.
48863 +
48864 +config GRKERNSEC_PROC_USER
48865 + bool "Restrict /proc to user only"
48866 + depends on GRKERNSEC_PROC
48867 + help
48868 + If you say Y here, non-root users will only be able to view their own
48869 + processes, and restricts them from viewing network-related information,
48870 + and viewing kernel symbol and module information.
48871 +
48872 +config GRKERNSEC_PROC_USERGROUP
48873 + bool "Allow special group"
48874 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
48875 + help
48876 + If you say Y here, you will be able to select a group that will be
48877 + able to view all processes and network-related information. If you've
48878 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
48879 + remain hidden. This option is useful if you want to run identd as
48880 + a non-root user.
48881 +
48882 +config GRKERNSEC_PROC_GID
48883 + int "GID for special group"
48884 + depends on GRKERNSEC_PROC_USERGROUP
48885 + default 1001
48886 +
48887 +config GRKERNSEC_PROC_ADD
48888 + bool "Additional restrictions"
48889 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
48890 + help
48891 + If you say Y here, additional restrictions will be placed on
48892 + /proc that keep normal users from viewing device information and
48893 + slabinfo information that could be useful for exploits.
48894 +
48895 +config GRKERNSEC_LINK
48896 + bool "Linking restrictions"
48897 + help
48898 + If you say Y here, /tmp race exploits will be prevented, since users
48899 + will no longer be able to follow symlinks owned by other users in
48900 + world-writable +t directories (e.g. /tmp), unless the owner of the
48901 + symlink is the owner of the directory. Users will also not be
48902 + able to hardlink to files they do not own. If the sysctl option is
48903 + enabled, a sysctl option with name "linking_restrictions" is created.
48904 +
48905 +config GRKERNSEC_FIFO
48906 + bool "FIFO restrictions"
48907 + help
48908 + If you say Y here, users will not be able to write to FIFOs they don't
48909 + own in world-writable +t directories (e.g. /tmp), unless the owner of
48910 + the FIFO is the same as the owner of the directory it's held in. If the
48911 + sysctl option is enabled, a sysctl option with name "fifo_restrictions" is
48912 + created.
48913 +
48914 +config GRKERNSEC_SYSFS_RESTRICT
48915 + bool "Sysfs/debugfs restriction"
48916 + depends on SYSFS
48917 + help
48918 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
48919 + any filesystem normally mounted under it (e.g. debugfs) will be
48920 + mostly accessible only by root. These filesystems generally provide access
48921 + to hardware and debug information that isn't appropriate for unprivileged
48922 + users of the system. Sysfs and debugfs have also become a large source
48923 + of new vulnerabilities, ranging from infoleaks to local compromise.
48924 + There has been very little oversight with an eye toward security involved
48925 + in adding new exporters of information to these filesystems, so their
48926 + use is discouraged.
48927 + For reasons of compatibility, a few directories have been whitelisted
48928 + for access by non-root users:
48929 + /sys/fs/selinux
48930 + /sys/fs/fuse
48931 + /sys/devices/system/cpu
48932 +
48933 +config GRKERNSEC_ROFS
48934 + bool "Runtime read-only mount protection"
48935 + help
48936 + If you say Y here, a sysctl option with name "romount_protect" will
48937 + be created. By setting this option to 1 at runtime, filesystems
48938 + will be protected in the following ways:
48939 + * No new writable mounts will be allowed
48940 + * Existing read-only mounts won't be able to be remounted read/write
48941 + * Write operations will be denied on all block devices
48942 + This option acts independently of grsec_lock: once it is set to 1,
48943 + it cannot be turned off. Therefore, please be mindful of the resulting
48944 + behavior if this option is enabled in an init script on a read-only
48945 + filesystem. This feature is mainly intended for secure embedded systems.
48946 +
48947 +config GRKERNSEC_CHROOT
48948 + bool "Chroot jail restrictions"
48949 + help
48950 + If you say Y here, you will be able to choose several options that will
48951 + make breaking out of a chrooted jail much more difficult. If you
48952 + encounter no software incompatibilities with the following options, it
48953 + is recommended that you enable each one.
48954 +
48955 +config GRKERNSEC_CHROOT_MOUNT
48956 + bool "Deny mounts"
48957 + depends on GRKERNSEC_CHROOT
48958 + help
48959 + If you say Y here, processes inside a chroot will not be able to
48960 + mount or remount filesystems. If the sysctl option is enabled, a
48961 + sysctl option with name "chroot_deny_mount" is created.
48962 +
48963 +config GRKERNSEC_CHROOT_DOUBLE
48964 + bool "Deny double-chroots"
48965 + depends on GRKERNSEC_CHROOT
48966 + help
48967 + If you say Y here, processes inside a chroot will not be able to chroot
48968 + again outside the chroot. This is a widely used method of breaking
48969 + out of a chroot jail and should not be allowed. If the sysctl
48970 + option is enabled, a sysctl option with name
48971 + "chroot_deny_chroot" is created.
48972 +
48973 +config GRKERNSEC_CHROOT_PIVOT
48974 + bool "Deny pivot_root in chroot"
48975 + depends on GRKERNSEC_CHROOT
48976 + help
48977 + If you say Y here, processes inside a chroot will not be able to use
48978 + a function called pivot_root() that was introduced in Linux 2.3.41. It
48979 + works similar to chroot in that it changes the root filesystem. This
48980 + function could be misused in a chrooted process to attempt to break out
48981 + of the chroot, and therefore should not be allowed. If the sysctl
48982 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
48983 + created.
48984 +
48985 +config GRKERNSEC_CHROOT_CHDIR
48986 + bool "Enforce chdir(\"/\") on all chroots"
48987 + depends on GRKERNSEC_CHROOT
48988 + help
48989 + If you say Y here, the current working directory of all newly-chrooted
48990 + applications will be set to the root directory of the chroot.
48991 + The man page on chroot(2) states:
48992 + Note that this call does not change the current working
48993 + directory, so that `.' can be outside the tree rooted at
48994 + `/'. In particular, the super-user can escape from a
48995 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
48996 +
48997 + It is recommended that you say Y here, since it's not known to break
48998 + any software. If the sysctl option is enabled, a sysctl option with
48999 + name "chroot_enforce_chdir" is created.
49000 +
49001 +config GRKERNSEC_CHROOT_CHMOD
49002 + bool "Deny (f)chmod +s"
49003 + depends on GRKERNSEC_CHROOT
49004 + help
49005 + If you say Y here, processes inside a chroot will not be able to chmod
49006 + or fchmod files to make them have suid or sgid bits. This protects
49007 + against another published method of breaking a chroot. If the sysctl
49008 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
49009 + created.
49010 +
49011 +config GRKERNSEC_CHROOT_FCHDIR
49012 + bool "Deny fchdir out of chroot"
49013 + depends on GRKERNSEC_CHROOT
49014 + help
49015 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
49016 + to a file descriptor of the chrooting process that points to a directory
49017 + outside the filesystem will be stopped. If the sysctl option
49018 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
49019 +
49020 +config GRKERNSEC_CHROOT_MKNOD
49021 + bool "Deny mknod"
49022 + depends on GRKERNSEC_CHROOT
49023 + help
49024 + If you say Y here, processes inside a chroot will not be allowed to
49025 + mknod. The problem with using mknod inside a chroot is that it
49026 + would allow an attacker to create a device entry that is the same
49027 + as one on the physical root of your system, which could range from
49028 + anything from the console device to a device for your harddrive (which
49029 + they could then use to wipe the drive or steal data). It is recommended
49030 + that you say Y here, unless you run into software incompatibilities.
49031 + If the sysctl option is enabled, a sysctl option with name
49032 + "chroot_deny_mknod" is created.
49033 +
49034 +config GRKERNSEC_CHROOT_SHMAT
49035 + bool "Deny shmat() out of chroot"
49036 + depends on GRKERNSEC_CHROOT
49037 + help
49038 + If you say Y here, processes inside a chroot will not be able to attach
49039 + to shared memory segments that were created outside of the chroot jail.
49040 + It is recommended that you say Y here. If the sysctl option is enabled,
49041 + a sysctl option with name "chroot_deny_shmat" is created.
49042 +
49043 +config GRKERNSEC_CHROOT_UNIX
49044 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
49045 + depends on GRKERNSEC_CHROOT
49046 + help
49047 + If you say Y here, processes inside a chroot will not be able to
49048 + connect to abstract (meaning not belonging to a filesystem) Unix
49049 + domain sockets that were bound outside of a chroot. It is recommended
49050 + that you say Y here. If the sysctl option is enabled, a sysctl option
49051 + with name "chroot_deny_unix" is created.
49052 +
49053 +config GRKERNSEC_CHROOT_FINDTASK
49054 + bool "Protect outside processes"
49055 + depends on GRKERNSEC_CHROOT
49056 + help
49057 + If you say Y here, processes inside a chroot will not be able to
49058 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
49059 + getsid, or view any process outside of the chroot. If the sysctl
49060 + option is enabled, a sysctl option with name "chroot_findtask" is
49061 + created.
49062 +
49063 +config GRKERNSEC_CHROOT_NICE
49064 + bool "Restrict priority changes"
49065 + depends on GRKERNSEC_CHROOT
49066 + help
49067 + If you say Y here, processes inside a chroot will not be able to raise
49068 + the priority of processes in the chroot, or alter the priority of
49069 + processes outside the chroot. This provides more security than simply
49070 + removing CAP_SYS_NICE from the process' capability set. If the
49071 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
49072 + is created.
49073 +
49074 +config GRKERNSEC_CHROOT_SYSCTL
49075 + bool "Deny sysctl writes"
49076 + depends on GRKERNSEC_CHROOT
49077 + help
49078 + If you say Y here, an attacker in a chroot will not be able to
49079 + write to sysctl entries, either by sysctl(2) or through a /proc
49080 + interface. It is strongly recommended that you say Y here. If the
49081 + sysctl option is enabled, a sysctl option with name
49082 + "chroot_deny_sysctl" is created.
49083 +
49084 +config GRKERNSEC_CHROOT_CAPS
49085 + bool "Capability restrictions"
49086 + depends on GRKERNSEC_CHROOT
49087 + help
49088 + If you say Y here, the capabilities on all processes within a
49089 + chroot jail will be lowered to stop module insertion, raw i/o,
49090 + system and net admin tasks, rebooting the system, modifying immutable
49091 + files, modifying IPC owned by another, and changing the system time.
49092 + This is left an option because it can break some apps. Disable this
49093 + if your chrooted apps are having problems performing those kinds of
49094 + tasks. If the sysctl option is enabled, a sysctl option with
49095 + name "chroot_caps" is created.
49096 +
49097 +endmenu
49098 +menu "Kernel Auditing"
49099 +depends on GRKERNSEC
49100 +
49101 +config GRKERNSEC_AUDIT_GROUP
49102 + bool "Single group for auditing"
49103 + help
49104 + If you say Y here, the exec, chdir, and (un)mount logging features
49105 + will only operate on a group you specify. This option is recommended
49106 + if you only want to watch certain users instead of having a large
49107 + amount of logs from the entire system. If the sysctl option is enabled,
49108 + a sysctl option with name "audit_group" is created.
49109 +
49110 +config GRKERNSEC_AUDIT_GID
49111 + int "GID for auditing"
49112 + depends on GRKERNSEC_AUDIT_GROUP
49113 + default 1007
49114 +
49115 +config GRKERNSEC_EXECLOG
49116 + bool "Exec logging"
49117 + help
49118 + If you say Y here, all execve() calls will be logged (since the
49119 + other exec*() calls are frontends to execve(), all execution
49120 + will be logged). Useful for shell-servers that like to keep track
49121 + of their users. If the sysctl option is enabled, a sysctl option with
49122 + name "exec_logging" is created.
49123 + WARNING: This option when enabled will produce a LOT of logs, especially
49124 + on an active system.
49125 +
49126 +config GRKERNSEC_RESLOG
49127 + bool "Resource logging"
49128 + help
49129 + If you say Y here, all attempts to overstep resource limits will
49130 + be logged with the resource name, the requested size, and the current
49131 + limit. It is highly recommended that you say Y here. If the sysctl
49132 + option is enabled, a sysctl option with name "resource_logging" is
49133 + created. If the RBAC system is enabled, the sysctl value is ignored.
49134 +
49135 +config GRKERNSEC_CHROOT_EXECLOG
49136 + bool "Log execs within chroot"
49137 + help
49138 + If you say Y here, all executions inside a chroot jail will be logged
49139 + to syslog. This can cause a large amount of logs if certain
49140 + applications (eg. djb's daemontools) are installed on the system, and
49141 + is therefore left as an option. If the sysctl option is enabled, a
49142 + sysctl option with name "chroot_execlog" is created.
49143 +
49144 +config GRKERNSEC_AUDIT_PTRACE
49145 + bool "Ptrace logging"
49146 + help
49147 + If you say Y here, all attempts to attach to a process via ptrace
49148 + will be logged. If the sysctl option is enabled, a sysctl option
49149 + with name "audit_ptrace" is created.
49150 +
49151 +config GRKERNSEC_AUDIT_CHDIR
49152 + bool "Chdir logging"
49153 + help
49154 + If you say Y here, all chdir() calls will be logged. If the sysctl
49155 + option is enabled, a sysctl option with name "audit_chdir" is created.
49156 +
49157 +config GRKERNSEC_AUDIT_MOUNT
49158 + bool "(Un)Mount logging"
49159 + help
49160 + If you say Y here, all mounts and unmounts will be logged. If the
49161 + sysctl option is enabled, a sysctl option with name "audit_mount" is
49162 + created.
49163 +
49164 +config GRKERNSEC_SIGNAL
49165 + bool "Signal logging"
49166 + help
49167 + If you say Y here, certain important signals will be logged, such as
49168 + SIGSEGV, which will as a result inform you of when an error in a program
49169 + occurred, which in some cases could mean a possible exploit attempt.
49170 + If the sysctl option is enabled, a sysctl option with name
49171 + "signal_logging" is created.
49172 +
49173 +config GRKERNSEC_FORKFAIL
49174 + bool "Fork failure logging"
49175 + help
49176 + If you say Y here, all failed fork() attempts will be logged.
49177 + This could suggest a fork bomb, or someone attempting to overstep
49178 + their process limit. If the sysctl option is enabled, a sysctl option
49179 + with name "forkfail_logging" is created.
49180 +
49181 +config GRKERNSEC_TIME
49182 + bool "Time change logging"
49183 + help
49184 + If you say Y here, any changes of the system clock will be logged.
49185 + If the sysctl option is enabled, a sysctl option with name
49186 + "timechange_logging" is created.
49187 +
49188 +config GRKERNSEC_PROC_IPADDR
49189 + bool "/proc/<pid>/ipaddr support"
49190 + help
49191 + If you say Y here, a new entry will be added to each /proc/<pid>
49192 + directory that contains the IP address of the person using the task.
49193 + The IP is carried across local TCP and AF_UNIX stream sockets.
49194 + This information can be useful for IDS/IPSes to perform remote response
49195 + to a local attack. The entry is readable by only the owner of the
49196 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
49197 + the RBAC system), and thus does not create privacy concerns.
49198 +
49199 +config GRKERNSEC_RWXMAP_LOG
49200 + bool 'Denied RWX mmap/mprotect logging'
49201 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
49202 + help
49203 + If you say Y here, calls to mmap() and mprotect() with explicit
49204 + usage of PROT_WRITE and PROT_EXEC together will be logged when
49205 + denied by the PAX_MPROTECT feature. If the sysctl option is
49206 + enabled, a sysctl option with name "rwxmap_logging" is created.
49207 +
49208 +config GRKERNSEC_AUDIT_TEXTREL
49209 + bool 'ELF text relocations logging (READ HELP)'
49210 + depends on PAX_MPROTECT
49211 + help
49212 + If you say Y here, text relocations will be logged with the filename
49213 + of the offending library or binary. The purpose of the feature is
49214 + to help Linux distribution developers get rid of libraries and
49215 + binaries that need text relocations which hinder the future progress
49216 + of PaX. Only Linux distribution developers should say Y here, and
49217 + never on a production machine, as this option creates an information
49218 + leak that could aid an attacker in defeating the randomization of
49219 + a single memory region. If the sysctl option is enabled, a sysctl
49220 + option with name "audit_textrel" is created.
49221 +
49222 +endmenu
49223 +
49224 +menu "Executable Protections"
49225 +depends on GRKERNSEC
49226 +
49227 +config GRKERNSEC_DMESG
49228 + bool "Dmesg(8) restriction"
49229 + help
49230 + If you say Y here, non-root users will not be able to use dmesg(8)
49231 + to view up to the last 4kb of messages in the kernel's log buffer.
49232 + The kernel's log buffer often contains kernel addresses and other
49233 + identifying information useful to an attacker in fingerprinting a
49234 + system for a targeted exploit.
49235 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
49236 + created.
49237 +
49238 +config GRKERNSEC_HARDEN_PTRACE
49239 + bool "Deter ptrace-based process snooping"
49240 + help
49241 + If you say Y here, TTY sniffers and other malicious monitoring
49242 + programs implemented through ptrace will be defeated. If you
49243 + have been using the RBAC system, this option has already been
49244 + enabled for several years for all users, with the ability to make
49245 + fine-grained exceptions.
49246 +
49247 + This option only affects the ability of non-root users to ptrace
49248 + processes that are not a descendent of the ptracing process.
49249 + This means that strace ./binary and gdb ./binary will still work,
49250 + but attaching to arbitrary processes will not. If the sysctl
49251 + option is enabled, a sysctl option with name "harden_ptrace" is
49252 + created.
49253 +
49254 +config GRKERNSEC_PTRACE_READEXEC
49255 + bool "Require read access to ptrace sensitive binaries"
49256 + help
49257 + If you say Y here, unprivileged users will not be able to ptrace unreadable
49258 + binaries. This option is useful in environments that
49259 + remove the read bits (e.g. file mode 4711) from suid binaries to
49260 + prevent infoleaking of their contents. This option adds
49261 + consistency to the use of that file mode, as the binary could normally
49262 + be read out when run without privileges while ptracing.
49263 +
49264 + If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
49265 + is created.
49266 +
49267 +config GRKERNSEC_SETXID
49268 + bool "Enforce consistent multithreaded privileges"
49269 + help
49270 + If you say Y here, a change from a root uid to a non-root uid
49271 + in a multithreaded application will cause the resulting uids,
49272 + gids, supplementary groups, and capabilities in that thread
49273 + to be propagated to the other threads of the process. In most
49274 + cases this is unnecessary, as glibc will emulate this behavior
49275 + on behalf of the application. Other libcs do not act in the
49276 + same way, allowing the other threads of the process to continue
49277 + running with root privileges. If the sysctl option is enabled,
49278 + a sysctl option with name "consistent_setxid" is created.
49279 +
49280 +config GRKERNSEC_TPE
49281 + bool "Trusted Path Execution (TPE)"
49282 + help
49283 + If you say Y here, you will be able to choose a gid to add to the
49284 + supplementary groups of users you want to mark as "untrusted."
49285 + These users will not be able to execute any files that are not in
49286 + root-owned directories writable only by root. If the sysctl option
49287 + is enabled, a sysctl option with name "tpe" is created.
49288 +
49289 +config GRKERNSEC_TPE_ALL
49290 + bool "Partially restrict all non-root users"
49291 + depends on GRKERNSEC_TPE
49292 + help
49293 + If you say Y here, all non-root users will be covered under
49294 + a weaker TPE restriction. This is separate from, and in addition to,
49295 + the main TPE options that you have selected elsewhere. Thus, if a
49296 + "trusted" GID is chosen, this restriction applies to even that GID.
49297 + Under this restriction, all non-root users will only be allowed to
49298 + execute files in directories they own that are not group or
49299 + world-writable, or in directories owned by root and writable only by
49300 + root. If the sysctl option is enabled, a sysctl option with name
49301 + "tpe_restrict_all" is created.
49302 +
49303 +config GRKERNSEC_TPE_INVERT
49304 + bool "Invert GID option"
49305 + depends on GRKERNSEC_TPE
49306 + help
49307 + If you say Y here, the group you specify in the TPE configuration will
49308 + decide what group TPE restrictions will be *disabled* for. This
49309 + option is useful if you want TPE restrictions to be applied to most
49310 + users on the system. If the sysctl option is enabled, a sysctl option
49311 + with name "tpe_invert" is created. Unlike other sysctl options, this
49312 + entry will default to on for backward-compatibility.
49313 +
49314 +config GRKERNSEC_TPE_GID
49315 + int "GID for untrusted users"
49316 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
49317 + default 1005
49318 + help
49319 + Setting this GID determines what group TPE restrictions will be
49320 + *enabled* for. If the sysctl option is enabled, a sysctl option
49321 + with name "tpe_gid" is created.
49322 +
49323 +config GRKERNSEC_TPE_GID
49324 + int "GID for trusted users"
49325 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
49326 + default 1005
49327 + help
49328 + Setting this GID determines what group TPE restrictions will be
49329 + *disabled* for. If the sysctl option is enabled, a sysctl option
49330 + with name "tpe_gid" is created.
49331 +
49332 +endmenu
49333 +menu "Network Protections"
49334 +depends on GRKERNSEC
49335 +
49336 +config GRKERNSEC_RANDNET
49337 + bool "Larger entropy pools"
49338 + help
49339 + If you say Y here, the entropy pools used for many features of Linux
49340 + and grsecurity will be doubled in size. Since several grsecurity
49341 + features use additional randomness, it is recommended that you say Y
49342 + here. Saying Y here has a similar effect as modifying
49343 + /proc/sys/kernel/random/poolsize.
49344 +
49345 +config GRKERNSEC_BLACKHOLE
49346 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
49347 + depends on NET
49348 + help
49349 + If you say Y here, neither TCP resets nor ICMP
49350 + destination-unreachable packets will be sent in response to packets
49351 + sent to ports for which no associated listening process exists.
49352 + This feature supports both IPV4 and IPV6 and exempts the
49353 + loopback interface from blackholing. Enabling this feature
49354 + makes a host more resilient to DoS attacks and reduces network
49355 + visibility against scanners.
49356 +
49357 + The blackhole feature as-implemented is equivalent to the FreeBSD
49358 + blackhole feature, as it prevents RST responses to all packets, not
49359 + just SYNs. Under most application behavior this causes no
49360 + problems, but applications (like haproxy) may not close certain
49361 + connections in a way that cleanly terminates them on the remote
49362 + end, leaving the remote host in LAST_ACK state. Because of this
49363 + side-effect and to prevent intentional LAST_ACK DoSes, this
49364 + feature also adds automatic mitigation against such attacks.
49365 + The mitigation drastically reduces the amount of time a socket
49366 + can spend in LAST_ACK state. If you're using haproxy and not
49367 + all servers it connects to have this option enabled, consider
49368 + disabling this feature on the haproxy host.
49369 +
49370 + If the sysctl option is enabled, two sysctl options with names
49371 + "ip_blackhole" and "lastack_retries" will be created.
49372 + While "ip_blackhole" takes the standard zero/non-zero on/off
49373 + toggle, "lastack_retries" uses the same kinds of values as
49374 + "tcp_retries1" and "tcp_retries2". The default value of 4
49375 + prevents a socket from lasting more than 45 seconds in LAST_ACK
49376 + state.
49377 +
49378 +config GRKERNSEC_SOCKET
49379 + bool "Socket restrictions"
49380 + depends on NET
49381 + help
49382 + If you say Y here, you will be able to choose from several options.
49383 + If you assign a GID on your system and add it to the supplementary
49384 + groups of users you want to restrict socket access to, this patch
49385 + will perform up to three things, based on the option(s) you choose.
49386 +
49387 +config GRKERNSEC_SOCKET_ALL
49388 + bool "Deny any sockets to group"
49389 + depends on GRKERNSEC_SOCKET
49390 + help
49391 + If you say Y here, you will be able to choose a GID of whose users will
49392 + be unable to connect to other hosts from your machine or run server
49393 + applications from your machine. If the sysctl option is enabled, a
49394 + sysctl option with name "socket_all" is created.
49395 +
49396 +config GRKERNSEC_SOCKET_ALL_GID
49397 + int "GID to deny all sockets for"
49398 + depends on GRKERNSEC_SOCKET_ALL
49399 + default 1004
49400 + help
49401 + Here you can choose the GID to disable socket access for. Remember to
49402 + add the users you want socket access disabled for to the GID
49403 + specified here. If the sysctl option is enabled, a sysctl option
49404 + with name "socket_all_gid" is created.
49405 +
49406 +config GRKERNSEC_SOCKET_CLIENT
49407 + bool "Deny client sockets to group"
49408 + depends on GRKERNSEC_SOCKET
49409 + help
49410 + If you say Y here, you will be able to choose a GID of whose users will
49411 + be unable to connect to other hosts from your machine, but will be
49412 + able to run servers. If this option is enabled, all users in the group
49413 + you specify will have to use passive mode when initiating ftp transfers
49414 + from the shell on your machine. If the sysctl option is enabled, a
49415 + sysctl option with name "socket_client" is created.
49416 +
49417 +config GRKERNSEC_SOCKET_CLIENT_GID
49418 + int "GID to deny client sockets for"
49419 + depends on GRKERNSEC_SOCKET_CLIENT
49420 + default 1003
49421 + help
49422 + Here you can choose the GID to disable client socket access for.
49423 + Remember to add the users you want client socket access disabled for to
49424 + the GID specified here. If the sysctl option is enabled, a sysctl
49425 + option with name "socket_client_gid" is created.
49426 +
49427 +config GRKERNSEC_SOCKET_SERVER
49428 + bool "Deny server sockets to group"
49429 + depends on GRKERNSEC_SOCKET
49430 + help
49431 + If you say Y here, you will be able to choose a GID of whose users will
49432 + be unable to run server applications from your machine. If the sysctl
49433 + option is enabled, a sysctl option with name "socket_server" is created.
49434 +
49435 +config GRKERNSEC_SOCKET_SERVER_GID
49436 + int "GID to deny server sockets for"
49437 + depends on GRKERNSEC_SOCKET_SERVER
49438 + default 1002
49439 + help
49440 + Here you can choose the GID to disable server socket access for.
49441 + Remember to add the users you want server socket access disabled for to
49442 + the GID specified here. If the sysctl option is enabled, a sysctl
49443 + option with name "socket_server_gid" is created.
49444 +
49445 +endmenu
49446 +menu "Sysctl support"
49447 +depends on GRKERNSEC && SYSCTL
49448 +
49449 +config GRKERNSEC_SYSCTL
49450 + bool "Sysctl support"
49451 + help
49452 + If you say Y here, you will be able to change the options that
49453 + grsecurity runs with at bootup, without having to recompile your
49454 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
49455 + to enable (1) or disable (0) various features. All the sysctl entries
49456 + are mutable until the "grsec_lock" entry is set to a non-zero value.
49457 + All features enabled in the kernel configuration are disabled at boot
49458 + if you do not say Y to the "Turn on features by default" option.
49459 + All options should be set at startup, and the grsec_lock entry should
49460 + be set to a non-zero value after all the options are set.
49461 + *THIS IS EXTREMELY IMPORTANT*
49462 +
49463 +config GRKERNSEC_SYSCTL_DISTRO
49464 + bool "Extra sysctl support for distro makers (READ HELP)"
49465 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
49466 + help
49467 + If you say Y here, additional sysctl options will be created
49468 + for features that affect processes running as root. Therefore,
49469 + it is critical when using this option that the grsec_lock entry be
49470 + enabled after boot. Only distros with prebuilt kernel packages
49471 + with this option enabled that can ensure grsec_lock is enabled
49472 + after boot should use this option.
49473 + *Failure to set grsec_lock after boot makes all grsec features
49474 + this option covers useless*
49475 +
49476 + Currently this option creates the following sysctl entries:
49477 + "Disable Privileged I/O": "disable_priv_io"
49478 +
49479 +config GRKERNSEC_SYSCTL_ON
49480 + bool "Turn on features by default"
49481 + depends on GRKERNSEC_SYSCTL
49482 + help
49483 + If you say Y here, instead of having all features enabled in the
49484 + kernel configuration disabled at boot time, the features will be
49485 + enabled at boot time. It is recommended you say Y here unless
49486 + there is some reason you would want all sysctl-tunable features to
49487 + be disabled by default. As mentioned elsewhere, it is important
49488 + to enable the grsec_lock entry once you have finished modifying
49489 + the sysctl entries.
49490 +
49491 +endmenu
49492 +menu "Logging Options"
49493 +depends on GRKERNSEC
49494 +
49495 +config GRKERNSEC_FLOODTIME
49496 + int "Seconds in between log messages (minimum)"
49497 + default 10
49498 + help
49499 + This option allows you to enforce the number of seconds between
49500 + grsecurity log messages. The default should be suitable for most
49501 + people, however, if you choose to change it, choose a value small enough
49502 + to allow informative logs to be produced, but large enough to
49503 + prevent flooding.
49504 +
49505 +config GRKERNSEC_FLOODBURST
49506 + int "Number of messages in a burst (maximum)"
49507 + default 6
49508 + help
49509 + This option allows you to choose the maximum number of messages allowed
49510 + within the flood time interval you chose in a separate option. The
49511 + default should be suitable for most people, however if you find that
49512 + many of your logs are being interpreted as flooding, you may want to
49513 + raise this value.
49514 +
49515 +endmenu
49516 +
49517 +endmenu
49518 diff --git a/grsecurity/Makefile b/grsecurity/Makefile
49519 new file mode 100644
49520 index 0000000..1b9afa9
49521 --- /dev/null
49522 +++ b/grsecurity/Makefile
49523 @@ -0,0 +1,38 @@
49524 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
49525 +# during 2001-2009 it has been completely redesigned by Brad Spengler
49526 +# into an RBAC system
49527 +#
49528 +# All code in this directory and various hooks inserted throughout the kernel
49529 +# are copyright Brad Spengler - Open Source Security, Inc., and released
49530 +# under the GPL v2 or higher
49531 +
49532 +KBUILD_CFLAGS += -Werror
49533 +
49534 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
49535 + grsec_mount.o grsec_sig.o grsec_sysctl.o \
49536 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
49537 +
49538 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
49539 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
49540 + gracl_learn.o grsec_log.o
49541 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
49542 +
49543 +ifdef CONFIG_NET
49544 +obj-y += grsec_sock.o
49545 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
49546 +endif
49547 +
49548 +ifndef CONFIG_GRKERNSEC
49549 +obj-y += grsec_disabled.o
49550 +endif
49551 +
49552 +ifdef CONFIG_GRKERNSEC_HIDESYM
49553 +extra-y := grsec_hidesym.o
49554 +$(obj)/grsec_hidesym.o:
49555 + @-chmod -f 500 /boot
49556 + @-chmod -f 500 /lib/modules
49557 + @-chmod -f 500 /lib64/modules
49558 + @-chmod -f 500 /lib32/modules
49559 + @-chmod -f 700 .
49560 + @echo ' grsec: protected kernel image paths'
49561 +endif
49562 diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
49563 new file mode 100644
49564 index 0000000..e8c5d41
49565 --- /dev/null
49566 +++ b/grsecurity/gracl.c
49567 @@ -0,0 +1,4179 @@
49568 +#include <linux/kernel.h>
49569 +#include <linux/module.h>
49570 +#include <linux/sched.h>
49571 +#include <linux/mm.h>
49572 +#include <linux/file.h>
49573 +#include <linux/fs.h>
49574 +#include <linux/namei.h>
49575 +#include <linux/mount.h>
49576 +#include <linux/tty.h>
49577 +#include <linux/proc_fs.h>
49578 +#include <linux/lglock.h>
49579 +#include <linux/slab.h>
49580 +#include <linux/vmalloc.h>
49581 +#include <linux/types.h>
49582 +#include <linux/sysctl.h>
49583 +#include <linux/netdevice.h>
49584 +#include <linux/ptrace.h>
49585 +#include <linux/gracl.h>
49586 +#include <linux/gralloc.h>
49587 +#include <linux/security.h>
49588 +#include <linux/grinternal.h>
49589 +#include <linux/pid_namespace.h>
49590 +#include <linux/fdtable.h>
49591 +#include <linux/percpu.h>
49592 +#include "../fs/mount.h"
49593 +
49594 +#include <asm/uaccess.h>
49595 +#include <asm/errno.h>
49596 +#include <asm/mman.h>
49597 +
49598 +static struct acl_role_db acl_role_set;
49599 +static struct name_db name_set;
49600 +static struct inodev_db inodev_set;
49601 +
49602 +/* for keeping track of userspace pointers used for subjects, so we
49603 + can share references in the kernel as well
49604 +*/
49605 +
49606 +static struct path real_root;
49607 +
49608 +static struct acl_subj_map_db subj_map_set;
49609 +
49610 +static struct acl_role_label *default_role;
49611 +
49612 +static struct acl_role_label *role_list;
49613 +
49614 +static u16 acl_sp_role_value;
49615 +
49616 +extern char *gr_shared_page[4];
49617 +static DEFINE_MUTEX(gr_dev_mutex);
49618 +DEFINE_RWLOCK(gr_inode_lock);
49619 +
49620 +struct gr_arg *gr_usermode;
49621 +
49622 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
49623 +
49624 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
49625 +extern void gr_clear_learn_entries(void);
49626 +
49627 +#ifdef CONFIG_GRKERNSEC_RESLOG
49628 +extern void gr_log_resource(const struct task_struct *task,
49629 + const int res, const unsigned long wanted, const int gt);
49630 +#endif
49631 +
49632 +unsigned char *gr_system_salt;
49633 +unsigned char *gr_system_sum;
49634 +
49635 +static struct sprole_pw **acl_special_roles = NULL;
49636 +static __u16 num_sprole_pws = 0;
49637 +
49638 +static struct acl_role_label *kernel_role = NULL;
49639 +
49640 +static unsigned int gr_auth_attempts = 0;
49641 +static unsigned long gr_auth_expires = 0UL;
49642 +
49643 +#ifdef CONFIG_NET
49644 +extern struct vfsmount *sock_mnt;
49645 +#endif
49646 +
49647 +extern struct vfsmount *pipe_mnt;
49648 +extern struct vfsmount *shm_mnt;
49649 +#ifdef CONFIG_HUGETLBFS
49650 +extern struct vfsmount *hugetlbfs_vfsmount;
49651 +#endif
49652 +
49653 +static struct acl_object_label *fakefs_obj_rw;
49654 +static struct acl_object_label *fakefs_obj_rwx;
49655 +
49656 +extern int gr_init_uidset(void);
49657 +extern void gr_free_uidset(void);
49658 +extern void gr_remove_uid(uid_t uid);
49659 +extern int gr_find_uid(uid_t uid);
49660 +
49661 +DECLARE_BRLOCK(vfsmount_lock);
49662 +
49663 +__inline__ int
49664 +gr_acl_is_enabled(void)
49665 +{
49666 + return (gr_status & GR_READY);
49667 +}
49668 +
49669 +#ifdef CONFIG_BTRFS_FS
49670 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
49671 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
49672 +#endif
49673 +
49674 +static inline dev_t __get_dev(const struct dentry *dentry)
49675 +{
49676 +#ifdef CONFIG_BTRFS_FS
49677 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
49678 + return get_btrfs_dev_from_inode(dentry->d_inode);
49679 + else
49680 +#endif
49681 + return dentry->d_inode->i_sb->s_dev;
49682 +}
49683 +
49684 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
49685 +{
49686 + return __get_dev(dentry);
49687 +}
49688 +
49689 +static char gr_task_roletype_to_char(struct task_struct *task)
49690 +{
49691 + switch (task->role->roletype &
49692 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
49693 + GR_ROLE_SPECIAL)) {
49694 + case GR_ROLE_DEFAULT:
49695 + return 'D';
49696 + case GR_ROLE_USER:
49697 + return 'U';
49698 + case GR_ROLE_GROUP:
49699 + return 'G';
49700 + case GR_ROLE_SPECIAL:
49701 + return 'S';
49702 + }
49703 +
49704 + return 'X';
49705 +}
49706 +
49707 +char gr_roletype_to_char(void)
49708 +{
49709 + return gr_task_roletype_to_char(current);
49710 +}
49711 +
49712 +__inline__ int
49713 +gr_acl_tpe_check(void)
49714 +{
49715 + if (unlikely(!(gr_status & GR_READY)))
49716 + return 0;
49717 + if (current->role->roletype & GR_ROLE_TPE)
49718 + return 1;
49719 + else
49720 + return 0;
49721 +}
49722 +
49723 +int
49724 +gr_handle_rawio(const struct inode *inode)
49725 +{
49726 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
49727 + if (inode && S_ISBLK(inode->i_mode) &&
49728 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
49729 + !capable(CAP_SYS_RAWIO))
49730 + return 1;
49731 +#endif
49732 + return 0;
49733 +}
49734 +
49735 +static int
49736 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
49737 +{
49738 + if (likely(lena != lenb))
49739 + return 0;
49740 +
49741 + return !memcmp(a, b, lena);
49742 +}
49743 +
49744 +static int prepend(char **buffer, int *buflen, const char *str, int namelen)
49745 +{
49746 + *buflen -= namelen;
49747 + if (*buflen < 0)
49748 + return -ENAMETOOLONG;
49749 + *buffer -= namelen;
49750 + memcpy(*buffer, str, namelen);
49751 + return 0;
49752 +}
49753 +
49754 +static int prepend_name(char **buffer, int *buflen, struct qstr *name)
49755 +{
49756 + return prepend(buffer, buflen, name->name, name->len);
49757 +}
49758 +
49759 +static int prepend_path(const struct path *path, struct path *root,
49760 + char **buffer, int *buflen)
49761 +{
49762 + struct dentry *dentry = path->dentry;
49763 + struct vfsmount *vfsmnt = path->mnt;
49764 + struct mount *mnt = real_mount(vfsmnt);
49765 + bool slash = false;
49766 + int error = 0;
49767 +
49768 + while (dentry != root->dentry || vfsmnt != root->mnt) {
49769 + struct dentry * parent;
49770 +
49771 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
49772 + /* Global root? */
49773 + if (!mnt_has_parent(mnt)) {
49774 + goto out;
49775 + }
49776 + dentry = mnt->mnt_mountpoint;
49777 + mnt = mnt->mnt_parent;
49778 + vfsmnt = &mnt->mnt;
49779 + continue;
49780 + }
49781 + parent = dentry->d_parent;
49782 + prefetch(parent);
49783 + spin_lock(&dentry->d_lock);
49784 + error = prepend_name(buffer, buflen, &dentry->d_name);
49785 + spin_unlock(&dentry->d_lock);
49786 + if (!error)
49787 + error = prepend(buffer, buflen, "/", 1);
49788 + if (error)
49789 + break;
49790 +
49791 + slash = true;
49792 + dentry = parent;
49793 + }
49794 +
49795 +out:
49796 + if (!error && !slash)
49797 + error = prepend(buffer, buflen, "/", 1);
49798 +
49799 + return error;
49800 +}
49801 +
49802 +/* this must be called with vfsmount_lock and rename_lock held */
49803 +
49804 +static char *__our_d_path(const struct path *path, struct path *root,
49805 + char *buf, int buflen)
49806 +{
49807 + char *res = buf + buflen;
49808 + int error;
49809 +
49810 + prepend(&res, &buflen, "\0", 1);
49811 + error = prepend_path(path, root, &res, &buflen);
49812 + if (error)
49813 + return ERR_PTR(error);
49814 +
49815 + return res;
49816 +}
49817 +
49818 +static char *
49819 +gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
49820 +{
49821 + char *retval;
49822 +
49823 + retval = __our_d_path(path, root, buf, buflen);
49824 + if (unlikely(IS_ERR(retval)))
49825 + retval = strcpy(buf, "<path too long>");
49826 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
49827 + retval[1] = '\0';
49828 +
49829 + return retval;
49830 +}
49831 +
49832 +static char *
49833 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
49834 + char *buf, int buflen)
49835 +{
49836 + struct path path;
49837 + char *res;
49838 +
49839 + path.dentry = (struct dentry *)dentry;
49840 + path.mnt = (struct vfsmount *)vfsmnt;
49841 +
49842 + /* we can use real_root.dentry, real_root.mnt, because this is only called
49843 + by the RBAC system */
49844 + res = gen_full_path(&path, &real_root, buf, buflen);
49845 +
49846 + return res;
49847 +}
49848 +
49849 +static char *
49850 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
49851 + char *buf, int buflen)
49852 +{
49853 + char *res;
49854 + struct path path;
49855 + struct path root;
49856 + struct task_struct *reaper = &init_task;
49857 +
49858 + path.dentry = (struct dentry *)dentry;
49859 + path.mnt = (struct vfsmount *)vfsmnt;
49860 +
49861 + /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
49862 + get_fs_root(reaper->fs, &root);
49863 +
49864 + write_seqlock(&rename_lock);
49865 + br_read_lock(vfsmount_lock);
49866 + res = gen_full_path(&path, &root, buf, buflen);
49867 + br_read_unlock(vfsmount_lock);
49868 + write_sequnlock(&rename_lock);
49869 +
49870 + path_put(&root);
49871 + return res;
49872 +}
49873 +
49874 +static char *
49875 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
49876 +{
49877 + char *ret;
49878 + write_seqlock(&rename_lock);
49879 + br_read_lock(vfsmount_lock);
49880 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
49881 + PAGE_SIZE);
49882 + br_read_unlock(vfsmount_lock);
49883 + write_sequnlock(&rename_lock);
49884 + return ret;
49885 +}
49886 +
49887 +static char *
49888 +gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
49889 +{
49890 + char *ret;
49891 + char *buf;
49892 + int buflen;
49893 +
49894 + write_seqlock(&rename_lock);
49895 + br_read_lock(vfsmount_lock);
49896 + buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
49897 + ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
49898 + buflen = (int)(ret - buf);
49899 + if (buflen >= 5)
49900 + prepend(&ret, &buflen, "/proc", 5);
49901 + else
49902 + ret = strcpy(buf, "<path too long>");
49903 + br_read_unlock(vfsmount_lock);
49904 + write_sequnlock(&rename_lock);
49905 + return ret;
49906 +}
49907 +
49908 +char *
49909 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
49910 +{
49911 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
49912 + PAGE_SIZE);
49913 +}
49914 +
49915 +char *
49916 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
49917 +{
49918 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
49919 + PAGE_SIZE);
49920 +}
49921 +
49922 +char *
49923 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
49924 +{
49925 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
49926 + PAGE_SIZE);
49927 +}
49928 +
49929 +char *
49930 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
49931 +{
49932 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
49933 + PAGE_SIZE);
49934 +}
49935 +
49936 +char *
49937 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
49938 +{
49939 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
49940 + PAGE_SIZE);
49941 +}
49942 +
49943 +__inline__ __u32
49944 +to_gr_audit(const __u32 reqmode)
49945 +{
49946 + /* masks off auditable permission flags, then shifts them to create
49947 + auditing flags, and adds the special case of append auditing if
49948 + we're requesting write */
49949 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
49950 +}
49951 +
49952 +struct acl_subject_label *
49953 +lookup_subject_map(const struct acl_subject_label *userp)
49954 +{
49955 + unsigned int index = shash(userp, subj_map_set.s_size);
49956 + struct subject_map *match;
49957 +
49958 + match = subj_map_set.s_hash[index];
49959 +
49960 + while (match && match->user != userp)
49961 + match = match->next;
49962 +
49963 + if (match != NULL)
49964 + return match->kernel;
49965 + else
49966 + return NULL;
49967 +}
49968 +
49969 +static void
49970 +insert_subj_map_entry(struct subject_map *subjmap)
49971 +{
49972 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
49973 + struct subject_map **curr;
49974 +
49975 + subjmap->prev = NULL;
49976 +
49977 + curr = &subj_map_set.s_hash[index];
49978 + if (*curr != NULL)
49979 + (*curr)->prev = subjmap;
49980 +
49981 + subjmap->next = *curr;
49982 + *curr = subjmap;
49983 +
49984 + return;
49985 +}
49986 +
49987 +static struct acl_role_label *
49988 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
49989 + const gid_t gid)
49990 +{
49991 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
49992 + struct acl_role_label *match;
49993 + struct role_allowed_ip *ipp;
49994 + unsigned int x;
49995 + u32 curr_ip = task->signal->curr_ip;
49996 +
49997 + task->signal->saved_ip = curr_ip;
49998 +
49999 + match = acl_role_set.r_hash[index];
50000 +
50001 + while (match) {
50002 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
50003 + for (x = 0; x < match->domain_child_num; x++) {
50004 + if (match->domain_children[x] == uid)
50005 + goto found;
50006 + }
50007 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
50008 + break;
50009 + match = match->next;
50010 + }
50011 +found:
50012 + if (match == NULL) {
50013 + try_group:
50014 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
50015 + match = acl_role_set.r_hash[index];
50016 +
50017 + while (match) {
50018 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
50019 + for (x = 0; x < match->domain_child_num; x++) {
50020 + if (match->domain_children[x] == gid)
50021 + goto found2;
50022 + }
50023 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
50024 + break;
50025 + match = match->next;
50026 + }
50027 +found2:
50028 + if (match == NULL)
50029 + match = default_role;
50030 + if (match->allowed_ips == NULL)
50031 + return match;
50032 + else {
50033 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
50034 + if (likely
50035 + ((ntohl(curr_ip) & ipp->netmask) ==
50036 + (ntohl(ipp->addr) & ipp->netmask)))
50037 + return match;
50038 + }
50039 + match = default_role;
50040 + }
50041 + } else if (match->allowed_ips == NULL) {
50042 + return match;
50043 + } else {
50044 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
50045 + if (likely
50046 + ((ntohl(curr_ip) & ipp->netmask) ==
50047 + (ntohl(ipp->addr) & ipp->netmask)))
50048 + return match;
50049 + }
50050 + goto try_group;
50051 + }
50052 +
50053 + return match;
50054 +}
50055 +
50056 +struct acl_subject_label *
50057 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
50058 + const struct acl_role_label *role)
50059 +{
50060 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
50061 + struct acl_subject_label *match;
50062 +
50063 + match = role->subj_hash[index];
50064 +
50065 + while (match && (match->inode != ino || match->device != dev ||
50066 + (match->mode & GR_DELETED))) {
50067 + match = match->next;
50068 + }
50069 +
50070 + if (match && !(match->mode & GR_DELETED))
50071 + return match;
50072 + else
50073 + return NULL;
50074 +}
50075 +
50076 +struct acl_subject_label *
50077 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
50078 + const struct acl_role_label *role)
50079 +{
50080 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
50081 + struct acl_subject_label *match;
50082 +
50083 + match = role->subj_hash[index];
50084 +
50085 + while (match && (match->inode != ino || match->device != dev ||
50086 + !(match->mode & GR_DELETED))) {
50087 + match = match->next;
50088 + }
50089 +
50090 + if (match && (match->mode & GR_DELETED))
50091 + return match;
50092 + else
50093 + return NULL;
50094 +}
50095 +
50096 +static struct acl_object_label *
50097 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
50098 + const struct acl_subject_label *subj)
50099 +{
50100 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
50101 + struct acl_object_label *match;
50102 +
50103 + match = subj->obj_hash[index];
50104 +
50105 + while (match && (match->inode != ino || match->device != dev ||
50106 + (match->mode & GR_DELETED))) {
50107 + match = match->next;
50108 + }
50109 +
50110 + if (match && !(match->mode & GR_DELETED))
50111 + return match;
50112 + else
50113 + return NULL;
50114 +}
50115 +
50116 +static struct acl_object_label *
50117 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
50118 + const struct acl_subject_label *subj)
50119 +{
50120 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
50121 + struct acl_object_label *match;
50122 +
50123 + match = subj->obj_hash[index];
50124 +
50125 + while (match && (match->inode != ino || match->device != dev ||
50126 + !(match->mode & GR_DELETED))) {
50127 + match = match->next;
50128 + }
50129 +
50130 + if (match && (match->mode & GR_DELETED))
50131 + return match;
50132 +
50133 + match = subj->obj_hash[index];
50134 +
50135 + while (match && (match->inode != ino || match->device != dev ||
50136 + (match->mode & GR_DELETED))) {
50137 + match = match->next;
50138 + }
50139 +
50140 + if (match && !(match->mode & GR_DELETED))
50141 + return match;
50142 + else
50143 + return NULL;
50144 +}
50145 +
50146 +static struct name_entry *
50147 +lookup_name_entry(const char *name)
50148 +{
50149 + unsigned int len = strlen(name);
50150 + unsigned int key = full_name_hash(name, len);
50151 + unsigned int index = key % name_set.n_size;
50152 + struct name_entry *match;
50153 +
50154 + match = name_set.n_hash[index];
50155 +
50156 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
50157 + match = match->next;
50158 +
50159 + return match;
50160 +}
50161 +
50162 +static struct name_entry *
50163 +lookup_name_entry_create(const char *name)
50164 +{
50165 + unsigned int len = strlen(name);
50166 + unsigned int key = full_name_hash(name, len);
50167 + unsigned int index = key % name_set.n_size;
50168 + struct name_entry *match;
50169 +
50170 + match = name_set.n_hash[index];
50171 +
50172 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
50173 + !match->deleted))
50174 + match = match->next;
50175 +
50176 + if (match && match->deleted)
50177 + return match;
50178 +
50179 + match = name_set.n_hash[index];
50180 +
50181 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
50182 + match->deleted))
50183 + match = match->next;
50184 +
50185 + if (match && !match->deleted)
50186 + return match;
50187 + else
50188 + return NULL;
50189 +}
50190 +
50191 +static struct inodev_entry *
50192 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
50193 +{
50194 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
50195 + struct inodev_entry *match;
50196 +
50197 + match = inodev_set.i_hash[index];
50198 +
50199 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
50200 + match = match->next;
50201 +
50202 + return match;
50203 +}
50204 +
50205 +static void
50206 +insert_inodev_entry(struct inodev_entry *entry)
50207 +{
50208 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
50209 + inodev_set.i_size);
50210 + struct inodev_entry **curr;
50211 +
50212 + entry->prev = NULL;
50213 +
50214 + curr = &inodev_set.i_hash[index];
50215 + if (*curr != NULL)
50216 + (*curr)->prev = entry;
50217 +
50218 + entry->next = *curr;
50219 + *curr = entry;
50220 +
50221 + return;
50222 +}
50223 +
50224 +static void
50225 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
50226 +{
50227 + unsigned int index =
50228 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
50229 + struct acl_role_label **curr;
50230 + struct acl_role_label *tmp, *tmp2;
50231 +
50232 + curr = &acl_role_set.r_hash[index];
50233 +
50234 + /* simple case, slot is empty, just set it to our role */
50235 + if (*curr == NULL) {
50236 + *curr = role;
50237 + } else {
50238 + /* example:
50239 + 1 -> 2 -> 3 (adding 2 -> 3 to here)
50240 + 2 -> 3
50241 + */
50242 + /* first check to see if we can already be reached via this slot */
50243 + tmp = *curr;
50244 + while (tmp && tmp != role)
50245 + tmp = tmp->next;
50246 + if (tmp == role) {
50247 + /* we don't need to add ourselves to this slot's chain */
50248 + return;
50249 + }
50250 + /* we need to add ourselves to this chain, two cases */
50251 + if (role->next == NULL) {
50252 + /* simple case, append the current chain to our role */
50253 + role->next = *curr;
50254 + *curr = role;
50255 + } else {
50256 + /* 1 -> 2 -> 3 -> 4
50257 + 2 -> 3 -> 4
50258 + 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
50259 + */
50260 + /* trickier case: walk our role's chain until we find
50261 + the role for the start of the current slot's chain */
50262 + tmp = role;
50263 + tmp2 = *curr;
50264 + while (tmp->next && tmp->next != tmp2)
50265 + tmp = tmp->next;
50266 + if (tmp->next == tmp2) {
50267 + /* from example above, we found 3, so just
50268 + replace this slot's chain with ours */
50269 + *curr = role;
50270 + } else {
50271 + /* we didn't find a subset of our role's chain
50272 + in the current slot's chain, so append their
50273 + chain to ours, and set us as the first role in
50274 + the slot's chain
50275 +
50276 + we could fold this case with the case above,
50277 + but making it explicit for clarity
50278 + */
50279 + tmp->next = tmp2;
50280 + *curr = role;
50281 + }
50282 + }
50283 + }
50284 +
50285 + return;
50286 +}
50287 +
50288 +static void
50289 +insert_acl_role_label(struct acl_role_label *role)
50290 +{
50291 + int i;
50292 +
50293 + if (role_list == NULL) {
50294 + role_list = role;
50295 + role->prev = NULL;
50296 + } else {
50297 + role->prev = role_list;
50298 + role_list = role;
50299 + }
50300 +
50301 + /* used for hash chains */
50302 + role->next = NULL;
50303 +
50304 + if (role->roletype & GR_ROLE_DOMAIN) {
50305 + for (i = 0; i < role->domain_child_num; i++)
50306 + __insert_acl_role_label(role, role->domain_children[i]);
50307 + } else
50308 + __insert_acl_role_label(role, role->uidgid);
50309 +}
50310 +
50311 +static int
50312 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
50313 +{
50314 + struct name_entry **curr, *nentry;
50315 + struct inodev_entry *ientry;
50316 + unsigned int len = strlen(name);
50317 + unsigned int key = full_name_hash(name, len);
50318 + unsigned int index = key % name_set.n_size;
50319 +
50320 + curr = &name_set.n_hash[index];
50321 +
50322 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
50323 + curr = &((*curr)->next);
50324 +
50325 + if (*curr != NULL)
50326 + return 1;
50327 +
50328 + nentry = acl_alloc(sizeof (struct name_entry));
50329 + if (nentry == NULL)
50330 + return 0;
50331 + ientry = acl_alloc(sizeof (struct inodev_entry));
50332 + if (ientry == NULL)
50333 + return 0;
50334 + ientry->nentry = nentry;
50335 +
50336 + nentry->key = key;
50337 + nentry->name = name;
50338 + nentry->inode = inode;
50339 + nentry->device = device;
50340 + nentry->len = len;
50341 + nentry->deleted = deleted;
50342 +
50343 + nentry->prev = NULL;
50344 + curr = &name_set.n_hash[index];
50345 + if (*curr != NULL)
50346 + (*curr)->prev = nentry;
50347 + nentry->next = *curr;
50348 + *curr = nentry;
50349 +
50350 + /* insert us into the table searchable by inode/dev */
50351 + insert_inodev_entry(ientry);
50352 +
50353 + return 1;
50354 +}
50355 +
50356 +static void
50357 +insert_acl_obj_label(struct acl_object_label *obj,
50358 + struct acl_subject_label *subj)
50359 +{
50360 + unsigned int index =
50361 + fhash(obj->inode, obj->device, subj->obj_hash_size);
50362 + struct acl_object_label **curr;
50363 +
50364 +
50365 + obj->prev = NULL;
50366 +
50367 + curr = &subj->obj_hash[index];
50368 + if (*curr != NULL)
50369 + (*curr)->prev = obj;
50370 +
50371 + obj->next = *curr;
50372 + *curr = obj;
50373 +
50374 + return;
50375 +}
50376 +
50377 +static void
50378 +insert_acl_subj_label(struct acl_subject_label *obj,
50379 + struct acl_role_label *role)
50380 +{
50381 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
50382 + struct acl_subject_label **curr;
50383 +
50384 + obj->prev = NULL;
50385 +
50386 + curr = &role->subj_hash[index];
50387 + if (*curr != NULL)
50388 + (*curr)->prev = obj;
50389 +
50390 + obj->next = *curr;
50391 + *curr = obj;
50392 +
50393 + return;
50394 +}
50395 +
50396 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
50397 +
50398 +static void *
50399 +create_table(__u32 * len, int elementsize)
50400 +{
50401 + unsigned int table_sizes[] = {
50402 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
50403 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
50404 + 4194301, 8388593, 16777213, 33554393, 67108859
50405 + };
50406 + void *newtable = NULL;
50407 + unsigned int pwr = 0;
50408 +
50409 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
50410 + table_sizes[pwr] <= *len)
50411 + pwr++;
50412 +
50413 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
50414 + return newtable;
50415 +
50416 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
50417 + newtable =
50418 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
50419 + else
50420 + newtable = vmalloc(table_sizes[pwr] * elementsize);
50421 +
50422 + *len = table_sizes[pwr];
50423 +
50424 + return newtable;
50425 +}
50426 +
50427 +static int
50428 +init_variables(const struct gr_arg *arg)
50429 +{
50430 + struct task_struct *reaper = &init_task;
50431 + unsigned int stacksize;
50432 +
50433 + subj_map_set.s_size = arg->role_db.num_subjects;
50434 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
50435 + name_set.n_size = arg->role_db.num_objects;
50436 + inodev_set.i_size = arg->role_db.num_objects;
50437 +
50438 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
50439 + !name_set.n_size || !inodev_set.i_size)
50440 + return 1;
50441 +
50442 + if (!gr_init_uidset())
50443 + return 1;
50444 +
50445 + /* set up the stack that holds allocation info */
50446 +
50447 + stacksize = arg->role_db.num_pointers + 5;
50448 +
50449 + if (!acl_alloc_stack_init(stacksize))
50450 + return 1;
50451 +
50452 + /* grab reference for the real root dentry and vfsmount */
50453 + get_fs_root(reaper->fs, &real_root);
50454 +
50455 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
50456 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
50457 +#endif
50458 +
50459 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
50460 + if (fakefs_obj_rw == NULL)
50461 + return 1;
50462 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
50463 +
50464 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
50465 + if (fakefs_obj_rwx == NULL)
50466 + return 1;
50467 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
50468 +
50469 + subj_map_set.s_hash =
50470 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
50471 + acl_role_set.r_hash =
50472 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
50473 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
50474 + inodev_set.i_hash =
50475 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
50476 +
50477 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
50478 + !name_set.n_hash || !inodev_set.i_hash)
50479 + return 1;
50480 +
50481 + memset(subj_map_set.s_hash, 0,
50482 + sizeof(struct subject_map *) * subj_map_set.s_size);
50483 + memset(acl_role_set.r_hash, 0,
50484 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
50485 + memset(name_set.n_hash, 0,
50486 + sizeof (struct name_entry *) * name_set.n_size);
50487 + memset(inodev_set.i_hash, 0,
50488 + sizeof (struct inodev_entry *) * inodev_set.i_size);
50489 +
50490 + return 0;
50491 +}
50492 +
50493 +/* free information not needed after startup
50494 + currently contains user->kernel pointer mappings for subjects
50495 +*/
50496 +
50497 +static void
50498 +free_init_variables(void)
50499 +{
50500 + __u32 i;
50501 +
50502 + if (subj_map_set.s_hash) {
50503 + for (i = 0; i < subj_map_set.s_size; i++) {
50504 + if (subj_map_set.s_hash[i]) {
50505 + kfree(subj_map_set.s_hash[i]);
50506 + subj_map_set.s_hash[i] = NULL;
50507 + }
50508 + }
50509 +
50510 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
50511 + PAGE_SIZE)
50512 + kfree(subj_map_set.s_hash);
50513 + else
50514 + vfree(subj_map_set.s_hash);
50515 + }
50516 +
50517 + return;
50518 +}
50519 +
50520 +static void
50521 +free_variables(void)
50522 +{
50523 + struct acl_subject_label *s;
50524 + struct acl_role_label *r;
50525 + struct task_struct *task, *task2;
50526 + unsigned int x;
50527 +
50528 + gr_clear_learn_entries();
50529 +
50530 + read_lock(&tasklist_lock);
50531 + do_each_thread(task2, task) {
50532 + task->acl_sp_role = 0;
50533 + task->acl_role_id = 0;
50534 + task->acl = NULL;
50535 + task->role = NULL;
50536 + } while_each_thread(task2, task);
50537 + read_unlock(&tasklist_lock);
50538 +
50539 + /* release the reference to the real root dentry and vfsmount */
50540 + path_put(&real_root);
50541 + memset(&real_root, 0, sizeof(real_root));
50542 +
50543 + /* free all object hash tables */
50544 +
50545 + FOR_EACH_ROLE_START(r)
50546 + if (r->subj_hash == NULL)
50547 + goto next_role;
50548 + FOR_EACH_SUBJECT_START(r, s, x)
50549 + if (s->obj_hash == NULL)
50550 + break;
50551 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
50552 + kfree(s->obj_hash);
50553 + else
50554 + vfree(s->obj_hash);
50555 + FOR_EACH_SUBJECT_END(s, x)
50556 + FOR_EACH_NESTED_SUBJECT_START(r, s)
50557 + if (s->obj_hash == NULL)
50558 + break;
50559 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
50560 + kfree(s->obj_hash);
50561 + else
50562 + vfree(s->obj_hash);
50563 + FOR_EACH_NESTED_SUBJECT_END(s)
50564 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
50565 + kfree(r->subj_hash);
50566 + else
50567 + vfree(r->subj_hash);
50568 + r->subj_hash = NULL;
50569 +next_role:
50570 + FOR_EACH_ROLE_END(r)
50571 +
50572 + acl_free_all();
50573 +
50574 + if (acl_role_set.r_hash) {
50575 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
50576 + PAGE_SIZE)
50577 + kfree(acl_role_set.r_hash);
50578 + else
50579 + vfree(acl_role_set.r_hash);
50580 + }
50581 + if (name_set.n_hash) {
50582 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
50583 + PAGE_SIZE)
50584 + kfree(name_set.n_hash);
50585 + else
50586 + vfree(name_set.n_hash);
50587 + }
50588 +
50589 + if (inodev_set.i_hash) {
50590 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
50591 + PAGE_SIZE)
50592 + kfree(inodev_set.i_hash);
50593 + else
50594 + vfree(inodev_set.i_hash);
50595 + }
50596 +
50597 + gr_free_uidset();
50598 +
50599 + memset(&name_set, 0, sizeof (struct name_db));
50600 + memset(&inodev_set, 0, sizeof (struct inodev_db));
50601 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
50602 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
50603 +
50604 + default_role = NULL;
50605 + kernel_role = NULL;
50606 + role_list = NULL;
50607 +
50608 + return;
50609 +}
50610 +
50611 +static __u32
50612 +count_user_objs(struct acl_object_label *userp)
50613 +{
50614 + struct acl_object_label o_tmp;
50615 + __u32 num = 0;
50616 +
50617 + while (userp) {
50618 + if (copy_from_user(&o_tmp, userp,
50619 + sizeof (struct acl_object_label)))
50620 + break;
50621 +
50622 + userp = o_tmp.prev;
50623 + num++;
50624 + }
50625 +
50626 + return num;
50627 +}
50628 +
50629 +static struct acl_subject_label *
50630 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
50631 +
50632 +static int
50633 +copy_user_glob(struct acl_object_label *obj)
50634 +{
50635 + struct acl_object_label *g_tmp, **guser;
50636 + unsigned int len;
50637 + char *tmp;
50638 +
50639 + if (obj->globbed == NULL)
50640 + return 0;
50641 +
50642 + guser = &obj->globbed;
50643 + while (*guser) {
50644 + g_tmp = (struct acl_object_label *)
50645 + acl_alloc(sizeof (struct acl_object_label));
50646 + if (g_tmp == NULL)
50647 + return -ENOMEM;
50648 +
50649 + if (copy_from_user(g_tmp, *guser,
50650 + sizeof (struct acl_object_label)))
50651 + return -EFAULT;
50652 +
50653 + len = strnlen_user(g_tmp->filename, PATH_MAX);
50654 +
50655 + if (!len || len >= PATH_MAX)
50656 + return -EINVAL;
50657 +
50658 + if ((tmp = (char *) acl_alloc(len)) == NULL)
50659 + return -ENOMEM;
50660 +
50661 + if (copy_from_user(tmp, g_tmp->filename, len))
50662 + return -EFAULT;
50663 + tmp[len-1] = '\0';
50664 + g_tmp->filename = tmp;
50665 +
50666 + *guser = g_tmp;
50667 + guser = &(g_tmp->next);
50668 + }
50669 +
50670 + return 0;
50671 +}
50672 +
50673 +static int
50674 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
50675 + struct acl_role_label *role)
50676 +{
50677 + struct acl_object_label *o_tmp;
50678 + unsigned int len;
50679 + int ret;
50680 + char *tmp;
50681 +
50682 + while (userp) {
50683 + if ((o_tmp = (struct acl_object_label *)
50684 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
50685 + return -ENOMEM;
50686 +
50687 + if (copy_from_user(o_tmp, userp,
50688 + sizeof (struct acl_object_label)))
50689 + return -EFAULT;
50690 +
50691 + userp = o_tmp->prev;
50692 +
50693 + len = strnlen_user(o_tmp->filename, PATH_MAX);
50694 +
50695 + if (!len || len >= PATH_MAX)
50696 + return -EINVAL;
50697 +
50698 + if ((tmp = (char *) acl_alloc(len)) == NULL)
50699 + return -ENOMEM;
50700 +
50701 + if (copy_from_user(tmp, o_tmp->filename, len))
50702 + return -EFAULT;
50703 + tmp[len-1] = '\0';
50704 + o_tmp->filename = tmp;
50705 +
50706 + insert_acl_obj_label(o_tmp, subj);
50707 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
50708 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
50709 + return -ENOMEM;
50710 +
50711 + ret = copy_user_glob(o_tmp);
50712 + if (ret)
50713 + return ret;
50714 +
50715 + if (o_tmp->nested) {
50716 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
50717 + if (IS_ERR(o_tmp->nested))
50718 + return PTR_ERR(o_tmp->nested);
50719 +
50720 + /* insert into nested subject list */
50721 + o_tmp->nested->next = role->hash->first;
50722 + role->hash->first = o_tmp->nested;
50723 + }
50724 + }
50725 +
50726 + return 0;
50727 +}
50728 +
50729 +static __u32
50730 +count_user_subjs(struct acl_subject_label *userp)
50731 +{
50732 + struct acl_subject_label s_tmp;
50733 + __u32 num = 0;
50734 +
50735 + while (userp) {
50736 + if (copy_from_user(&s_tmp, userp,
50737 + sizeof (struct acl_subject_label)))
50738 + break;
50739 +
50740 + userp = s_tmp.prev;
50741 + /* do not count nested subjects against this count, since
50742 + they are not included in the hash table, but are
50743 + attached to objects. We have already counted
50744 + the subjects in userspace for the allocation
50745 + stack
50746 + */
50747 + if (!(s_tmp.mode & GR_NESTED))
50748 + num++;
50749 + }
50750 +
50751 + return num;
50752 +}
50753 +
50754 +static int
50755 +copy_user_allowedips(struct acl_role_label *rolep)
50756 +{
50757 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
50758 +
50759 + ruserip = rolep->allowed_ips;
50760 +
50761 + while (ruserip) {
50762 + rlast = rtmp;
50763 +
50764 + if ((rtmp = (struct role_allowed_ip *)
50765 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
50766 + return -ENOMEM;
50767 +
50768 + if (copy_from_user(rtmp, ruserip,
50769 + sizeof (struct role_allowed_ip)))
50770 + return -EFAULT;
50771 +
50772 + ruserip = rtmp->prev;
50773 +
50774 + if (!rlast) {
50775 + rtmp->prev = NULL;
50776 + rolep->allowed_ips = rtmp;
50777 + } else {
50778 + rlast->next = rtmp;
50779 + rtmp->prev = rlast;
50780 + }
50781 +
50782 + if (!ruserip)
50783 + rtmp->next = NULL;
50784 + }
50785 +
50786 + return 0;
50787 +}
50788 +
50789 +static int
50790 +copy_user_transitions(struct acl_role_label *rolep)
50791 +{
50792 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
50793 +
50794 + unsigned int len;
50795 + char *tmp;
50796 +
50797 + rusertp = rolep->transitions;
50798 +
50799 + while (rusertp) {
50800 + rlast = rtmp;
50801 +
50802 + if ((rtmp = (struct role_transition *)
50803 + acl_alloc(sizeof (struct role_transition))) == NULL)
50804 + return -ENOMEM;
50805 +
50806 + if (copy_from_user(rtmp, rusertp,
50807 + sizeof (struct role_transition)))
50808 + return -EFAULT;
50809 +
50810 + rusertp = rtmp->prev;
50811 +
50812 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
50813 +
50814 + if (!len || len >= GR_SPROLE_LEN)
50815 + return -EINVAL;
50816 +
50817 + if ((tmp = (char *) acl_alloc(len)) == NULL)
50818 + return -ENOMEM;
50819 +
50820 + if (copy_from_user(tmp, rtmp->rolename, len))
50821 + return -EFAULT;
50822 + tmp[len-1] = '\0';
50823 + rtmp->rolename = tmp;
50824 +
50825 + if (!rlast) {
50826 + rtmp->prev = NULL;
50827 + rolep->transitions = rtmp;
50828 + } else {
50829 + rlast->next = rtmp;
50830 + rtmp->prev = rlast;
50831 + }
50832 +
50833 + if (!rusertp)
50834 + rtmp->next = NULL;
50835 + }
50836 +
50837 + return 0;
50838 +}
50839 +
50840 +static struct acl_subject_label *
50841 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
50842 +{
50843 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
50844 + unsigned int len;
50845 + char *tmp;
50846 + __u32 num_objs;
50847 + struct acl_ip_label **i_tmp, *i_utmp2;
50848 + struct gr_hash_struct ghash;
50849 + struct subject_map *subjmap;
50850 + unsigned int i_num;
50851 + int err;
50852 +
50853 + s_tmp = lookup_subject_map(userp);
50854 +
50855 + /* we've already copied this subject into the kernel, just return
50856 + the reference to it, and don't copy it over again
50857 + */
50858 + if (s_tmp)
50859 + return(s_tmp);
50860 +
50861 + if ((s_tmp = (struct acl_subject_label *)
50862 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
50863 + return ERR_PTR(-ENOMEM);
50864 +
50865 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
50866 + if (subjmap == NULL)
50867 + return ERR_PTR(-ENOMEM);
50868 +
50869 + subjmap->user = userp;
50870 + subjmap->kernel = s_tmp;
50871 + insert_subj_map_entry(subjmap);
50872 +
50873 + if (copy_from_user(s_tmp, userp,
50874 + sizeof (struct acl_subject_label)))
50875 + return ERR_PTR(-EFAULT);
50876 +
50877 + len = strnlen_user(s_tmp->filename, PATH_MAX);
50878 +
50879 + if (!len || len >= PATH_MAX)
50880 + return ERR_PTR(-EINVAL);
50881 +
50882 + if ((tmp = (char *) acl_alloc(len)) == NULL)
50883 + return ERR_PTR(-ENOMEM);
50884 +
50885 + if (copy_from_user(tmp, s_tmp->filename, len))
50886 + return ERR_PTR(-EFAULT);
50887 + tmp[len-1] = '\0';
50888 + s_tmp->filename = tmp;
50889 +
50890 + if (!strcmp(s_tmp->filename, "/"))
50891 + role->root_label = s_tmp;
50892 +
50893 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
50894 + return ERR_PTR(-EFAULT);
50895 +
50896 + /* copy user and group transition tables */
50897 +
50898 + if (s_tmp->user_trans_num) {
50899 + uid_t *uidlist;
50900 +
50901 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
50902 + if (uidlist == NULL)
50903 + return ERR_PTR(-ENOMEM);
50904 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
50905 + return ERR_PTR(-EFAULT);
50906 +
50907 + s_tmp->user_transitions = uidlist;
50908 + }
50909 +
50910 + if (s_tmp->group_trans_num) {
50911 + gid_t *gidlist;
50912 +
50913 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
50914 + if (gidlist == NULL)
50915 + return ERR_PTR(-ENOMEM);
50916 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
50917 + return ERR_PTR(-EFAULT);
50918 +
50919 + s_tmp->group_transitions = gidlist;
50920 + }
50921 +
50922 + /* set up object hash table */
50923 + num_objs = count_user_objs(ghash.first);
50924 +
50925 + s_tmp->obj_hash_size = num_objs;
50926 + s_tmp->obj_hash =
50927 + (struct acl_object_label **)
50928 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
50929 +
50930 + if (!s_tmp->obj_hash)
50931 + return ERR_PTR(-ENOMEM);
50932 +
50933 + memset(s_tmp->obj_hash, 0,
50934 + s_tmp->obj_hash_size *
50935 + sizeof (struct acl_object_label *));
50936 +
50937 + /* add in objects */
50938 + err = copy_user_objs(ghash.first, s_tmp, role);
50939 +
50940 + if (err)
50941 + return ERR_PTR(err);
50942 +
50943 + /* set pointer for parent subject */
50944 + if (s_tmp->parent_subject) {
50945 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
50946 +
50947 + if (IS_ERR(s_tmp2))
50948 + return s_tmp2;
50949 +
50950 + s_tmp->parent_subject = s_tmp2;
50951 + }
50952 +
50953 + /* add in ip acls */
50954 +
50955 + if (!s_tmp->ip_num) {
50956 + s_tmp->ips = NULL;
50957 + goto insert;
50958 + }
50959 +
50960 + i_tmp =
50961 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
50962 + sizeof (struct acl_ip_label *));
50963 +
50964 + if (!i_tmp)
50965 + return ERR_PTR(-ENOMEM);
50966 +
50967 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
50968 + *(i_tmp + i_num) =
50969 + (struct acl_ip_label *)
50970 + acl_alloc(sizeof (struct acl_ip_label));
50971 + if (!*(i_tmp + i_num))
50972 + return ERR_PTR(-ENOMEM);
50973 +
50974 + if (copy_from_user
50975 + (&i_utmp2, s_tmp->ips + i_num,
50976 + sizeof (struct acl_ip_label *)))
50977 + return ERR_PTR(-EFAULT);
50978 +
50979 + if (copy_from_user
50980 + (*(i_tmp + i_num), i_utmp2,
50981 + sizeof (struct acl_ip_label)))
50982 + return ERR_PTR(-EFAULT);
50983 +
50984 + if ((*(i_tmp + i_num))->iface == NULL)
50985 + continue;
50986 +
50987 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
50988 + if (!len || len >= IFNAMSIZ)
50989 + return ERR_PTR(-EINVAL);
50990 + tmp = acl_alloc(len);
50991 + if (tmp == NULL)
50992 + return ERR_PTR(-ENOMEM);
50993 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
50994 + return ERR_PTR(-EFAULT);
50995 + (*(i_tmp + i_num))->iface = tmp;
50996 + }
50997 +
50998 + s_tmp->ips = i_tmp;
50999 +
51000 +insert:
51001 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
51002 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
51003 + return ERR_PTR(-ENOMEM);
51004 +
51005 + return s_tmp;
51006 +}
51007 +
51008 +static int
51009 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
51010 +{
51011 + struct acl_subject_label s_pre;
51012 + struct acl_subject_label * ret;
51013 + int err;
51014 +
51015 + while (userp) {
51016 + if (copy_from_user(&s_pre, userp,
51017 + sizeof (struct acl_subject_label)))
51018 + return -EFAULT;
51019 +
51020 + /* do not add nested subjects here, add
51021 + while parsing objects
51022 + */
51023 +
51024 + if (s_pre.mode & GR_NESTED) {
51025 + userp = s_pre.prev;
51026 + continue;
51027 + }
51028 +
51029 + ret = do_copy_user_subj(userp, role);
51030 +
51031 + err = PTR_ERR(ret);
51032 + if (IS_ERR(ret))
51033 + return err;
51034 +
51035 + insert_acl_subj_label(ret, role);
51036 +
51037 + userp = s_pre.prev;
51038 + }
51039 +
51040 + return 0;
51041 +}
51042 +
51043 +static int
51044 +copy_user_acl(struct gr_arg *arg)
51045 +{
51046 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
51047 + struct sprole_pw *sptmp;
51048 + struct gr_hash_struct *ghash;
51049 + uid_t *domainlist;
51050 + unsigned int r_num;
51051 + unsigned int len;
51052 + char *tmp;
51053 + int err = 0;
51054 + __u16 i;
51055 + __u32 num_subjs;
51056 +
51057 + /* we need a default and kernel role */
51058 + if (arg->role_db.num_roles < 2)
51059 + return -EINVAL;
51060 +
51061 + /* copy special role authentication info from userspace */
51062 +
51063 + num_sprole_pws = arg->num_sprole_pws;
51064 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
51065 +
51066 + if (!acl_special_roles && num_sprole_pws)
51067 + return -ENOMEM;
51068 +
51069 + for (i = 0; i < num_sprole_pws; i++) {
51070 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
51071 + if (!sptmp)
51072 + return -ENOMEM;
51073 + if (copy_from_user(sptmp, arg->sprole_pws + i,
51074 + sizeof (struct sprole_pw)))
51075 + return -EFAULT;
51076 +
51077 + len = strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
51078 +
51079 + if (!len || len >= GR_SPROLE_LEN)
51080 + return -EINVAL;
51081 +
51082 + if ((tmp = (char *) acl_alloc(len)) == NULL)
51083 + return -ENOMEM;
51084 +
51085 + if (copy_from_user(tmp, sptmp->rolename, len))
51086 + return -EFAULT;
51087 +
51088 + tmp[len-1] = '\0';
51089 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51090 + printk(KERN_ALERT "Copying special role %s\n", tmp);
51091 +#endif
51092 + sptmp->rolename = tmp;
51093 + acl_special_roles[i] = sptmp;
51094 + }
51095 +
51096 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
51097 +
51098 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
51099 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
51100 +
51101 + if (!r_tmp)
51102 + return -ENOMEM;
51103 +
51104 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
51105 + sizeof (struct acl_role_label *)))
51106 + return -EFAULT;
51107 +
51108 + if (copy_from_user(r_tmp, r_utmp2,
51109 + sizeof (struct acl_role_label)))
51110 + return -EFAULT;
51111 +
51112 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
51113 +
51114 + if (!len || len >= PATH_MAX)
51115 + return -EINVAL;
51116 +
51117 + if ((tmp = (char *) acl_alloc(len)) == NULL)
51118 + return -ENOMEM;
51119 +
51120 + if (copy_from_user(tmp, r_tmp->rolename, len))
51121 + return -EFAULT;
51122 +
51123 + tmp[len-1] = '\0';
51124 + r_tmp->rolename = tmp;
51125 +
51126 + if (!strcmp(r_tmp->rolename, "default")
51127 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
51128 + default_role = r_tmp;
51129 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
51130 + kernel_role = r_tmp;
51131 + }
51132 +
51133 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
51134 + return -ENOMEM;
51135 +
51136 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct)))
51137 + return -EFAULT;
51138 +
51139 + r_tmp->hash = ghash;
51140 +
51141 + num_subjs = count_user_subjs(r_tmp->hash->first);
51142 +
51143 + r_tmp->subj_hash_size = num_subjs;
51144 + r_tmp->subj_hash =
51145 + (struct acl_subject_label **)
51146 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
51147 +
51148 + if (!r_tmp->subj_hash)
51149 + return -ENOMEM;
51150 +
51151 + err = copy_user_allowedips(r_tmp);
51152 + if (err)
51153 + return err;
51154 +
51155 + /* copy domain info */
51156 + if (r_tmp->domain_children != NULL) {
51157 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
51158 + if (domainlist == NULL)
51159 + return -ENOMEM;
51160 +
51161 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
51162 + return -EFAULT;
51163 +
51164 + r_tmp->domain_children = domainlist;
51165 + }
51166 +
51167 + err = copy_user_transitions(r_tmp);
51168 + if (err)
51169 + return err;
51170 +
51171 + memset(r_tmp->subj_hash, 0,
51172 + r_tmp->subj_hash_size *
51173 + sizeof (struct acl_subject_label *));
51174 +
51175 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
51176 +
51177 + if (err)
51178 + return err;
51179 +
51180 + /* set nested subject list to null */
51181 + r_tmp->hash->first = NULL;
51182 +
51183 + insert_acl_role_label(r_tmp);
51184 + }
51185 +
51186 + if (default_role == NULL || kernel_role == NULL)
51187 + return -EINVAL;
51188 +
51189 + return err;
51190 +}
51191 +
51192 +static int
51193 +gracl_init(struct gr_arg *args)
51194 +{
51195 + int error = 0;
51196 +
51197 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
51198 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
51199 +
51200 + if (init_variables(args)) {
51201 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
51202 + error = -ENOMEM;
51203 + free_variables();
51204 + goto out;
51205 + }
51206 +
51207 + error = copy_user_acl(args);
51208 + free_init_variables();
51209 + if (error) {
51210 + free_variables();
51211 + goto out;
51212 + }
51213 +
51214 + if ((error = gr_set_acls(0))) {
51215 + free_variables();
51216 + goto out;
51217 + }
51218 +
51219 + pax_open_kernel();
51220 + gr_status |= GR_READY;
51221 + pax_close_kernel();
51222 +
51223 + out:
51224 + return error;
51225 +}
51226 +
51227 +/* derived from glibc fnmatch() 0: match, 1: no match*/
51228 +
51229 +static int
51230 +glob_match(const char *p, const char *n)
51231 +{
51232 + char c;
51233 +
51234 + while ((c = *p++) != '\0') {
51235 + switch (c) {
51236 + case '?':
51237 + if (*n == '\0')
51238 + return 1;
51239 + else if (*n == '/')
51240 + return 1;
51241 + break;
51242 + case '\\':
51243 + if (*n != c)
51244 + return 1;
51245 + break;
51246 + case '*':
51247 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
51248 + if (*n == '/')
51249 + return 1;
51250 + else if (c == '?') {
51251 + if (*n == '\0')
51252 + return 1;
51253 + else
51254 + ++n;
51255 + }
51256 + }
51257 + if (c == '\0') {
51258 + return 0;
51259 + } else {
51260 + const char *endp;
51261 +
51262 + if ((endp = strchr(n, '/')) == NULL)
51263 + endp = n + strlen(n);
51264 +
51265 + if (c == '[') {
51266 + for (--p; n < endp; ++n)
51267 + if (!glob_match(p, n))
51268 + return 0;
51269 + } else if (c == '/') {
51270 + while (*n != '\0' && *n != '/')
51271 + ++n;
51272 + if (*n == '/' && !glob_match(p, n + 1))
51273 + return 0;
51274 + } else {
51275 + for (--p; n < endp; ++n)
51276 + if (*n == c && !glob_match(p, n))
51277 + return 0;
51278 + }
51279 +
51280 + return 1;
51281 + }
51282 + case '[':
51283 + {
51284 + int not;
51285 + char cold;
51286 +
51287 + if (*n == '\0' || *n == '/')
51288 + return 1;
51289 +
51290 + not = (*p == '!' || *p == '^');
51291 + if (not)
51292 + ++p;
51293 +
51294 + c = *p++;
51295 + for (;;) {
51296 + unsigned char fn = (unsigned char)*n;
51297 +
51298 + if (c == '\0')
51299 + return 1;
51300 + else {
51301 + if (c == fn)
51302 + goto matched;
51303 + cold = c;
51304 + c = *p++;
51305 +
51306 + if (c == '-' && *p != ']') {
51307 + unsigned char cend = *p++;
51308 +
51309 + if (cend == '\0')
51310 + return 1;
51311 +
51312 + if (cold <= fn && fn <= cend)
51313 + goto matched;
51314 +
51315 + c = *p++;
51316 + }
51317 + }
51318 +
51319 + if (c == ']')
51320 + break;
51321 + }
51322 + if (!not)
51323 + return 1;
51324 + break;
51325 + matched:
51326 + while (c != ']') {
51327 + if (c == '\0')
51328 + return 1;
51329 +
51330 + c = *p++;
51331 + }
51332 + if (not)
51333 + return 1;
51334 + }
51335 + break;
51336 + default:
51337 + if (c != *n)
51338 + return 1;
51339 + }
51340 +
51341 + ++n;
51342 + }
51343 +
51344 + if (*n == '\0')
51345 + return 0;
51346 +
51347 + if (*n == '/')
51348 + return 0;
51349 +
51350 + return 1;
51351 +}
51352 +
51353 +static struct acl_object_label *
51354 +chk_glob_label(struct acl_object_label *globbed,
51355 + const struct dentry *dentry, const struct vfsmount *mnt, char **path)
51356 +{
51357 + struct acl_object_label *tmp;
51358 +
51359 + if (*path == NULL)
51360 + *path = gr_to_filename_nolock(dentry, mnt);
51361 +
51362 + tmp = globbed;
51363 +
51364 + while (tmp) {
51365 + if (!glob_match(tmp->filename, *path))
51366 + return tmp;
51367 + tmp = tmp->next;
51368 + }
51369 +
51370 + return NULL;
51371 +}
51372 +
51373 +static struct acl_object_label *
51374 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
51375 + const ino_t curr_ino, const dev_t curr_dev,
51376 + const struct acl_subject_label *subj, char **path, const int checkglob)
51377 +{
51378 + struct acl_subject_label *tmpsubj;
51379 + struct acl_object_label *retval;
51380 + struct acl_object_label *retval2;
51381 +
51382 + tmpsubj = (struct acl_subject_label *) subj;
51383 + read_lock(&gr_inode_lock);
51384 + do {
51385 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
51386 + if (retval) {
51387 + if (checkglob && retval->globbed) {
51388 + retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
51389 + if (retval2)
51390 + retval = retval2;
51391 + }
51392 + break;
51393 + }
51394 + } while ((tmpsubj = tmpsubj->parent_subject));
51395 + read_unlock(&gr_inode_lock);
51396 +
51397 + return retval;
51398 +}
51399 +
51400 +static __inline__ struct acl_object_label *
51401 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
51402 + struct dentry *curr_dentry,
51403 + const struct acl_subject_label *subj, char **path, const int checkglob)
51404 +{
51405 + int newglob = checkglob;
51406 + ino_t inode;
51407 + dev_t device;
51408 +
51409 + /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
51410 + as we don't want a / * rule to match instead of the / object
51411 + don't do this for create lookups that call this function though, since they're looking up
51412 + on the parent and thus need globbing checks on all paths
51413 + */
51414 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
51415 + newglob = GR_NO_GLOB;
51416 +
51417 + spin_lock(&curr_dentry->d_lock);
51418 + inode = curr_dentry->d_inode->i_ino;
51419 + device = __get_dev(curr_dentry);
51420 + spin_unlock(&curr_dentry->d_lock);
51421 +
51422 + return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
51423 +}
51424 +
51425 +static struct acl_object_label *
51426 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
51427 + const struct acl_subject_label *subj, char *path, const int checkglob)
51428 +{
51429 + struct dentry *dentry = (struct dentry *) l_dentry;
51430 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
51431 + struct mount *real_mnt = real_mount(mnt);
51432 + struct acl_object_label *retval;
51433 + struct dentry *parent;
51434 +
51435 + write_seqlock(&rename_lock);
51436 + br_read_lock(vfsmount_lock);
51437 +
51438 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
51439 +#ifdef CONFIG_NET
51440 + mnt == sock_mnt ||
51441 +#endif
51442 +#ifdef CONFIG_HUGETLBFS
51443 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
51444 +#endif
51445 + /* ignore Eric Biederman */
51446 + IS_PRIVATE(l_dentry->d_inode))) {
51447 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
51448 + goto out;
51449 + }
51450 +
51451 + for (;;) {
51452 + if (dentry == real_root.dentry && mnt == real_root.mnt)
51453 + break;
51454 +
51455 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
51456 + if (!mnt_has_parent(real_mnt))
51457 + break;
51458 +
51459 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
51460 + if (retval != NULL)
51461 + goto out;
51462 +
51463 + dentry = real_mnt->mnt_mountpoint;
51464 + real_mnt = real_mnt->mnt_parent;
51465 + mnt = &real_mnt->mnt;
51466 + continue;
51467 + }
51468 +
51469 + parent = dentry->d_parent;
51470 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
51471 + if (retval != NULL)
51472 + goto out;
51473 +
51474 + dentry = parent;
51475 + }
51476 +
51477 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
51478 +
51479 + /* real_root is pinned so we don't have to hold a reference */
51480 + if (retval == NULL)
51481 + retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
51482 +out:
51483 + br_read_unlock(vfsmount_lock);
51484 + write_sequnlock(&rename_lock);
51485 +
51486 + BUG_ON(retval == NULL);
51487 +
51488 + return retval;
51489 +}
51490 +
51491 +static __inline__ struct acl_object_label *
51492 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
51493 + const struct acl_subject_label *subj)
51494 +{
51495 + char *path = NULL;
51496 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
51497 +}
51498 +
51499 +static __inline__ struct acl_object_label *
51500 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
51501 + const struct acl_subject_label *subj)
51502 +{
51503 + char *path = NULL;
51504 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
51505 +}
51506 +
51507 +static __inline__ struct acl_object_label *
51508 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
51509 + const struct acl_subject_label *subj, char *path)
51510 +{
51511 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
51512 +}
51513 +
51514 +static struct acl_subject_label *
51515 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
51516 + const struct acl_role_label *role)
51517 +{
51518 + struct dentry *dentry = (struct dentry *) l_dentry;
51519 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
51520 + struct mount *real_mnt = real_mount(mnt);
51521 + struct acl_subject_label *retval;
51522 + struct dentry *parent;
51523 +
51524 + write_seqlock(&rename_lock);
51525 + br_read_lock(vfsmount_lock);
51526 +
51527 + for (;;) {
51528 + if (dentry == real_root.dentry && mnt == real_root.mnt)
51529 + break;
51530 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
51531 + if (!mnt_has_parent(real_mnt))
51532 + break;
51533 +
51534 + spin_lock(&dentry->d_lock);
51535 + read_lock(&gr_inode_lock);
51536 + retval =
51537 + lookup_acl_subj_label(dentry->d_inode->i_ino,
51538 + __get_dev(dentry), role);
51539 + read_unlock(&gr_inode_lock);
51540 + spin_unlock(&dentry->d_lock);
51541 + if (retval != NULL)
51542 + goto out;
51543 +
51544 + dentry = real_mnt->mnt_mountpoint;
51545 + real_mnt = real_mnt->mnt_parent;
51546 + mnt = &real_mnt->mnt;
51547 + continue;
51548 + }
51549 +
51550 + spin_lock(&dentry->d_lock);
51551 + read_lock(&gr_inode_lock);
51552 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
51553 + __get_dev(dentry), role);
51554 + read_unlock(&gr_inode_lock);
51555 + parent = dentry->d_parent;
51556 + spin_unlock(&dentry->d_lock);
51557 +
51558 + if (retval != NULL)
51559 + goto out;
51560 +
51561 + dentry = parent;
51562 + }
51563 +
51564 + spin_lock(&dentry->d_lock);
51565 + read_lock(&gr_inode_lock);
51566 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
51567 + __get_dev(dentry), role);
51568 + read_unlock(&gr_inode_lock);
51569 + spin_unlock(&dentry->d_lock);
51570 +
51571 + if (unlikely(retval == NULL)) {
51572 + /* real_root is pinned, we don't need to hold a reference */
51573 + read_lock(&gr_inode_lock);
51574 + retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
51575 + __get_dev(real_root.dentry), role);
51576 + read_unlock(&gr_inode_lock);
51577 + }
51578 +out:
51579 + br_read_unlock(vfsmount_lock);
51580 + write_sequnlock(&rename_lock);
51581 +
51582 + BUG_ON(retval == NULL);
51583 +
51584 + return retval;
51585 +}
51586 +
51587 +static void
51588 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
51589 +{
51590 + struct task_struct *task = current;
51591 + const struct cred *cred = current_cred();
51592 +
51593 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
51594 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
51595 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
51596 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
51597 +
51598 + return;
51599 +}
51600 +
51601 +static void
51602 +gr_log_learn_sysctl(const char *path, const __u32 mode)
51603 +{
51604 + struct task_struct *task = current;
51605 + const struct cred *cred = current_cred();
51606 +
51607 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
51608 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
51609 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
51610 + 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
51611 +
51612 + return;
51613 +}
51614 +
51615 +static void
51616 +gr_log_learn_id_change(const char type, const unsigned int real,
51617 + const unsigned int effective, const unsigned int fs)
51618 +{
51619 + struct task_struct *task = current;
51620 + const struct cred *cred = current_cred();
51621 +
51622 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
51623 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
51624 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
51625 + type, real, effective, fs, &task->signal->saved_ip);
51626 +
51627 + return;
51628 +}
51629 +
51630 +__u32
51631 +gr_search_file(const struct dentry * dentry, const __u32 mode,
51632 + const struct vfsmount * mnt)
51633 +{
51634 + __u32 retval = mode;
51635 + struct acl_subject_label *curracl;
51636 + struct acl_object_label *currobj;
51637 +
51638 + if (unlikely(!(gr_status & GR_READY)))
51639 + return (mode & ~GR_AUDITS);
51640 +
51641 + curracl = current->acl;
51642 +
51643 + currobj = chk_obj_label(dentry, mnt, curracl);
51644 + retval = currobj->mode & mode;
51645 +
51646 + /* if we're opening a specified transfer file for writing
51647 + (e.g. /dev/initctl), then transfer our role to init
51648 + */
51649 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
51650 + current->role->roletype & GR_ROLE_PERSIST)) {
51651 + struct task_struct *task = init_pid_ns.child_reaper;
51652 +
51653 + if (task->role != current->role) {
51654 + task->acl_sp_role = 0;
51655 + task->acl_role_id = current->acl_role_id;
51656 + task->role = current->role;
51657 + rcu_read_lock();
51658 + read_lock(&grsec_exec_file_lock);
51659 + gr_apply_subject_to_task(task);
51660 + read_unlock(&grsec_exec_file_lock);
51661 + rcu_read_unlock();
51662 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
51663 + }
51664 + }
51665 +
51666 + if (unlikely
51667 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
51668 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
51669 + __u32 new_mode = mode;
51670 +
51671 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
51672 +
51673 + retval = new_mode;
51674 +
51675 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
51676 + new_mode |= GR_INHERIT;
51677 +
51678 + if (!(mode & GR_NOLEARN))
51679 + gr_log_learn(dentry, mnt, new_mode);
51680 + }
51681 +
51682 + return retval;
51683 +}
51684 +
51685 +struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
51686 + const struct dentry *parent,
51687 + const struct vfsmount *mnt)
51688 +{
51689 + struct name_entry *match;
51690 + struct acl_object_label *matchpo;
51691 + struct acl_subject_label *curracl;
51692 + char *path;
51693 +
51694 + if (unlikely(!(gr_status & GR_READY)))
51695 + return NULL;
51696 +
51697 + preempt_disable();
51698 + path = gr_to_filename_rbac(new_dentry, mnt);
51699 + match = lookup_name_entry_create(path);
51700 +
51701 + curracl = current->acl;
51702 +
51703 + if (match) {
51704 + read_lock(&gr_inode_lock);
51705 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
51706 + read_unlock(&gr_inode_lock);
51707 +
51708 + if (matchpo) {
51709 + preempt_enable();
51710 + return matchpo;
51711 + }
51712 + }
51713 +
51714 + // lookup parent
51715 +
51716 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
51717 +
51718 + preempt_enable();
51719 + return matchpo;
51720 +}
51721 +
51722 +__u32
51723 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
51724 + const struct vfsmount * mnt, const __u32 mode)
51725 +{
51726 + struct acl_object_label *matchpo;
51727 + __u32 retval;
51728 +
51729 + if (unlikely(!(gr_status & GR_READY)))
51730 + return (mode & ~GR_AUDITS);
51731 +
51732 + matchpo = gr_get_create_object(new_dentry, parent, mnt);
51733 +
51734 + retval = matchpo->mode & mode;
51735 +
51736 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
51737 + && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
51738 + __u32 new_mode = mode;
51739 +
51740 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
51741 +
51742 + gr_log_learn(new_dentry, mnt, new_mode);
51743 + return new_mode;
51744 + }
51745 +
51746 + return retval;
51747 +}
51748 +
51749 +__u32
51750 +gr_check_link(const struct dentry * new_dentry,
51751 + const struct dentry * parent_dentry,
51752 + const struct vfsmount * parent_mnt,
51753 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
51754 +{
51755 + struct acl_object_label *obj;
51756 + __u32 oldmode, newmode;
51757 + __u32 needmode;
51758 + __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
51759 + GR_DELETE | GR_INHERIT;
51760 +
51761 + if (unlikely(!(gr_status & GR_READY)))
51762 + return (GR_CREATE | GR_LINK);
51763 +
51764 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
51765 + oldmode = obj->mode;
51766 +
51767 + obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
51768 + newmode = obj->mode;
51769 +
51770 + needmode = newmode & checkmodes;
51771 +
51772 + // old name for hardlink must have at least the permissions of the new name
51773 + if ((oldmode & needmode) != needmode)
51774 + goto bad;
51775 +
51776 + // if old name had restrictions/auditing, make sure the new name does as well
51777 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
51778 +
51779 + // don't allow hardlinking of suid/sgid files without permission
51780 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
51781 + needmode |= GR_SETID;
51782 +
51783 + if ((newmode & needmode) != needmode)
51784 + goto bad;
51785 +
51786 + // enforce minimum permissions
51787 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
51788 + return newmode;
51789 +bad:
51790 + needmode = oldmode;
51791 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
51792 + needmode |= GR_SETID;
51793 +
51794 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
51795 + gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
51796 + return (GR_CREATE | GR_LINK);
51797 + } else if (newmode & GR_SUPPRESS)
51798 + return GR_SUPPRESS;
51799 + else
51800 + return 0;
51801 +}
51802 +
51803 +int
51804 +gr_check_hidden_task(const struct task_struct *task)
51805 +{
51806 + if (unlikely(!(gr_status & GR_READY)))
51807 + return 0;
51808 +
51809 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
51810 + return 1;
51811 +
51812 + return 0;
51813 +}
51814 +
51815 +int
51816 +gr_check_protected_task(const struct task_struct *task)
51817 +{
51818 + if (unlikely(!(gr_status & GR_READY) || !task))
51819 + return 0;
51820 +
51821 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
51822 + task->acl != current->acl)
51823 + return 1;
51824 +
51825 + return 0;
51826 +}
51827 +
51828 +int
51829 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
51830 +{
51831 + struct task_struct *p;
51832 + int ret = 0;
51833 +
51834 + if (unlikely(!(gr_status & GR_READY) || !pid))
51835 + return ret;
51836 +
51837 + read_lock(&tasklist_lock);
51838 + do_each_pid_task(pid, type, p) {
51839 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
51840 + p->acl != current->acl) {
51841 + ret = 1;
51842 + goto out;
51843 + }
51844 + } while_each_pid_task(pid, type, p);
51845 +out:
51846 + read_unlock(&tasklist_lock);
51847 +
51848 + return ret;
51849 +}
51850 +
51851 +void
51852 +gr_copy_label(struct task_struct *tsk)
51853 +{
51854 + /* plain copying of fields is already done by dup_task_struct */
51855 + tsk->signal->used_accept = 0;
51856 + tsk->acl_sp_role = 0;
51857 + //tsk->acl_role_id = current->acl_role_id;
51858 + //tsk->acl = current->acl;
51859 + //tsk->role = current->role;
51860 + tsk->signal->curr_ip = current->signal->curr_ip;
51861 + tsk->signal->saved_ip = current->signal->saved_ip;
51862 + if (current->exec_file)
51863 + get_file(current->exec_file);
51864 + //tsk->exec_file = current->exec_file;
51865 + //tsk->is_writable = current->is_writable;
51866 + if (unlikely(current->signal->used_accept)) {
51867 + current->signal->curr_ip = 0;
51868 + current->signal->saved_ip = 0;
51869 + }
51870 +
51871 + return;
51872 +}
51873 +
51874 +static void
51875 +gr_set_proc_res(struct task_struct *task)
51876 +{
51877 + struct acl_subject_label *proc;
51878 + unsigned short i;
51879 +
51880 + proc = task->acl;
51881 +
51882 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
51883 + return;
51884 +
51885 + for (i = 0; i < RLIM_NLIMITS; i++) {
51886 + if (!(proc->resmask & (1 << i)))
51887 + continue;
51888 +
51889 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
51890 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
51891 + }
51892 +
51893 + return;
51894 +}
51895 +
51896 +extern int __gr_process_user_ban(struct user_struct *user);
51897 +
51898 +int
51899 +gr_check_user_change(int real, int effective, int fs)
51900 +{
51901 + unsigned int i;
51902 + __u16 num;
51903 + uid_t *uidlist;
51904 + int curuid;
51905 + int realok = 0;
51906 + int effectiveok = 0;
51907 + int fsok = 0;
51908 +
51909 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
51910 + struct user_struct *user;
51911 +
51912 + if (real == -1)
51913 + goto skipit;
51914 +
51915 + user = find_user(real);
51916 + if (user == NULL)
51917 + goto skipit;
51918 +
51919 + if (__gr_process_user_ban(user)) {
51920 + /* for find_user */
51921 + free_uid(user);
51922 + return 1;
51923 + }
51924 +
51925 + /* for find_user */
51926 + free_uid(user);
51927 +
51928 +skipit:
51929 +#endif
51930 +
51931 + if (unlikely(!(gr_status & GR_READY)))
51932 + return 0;
51933 +
51934 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
51935 + gr_log_learn_id_change('u', real, effective, fs);
51936 +
51937 + num = current->acl->user_trans_num;
51938 + uidlist = current->acl->user_transitions;
51939 +
51940 + if (uidlist == NULL)
51941 + return 0;
51942 +
51943 + if (real == -1)
51944 + realok = 1;
51945 + if (effective == -1)
51946 + effectiveok = 1;
51947 + if (fs == -1)
51948 + fsok = 1;
51949 +
51950 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
51951 + for (i = 0; i < num; i++) {
51952 + curuid = (int)uidlist[i];
51953 + if (real == curuid)
51954 + realok = 1;
51955 + if (effective == curuid)
51956 + effectiveok = 1;
51957 + if (fs == curuid)
51958 + fsok = 1;
51959 + }
51960 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
51961 + for (i = 0; i < num; i++) {
51962 + curuid = (int)uidlist[i];
51963 + if (real == curuid)
51964 + break;
51965 + if (effective == curuid)
51966 + break;
51967 + if (fs == curuid)
51968 + break;
51969 + }
51970 + /* not in deny list */
51971 + if (i == num) {
51972 + realok = 1;
51973 + effectiveok = 1;
51974 + fsok = 1;
51975 + }
51976 + }
51977 +
51978 + if (realok && effectiveok && fsok)
51979 + return 0;
51980 + else {
51981 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
51982 + return 1;
51983 + }
51984 +}
51985 +
51986 +int
51987 +gr_check_group_change(int real, int effective, int fs)
51988 +{
51989 + unsigned int i;
51990 + __u16 num;
51991 + gid_t *gidlist;
51992 + int curgid;
51993 + int realok = 0;
51994 + int effectiveok = 0;
51995 + int fsok = 0;
51996 +
51997 + if (unlikely(!(gr_status & GR_READY)))
51998 + return 0;
51999 +
52000 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
52001 + gr_log_learn_id_change('g', real, effective, fs);
52002 +
52003 + num = current->acl->group_trans_num;
52004 + gidlist = current->acl->group_transitions;
52005 +
52006 + if (gidlist == NULL)
52007 + return 0;
52008 +
52009 + if (real == -1)
52010 + realok = 1;
52011 + if (effective == -1)
52012 + effectiveok = 1;
52013 + if (fs == -1)
52014 + fsok = 1;
52015 +
52016 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
52017 + for (i = 0; i < num; i++) {
52018 + curgid = (int)gidlist[i];
52019 + if (real == curgid)
52020 + realok = 1;
52021 + if (effective == curgid)
52022 + effectiveok = 1;
52023 + if (fs == curgid)
52024 + fsok = 1;
52025 + }
52026 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
52027 + for (i = 0; i < num; i++) {
52028 + curgid = (int)gidlist[i];
52029 + if (real == curgid)
52030 + break;
52031 + if (effective == curgid)
52032 + break;
52033 + if (fs == curgid)
52034 + break;
52035 + }
52036 + /* not in deny list */
52037 + if (i == num) {
52038 + realok = 1;
52039 + effectiveok = 1;
52040 + fsok = 1;
52041 + }
52042 + }
52043 +
52044 + if (realok && effectiveok && fsok)
52045 + return 0;
52046 + else {
52047 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
52048 + return 1;
52049 + }
52050 +}
52051 +
52052 +extern int gr_acl_is_capable(const int cap);
52053 +
52054 +void
52055 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
52056 +{
52057 + struct acl_role_label *role = task->role;
52058 + struct acl_subject_label *subj = NULL;
52059 + struct acl_object_label *obj;
52060 + struct file *filp;
52061 +
52062 + if (unlikely(!(gr_status & GR_READY)))
52063 + return;
52064 +
52065 + filp = task->exec_file;
52066 +
52067 + /* kernel process, we'll give them the kernel role */
52068 + if (unlikely(!filp)) {
52069 + task->role = kernel_role;
52070 + task->acl = kernel_role->root_label;
52071 + return;
52072 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
52073 + role = lookup_acl_role_label(task, uid, gid);
52074 +
52075 + /* don't change the role if we're not a privileged process */
52076 + if (role && task->role != role &&
52077 + (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
52078 + ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
52079 + return;
52080 +
52081 + /* perform subject lookup in possibly new role
52082 + we can use this result below in the case where role == task->role
52083 + */
52084 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
52085 +
52086 + /* if we changed uid/gid, but result in the same role
52087 + and are using inheritance, don't lose the inherited subject
52088 + if current subject is other than what normal lookup
52089 + would result in, we arrived via inheritance, don't
52090 + lose subject
52091 + */
52092 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
52093 + (subj == task->acl)))
52094 + task->acl = subj;
52095 +
52096 + task->role = role;
52097 +
52098 + task->is_writable = 0;
52099 +
52100 + /* ignore additional mmap checks for processes that are writable
52101 + by the default ACL */
52102 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
52103 + if (unlikely(obj->mode & GR_WRITE))
52104 + task->is_writable = 1;
52105 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
52106 + if (unlikely(obj->mode & GR_WRITE))
52107 + task->is_writable = 1;
52108 +
52109 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
52110 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
52111 +#endif
52112 +
52113 + gr_set_proc_res(task);
52114 +
52115 + return;
52116 +}
52117 +
52118 +int
52119 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
52120 + const int unsafe_flags)
52121 +{
52122 + struct task_struct *task = current;
52123 + struct acl_subject_label *newacl;
52124 + struct acl_object_label *obj;
52125 + __u32 retmode;
52126 +
52127 + if (unlikely(!(gr_status & GR_READY)))
52128 + return 0;
52129 +
52130 + newacl = chk_subj_label(dentry, mnt, task->role);
52131 +
52132 + task_lock(task);
52133 + if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
52134 + !(task->role->roletype & GR_ROLE_GOD) &&
52135 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
52136 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
52137 + task_unlock(task);
52138 + if (unsafe_flags & LSM_UNSAFE_SHARE)
52139 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
52140 + else
52141 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
52142 + return -EACCES;
52143 + }
52144 + task_unlock(task);
52145 +
52146 + obj = chk_obj_label(dentry, mnt, task->acl);
52147 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
52148 +
52149 + if (!(task->acl->mode & GR_INHERITLEARN) &&
52150 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
52151 + if (obj->nested)
52152 + task->acl = obj->nested;
52153 + else
52154 + task->acl = newacl;
52155 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
52156 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
52157 +
52158 + task->is_writable = 0;
52159 +
52160 + /* ignore additional mmap checks for processes that are writable
52161 + by the default ACL */
52162 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
52163 + if (unlikely(obj->mode & GR_WRITE))
52164 + task->is_writable = 1;
52165 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
52166 + if (unlikely(obj->mode & GR_WRITE))
52167 + task->is_writable = 1;
52168 +
52169 + gr_set_proc_res(task);
52170 +
52171 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
52172 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
52173 +#endif
52174 + return 0;
52175 +}
52176 +
52177 +/* always called with valid inodev ptr */
52178 +static void
52179 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
52180 +{
52181 + struct acl_object_label *matchpo;
52182 + struct acl_subject_label *matchps;
52183 + struct acl_subject_label *subj;
52184 + struct acl_role_label *role;
52185 + unsigned int x;
52186 +
52187 + FOR_EACH_ROLE_START(role)
52188 + FOR_EACH_SUBJECT_START(role, subj, x)
52189 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
52190 + matchpo->mode |= GR_DELETED;
52191 + FOR_EACH_SUBJECT_END(subj,x)
52192 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
52193 + if (subj->inode == ino && subj->device == dev)
52194 + subj->mode |= GR_DELETED;
52195 + FOR_EACH_NESTED_SUBJECT_END(subj)
52196 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
52197 + matchps->mode |= GR_DELETED;
52198 + FOR_EACH_ROLE_END(role)
52199 +
52200 + inodev->nentry->deleted = 1;
52201 +
52202 + return;
52203 +}
52204 +
52205 +void
52206 +gr_handle_delete(const ino_t ino, const dev_t dev)
52207 +{
52208 + struct inodev_entry *inodev;
52209 +
52210 + if (unlikely(!(gr_status & GR_READY)))
52211 + return;
52212 +
52213 + write_lock(&gr_inode_lock);
52214 + inodev = lookup_inodev_entry(ino, dev);
52215 + if (inodev != NULL)
52216 + do_handle_delete(inodev, ino, dev);
52217 + write_unlock(&gr_inode_lock);
52218 +
52219 + return;
52220 +}
52221 +
52222 +static void
52223 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
52224 + const ino_t newinode, const dev_t newdevice,
52225 + struct acl_subject_label *subj)
52226 +{
52227 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
52228 + struct acl_object_label *match;
52229 +
52230 + match = subj->obj_hash[index];
52231 +
52232 + while (match && (match->inode != oldinode ||
52233 + match->device != olddevice ||
52234 + !(match->mode & GR_DELETED)))
52235 + match = match->next;
52236 +
52237 + if (match && (match->inode == oldinode)
52238 + && (match->device == olddevice)
52239 + && (match->mode & GR_DELETED)) {
52240 + if (match->prev == NULL) {
52241 + subj->obj_hash[index] = match->next;
52242 + if (match->next != NULL)
52243 + match->next->prev = NULL;
52244 + } else {
52245 + match->prev->next = match->next;
52246 + if (match->next != NULL)
52247 + match->next->prev = match->prev;
52248 + }
52249 + match->prev = NULL;
52250 + match->next = NULL;
52251 + match->inode = newinode;
52252 + match->device = newdevice;
52253 + match->mode &= ~GR_DELETED;
52254 +
52255 + insert_acl_obj_label(match, subj);
52256 + }
52257 +
52258 + return;
52259 +}
52260 +
52261 +static void
52262 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
52263 + const ino_t newinode, const dev_t newdevice,
52264 + struct acl_role_label *role)
52265 +{
52266 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
52267 + struct acl_subject_label *match;
52268 +
52269 + match = role->subj_hash[index];
52270 +
52271 + while (match && (match->inode != oldinode ||
52272 + match->device != olddevice ||
52273 + !(match->mode & GR_DELETED)))
52274 + match = match->next;
52275 +
52276 + if (match && (match->inode == oldinode)
52277 + && (match->device == olddevice)
52278 + && (match->mode & GR_DELETED)) {
52279 + if (match->prev == NULL) {
52280 + role->subj_hash[index] = match->next;
52281 + if (match->next != NULL)
52282 + match->next->prev = NULL;
52283 + } else {
52284 + match->prev->next = match->next;
52285 + if (match->next != NULL)
52286 + match->next->prev = match->prev;
52287 + }
52288 + match->prev = NULL;
52289 + match->next = NULL;
52290 + match->inode = newinode;
52291 + match->device = newdevice;
52292 + match->mode &= ~GR_DELETED;
52293 +
52294 + insert_acl_subj_label(match, role);
52295 + }
52296 +
52297 + return;
52298 +}
52299 +
52300 +static void
52301 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
52302 + const ino_t newinode, const dev_t newdevice)
52303 +{
52304 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
52305 + struct inodev_entry *match;
52306 +
52307 + match = inodev_set.i_hash[index];
52308 +
52309 + while (match && (match->nentry->inode != oldinode ||
52310 + match->nentry->device != olddevice || !match->nentry->deleted))
52311 + match = match->next;
52312 +
52313 + if (match && (match->nentry->inode == oldinode)
52314 + && (match->nentry->device == olddevice) &&
52315 + match->nentry->deleted) {
52316 + if (match->prev == NULL) {
52317 + inodev_set.i_hash[index] = match->next;
52318 + if (match->next != NULL)
52319 + match->next->prev = NULL;
52320 + } else {
52321 + match->prev->next = match->next;
52322 + if (match->next != NULL)
52323 + match->next->prev = match->prev;
52324 + }
52325 + match->prev = NULL;
52326 + match->next = NULL;
52327 + match->nentry->inode = newinode;
52328 + match->nentry->device = newdevice;
52329 + match->nentry->deleted = 0;
52330 +
52331 + insert_inodev_entry(match);
52332 + }
52333 +
52334 + return;
52335 +}
52336 +
52337 +static void
52338 +__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
52339 +{
52340 + struct acl_subject_label *subj;
52341 + struct acl_role_label *role;
52342 + unsigned int x;
52343 +
52344 + FOR_EACH_ROLE_START(role)
52345 + update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
52346 +
52347 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
52348 + if ((subj->inode == ino) && (subj->device == dev)) {
52349 + subj->inode = ino;
52350 + subj->device = dev;
52351 + }
52352 + FOR_EACH_NESTED_SUBJECT_END(subj)
52353 + FOR_EACH_SUBJECT_START(role, subj, x)
52354 + update_acl_obj_label(matchn->inode, matchn->device,
52355 + ino, dev, subj);
52356 + FOR_EACH_SUBJECT_END(subj,x)
52357 + FOR_EACH_ROLE_END(role)
52358 +
52359 + update_inodev_entry(matchn->inode, matchn->device, ino, dev);
52360 +
52361 + return;
52362 +}
52363 +
52364 +static void
52365 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
52366 + const struct vfsmount *mnt)
52367 +{
52368 + ino_t ino = dentry->d_inode->i_ino;
52369 + dev_t dev = __get_dev(dentry);
52370 +
52371 + __do_handle_create(matchn, ino, dev);
52372 +
52373 + return;
52374 +}
52375 +
52376 +void
52377 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
52378 +{
52379 + struct name_entry *matchn;
52380 +
52381 + if (unlikely(!(gr_status & GR_READY)))
52382 + return;
52383 +
52384 + preempt_disable();
52385 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
52386 +
52387 + if (unlikely((unsigned long)matchn)) {
52388 + write_lock(&gr_inode_lock);
52389 + do_handle_create(matchn, dentry, mnt);
52390 + write_unlock(&gr_inode_lock);
52391 + }
52392 + preempt_enable();
52393 +
52394 + return;
52395 +}
52396 +
52397 +void
52398 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
52399 +{
52400 + struct name_entry *matchn;
52401 +
52402 + if (unlikely(!(gr_status & GR_READY)))
52403 + return;
52404 +
52405 + preempt_disable();
52406 + matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
52407 +
52408 + if (unlikely((unsigned long)matchn)) {
52409 + write_lock(&gr_inode_lock);
52410 + __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
52411 + write_unlock(&gr_inode_lock);
52412 + }
52413 + preempt_enable();
52414 +
52415 + return;
52416 +}
52417 +
52418 +void
52419 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
52420 + struct dentry *old_dentry,
52421 + struct dentry *new_dentry,
52422 + struct vfsmount *mnt, const __u8 replace)
52423 +{
52424 + struct name_entry *matchn;
52425 + struct inodev_entry *inodev;
52426 + struct inode *inode = new_dentry->d_inode;
52427 + ino_t old_ino = old_dentry->d_inode->i_ino;
52428 + dev_t old_dev = __get_dev(old_dentry);
52429 +
52430 + /* vfs_rename swaps the name and parent link for old_dentry and
52431 + new_dentry
52432 + at this point, old_dentry has the new name, parent link, and inode
52433 + for the renamed file
52434 + if a file is being replaced by a rename, new_dentry has the inode
52435 + and name for the replaced file
52436 + */
52437 +
52438 + if (unlikely(!(gr_status & GR_READY)))
52439 + return;
52440 +
52441 + preempt_disable();
52442 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
52443 +
52444 + /* we wouldn't have to check d_inode if it weren't for
52445 + NFS silly-renaming
52446 + */
52447 +
52448 + write_lock(&gr_inode_lock);
52449 + if (unlikely(replace && inode)) {
52450 + ino_t new_ino = inode->i_ino;
52451 + dev_t new_dev = __get_dev(new_dentry);
52452 +
52453 + inodev = lookup_inodev_entry(new_ino, new_dev);
52454 + if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
52455 + do_handle_delete(inodev, new_ino, new_dev);
52456 + }
52457 +
52458 + inodev = lookup_inodev_entry(old_ino, old_dev);
52459 + if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
52460 + do_handle_delete(inodev, old_ino, old_dev);
52461 +
52462 + if (unlikely((unsigned long)matchn))
52463 + do_handle_create(matchn, old_dentry, mnt);
52464 +
52465 + write_unlock(&gr_inode_lock);
52466 + preempt_enable();
52467 +
52468 + return;
52469 +}
52470 +
52471 +static int
52472 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
52473 + unsigned char **sum)
52474 +{
52475 + struct acl_role_label *r;
52476 + struct role_allowed_ip *ipp;
52477 + struct role_transition *trans;
52478 + unsigned int i;
52479 + int found = 0;
52480 + u32 curr_ip = current->signal->curr_ip;
52481 +
52482 + current->signal->saved_ip = curr_ip;
52483 +
52484 + /* check transition table */
52485 +
52486 + for (trans = current->role->transitions; trans; trans = trans->next) {
52487 + if (!strcmp(rolename, trans->rolename)) {
52488 + found = 1;
52489 + break;
52490 + }
52491 + }
52492 +
52493 + if (!found)
52494 + return 0;
52495 +
52496 + /* handle special roles that do not require authentication
52497 + and check ip */
52498 +
52499 + FOR_EACH_ROLE_START(r)
52500 + if (!strcmp(rolename, r->rolename) &&
52501 + (r->roletype & GR_ROLE_SPECIAL)) {
52502 + found = 0;
52503 + if (r->allowed_ips != NULL) {
52504 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
52505 + if ((ntohl(curr_ip) & ipp->netmask) ==
52506 + (ntohl(ipp->addr) & ipp->netmask))
52507 + found = 1;
52508 + }
52509 + } else
52510 + found = 2;
52511 + if (!found)
52512 + return 0;
52513 +
52514 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
52515 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
52516 + *salt = NULL;
52517 + *sum = NULL;
52518 + return 1;
52519 + }
52520 + }
52521 + FOR_EACH_ROLE_END(r)
52522 +
52523 + for (i = 0; i < num_sprole_pws; i++) {
52524 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
52525 + *salt = acl_special_roles[i]->salt;
52526 + *sum = acl_special_roles[i]->sum;
52527 + return 1;
52528 + }
52529 + }
52530 +
52531 + return 0;
52532 +}
52533 +
52534 +static void
52535 +assign_special_role(char *rolename)
52536 +{
52537 + struct acl_object_label *obj;
52538 + struct acl_role_label *r;
52539 + struct acl_role_label *assigned = NULL;
52540 + struct task_struct *tsk;
52541 + struct file *filp;
52542 +
52543 + FOR_EACH_ROLE_START(r)
52544 + if (!strcmp(rolename, r->rolename) &&
52545 + (r->roletype & GR_ROLE_SPECIAL)) {
52546 + assigned = r;
52547 + break;
52548 + }
52549 + FOR_EACH_ROLE_END(r)
52550 +
52551 + if (!assigned)
52552 + return;
52553 +
52554 + read_lock(&tasklist_lock);
52555 + read_lock(&grsec_exec_file_lock);
52556 +
52557 + tsk = current->real_parent;
52558 + if (tsk == NULL)
52559 + goto out_unlock;
52560 +
52561 + filp = tsk->exec_file;
52562 + if (filp == NULL)
52563 + goto out_unlock;
52564 +
52565 + tsk->is_writable = 0;
52566 +
52567 + tsk->acl_sp_role = 1;
52568 + tsk->acl_role_id = ++acl_sp_role_value;
52569 + tsk->role = assigned;
52570 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
52571 +
52572 + /* ignore additional mmap checks for processes that are writable
52573 + by the default ACL */
52574 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
52575 + if (unlikely(obj->mode & GR_WRITE))
52576 + tsk->is_writable = 1;
52577 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
52578 + if (unlikely(obj->mode & GR_WRITE))
52579 + tsk->is_writable = 1;
52580 +
52581 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
52582 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
52583 +#endif
52584 +
52585 +out_unlock:
52586 + read_unlock(&grsec_exec_file_lock);
52587 + read_unlock(&tasklist_lock);
52588 + return;
52589 +}
52590 +
52591 +int gr_check_secure_terminal(struct task_struct *task)
52592 +{
52593 + struct task_struct *p, *p2, *p3;
52594 + struct files_struct *files;
52595 + struct fdtable *fdt;
52596 + struct file *our_file = NULL, *file;
52597 + int i;
52598 +
52599 + if (task->signal->tty == NULL)
52600 + return 1;
52601 +
52602 + files = get_files_struct(task);
52603 + if (files != NULL) {
52604 + rcu_read_lock();
52605 + fdt = files_fdtable(files);
52606 + for (i=0; i < fdt->max_fds; i++) {
52607 + file = fcheck_files(files, i);
52608 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
52609 + get_file(file);
52610 + our_file = file;
52611 + }
52612 + }
52613 + rcu_read_unlock();
52614 + put_files_struct(files);
52615 + }
52616 +
52617 + if (our_file == NULL)
52618 + return 1;
52619 +
52620 + read_lock(&tasklist_lock);
52621 + do_each_thread(p2, p) {
52622 + files = get_files_struct(p);
52623 + if (files == NULL ||
52624 + (p->signal && p->signal->tty == task->signal->tty)) {
52625 + if (files != NULL)
52626 + put_files_struct(files);
52627 + continue;
52628 + }
52629 + rcu_read_lock();
52630 + fdt = files_fdtable(files);
52631 + for (i=0; i < fdt->max_fds; i++) {
52632 + file = fcheck_files(files, i);
52633 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
52634 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
52635 + p3 = task;
52636 + while (p3->pid > 0) {
52637 + if (p3 == p)
52638 + break;
52639 + p3 = p3->real_parent;
52640 + }
52641 + if (p3 == p)
52642 + break;
52643 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
52644 + gr_handle_alertkill(p);
52645 + rcu_read_unlock();
52646 + put_files_struct(files);
52647 + read_unlock(&tasklist_lock);
52648 + fput(our_file);
52649 + return 0;
52650 + }
52651 + }
52652 + rcu_read_unlock();
52653 + put_files_struct(files);
52654 + } while_each_thread(p2, p);
52655 + read_unlock(&tasklist_lock);
52656 +
52657 + fput(our_file);
52658 + return 1;
52659 +}
52660 +
52661 +ssize_t
52662 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
52663 +{
52664 + struct gr_arg_wrapper uwrap;
52665 + unsigned char *sprole_salt = NULL;
52666 + unsigned char *sprole_sum = NULL;
52667 + int error = sizeof (struct gr_arg_wrapper);
52668 + int error2 = 0;
52669 +
52670 + mutex_lock(&gr_dev_mutex);
52671 +
52672 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
52673 + error = -EPERM;
52674 + goto out;
52675 + }
52676 +
52677 + if (count != sizeof (struct gr_arg_wrapper)) {
52678 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
52679 + error = -EINVAL;
52680 + goto out;
52681 + }
52682 +
52683 +
52684 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
52685 + gr_auth_expires = 0;
52686 + gr_auth_attempts = 0;
52687 + }
52688 +
52689 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
52690 + error = -EFAULT;
52691 + goto out;
52692 + }
52693 +
52694 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
52695 + error = -EINVAL;
52696 + goto out;
52697 + }
52698 +
52699 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
52700 + error = -EFAULT;
52701 + goto out;
52702 + }
52703 +
52704 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
52705 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
52706 + time_after(gr_auth_expires, get_seconds())) {
52707 + error = -EBUSY;
52708 + goto out;
52709 + }
52710 +
52711 + /* if non-root trying to do anything other than use a special role,
52712 + do not attempt authentication, do not count towards authentication
52713 + locking
52714 + */
52715 +
52716 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
52717 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
52718 + current_uid()) {
52719 + error = -EPERM;
52720 + goto out;
52721 + }
52722 +
52723 + /* ensure pw and special role name are null terminated */
52724 +
52725 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
52726 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
52727 +
52728 + /* Okay.
52729 + * We have our enough of the argument structure..(we have yet
52730 + * to copy_from_user the tables themselves) . Copy the tables
52731 + * only if we need them, i.e. for loading operations. */
52732 +
52733 + switch (gr_usermode->mode) {
52734 + case GR_STATUS:
52735 + if (gr_status & GR_READY) {
52736 + error = 1;
52737 + if (!gr_check_secure_terminal(current))
52738 + error = 3;
52739 + } else
52740 + error = 2;
52741 + goto out;
52742 + case GR_SHUTDOWN:
52743 + if ((gr_status & GR_READY)
52744 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
52745 + pax_open_kernel();
52746 + gr_status &= ~GR_READY;
52747 + pax_close_kernel();
52748 +
52749 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
52750 + free_variables();
52751 + memset(gr_usermode, 0, sizeof (struct gr_arg));
52752 + memset(gr_system_salt, 0, GR_SALT_LEN);
52753 + memset(gr_system_sum, 0, GR_SHA_LEN);
52754 + } else if (gr_status & GR_READY) {
52755 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
52756 + error = -EPERM;
52757 + } else {
52758 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
52759 + error = -EAGAIN;
52760 + }
52761 + break;
52762 + case GR_ENABLE:
52763 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
52764 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
52765 + else {
52766 + if (gr_status & GR_READY)
52767 + error = -EAGAIN;
52768 + else
52769 + error = error2;
52770 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
52771 + }
52772 + break;
52773 + case GR_RELOAD:
52774 + if (!(gr_status & GR_READY)) {
52775 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
52776 + error = -EAGAIN;
52777 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
52778 + preempt_disable();
52779 +
52780 + pax_open_kernel();
52781 + gr_status &= ~GR_READY;
52782 + pax_close_kernel();
52783 +
52784 + free_variables();
52785 + if (!(error2 = gracl_init(gr_usermode))) {
52786 + preempt_enable();
52787 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
52788 + } else {
52789 + preempt_enable();
52790 + error = error2;
52791 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
52792 + }
52793 + } else {
52794 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
52795 + error = -EPERM;
52796 + }
52797 + break;
52798 + case GR_SEGVMOD:
52799 + if (unlikely(!(gr_status & GR_READY))) {
52800 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
52801 + error = -EAGAIN;
52802 + break;
52803 + }
52804 +
52805 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
52806 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
52807 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
52808 + struct acl_subject_label *segvacl;
52809 + segvacl =
52810 + lookup_acl_subj_label(gr_usermode->segv_inode,
52811 + gr_usermode->segv_device,
52812 + current->role);
52813 + if (segvacl) {
52814 + segvacl->crashes = 0;
52815 + segvacl->expires = 0;
52816 + }
52817 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
52818 + gr_remove_uid(gr_usermode->segv_uid);
52819 + }
52820 + } else {
52821 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
52822 + error = -EPERM;
52823 + }
52824 + break;
52825 + case GR_SPROLE:
52826 + case GR_SPROLEPAM:
52827 + if (unlikely(!(gr_status & GR_READY))) {
52828 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
52829 + error = -EAGAIN;
52830 + break;
52831 + }
52832 +
52833 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
52834 + current->role->expires = 0;
52835 + current->role->auth_attempts = 0;
52836 + }
52837 +
52838 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
52839 + time_after(current->role->expires, get_seconds())) {
52840 + error = -EBUSY;
52841 + goto out;
52842 + }
52843 +
52844 + if (lookup_special_role_auth
52845 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
52846 + && ((!sprole_salt && !sprole_sum)
52847 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
52848 + char *p = "";
52849 + assign_special_role(gr_usermode->sp_role);
52850 + read_lock(&tasklist_lock);
52851 + if (current->real_parent)
52852 + p = current->real_parent->role->rolename;
52853 + read_unlock(&tasklist_lock);
52854 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
52855 + p, acl_sp_role_value);
52856 + } else {
52857 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
52858 + error = -EPERM;
52859 + if(!(current->role->auth_attempts++))
52860 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
52861 +
52862 + goto out;
52863 + }
52864 + break;
52865 + case GR_UNSPROLE:
52866 + if (unlikely(!(gr_status & GR_READY))) {
52867 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
52868 + error = -EAGAIN;
52869 + break;
52870 + }
52871 +
52872 + if (current->role->roletype & GR_ROLE_SPECIAL) {
52873 + char *p = "";
52874 + int i = 0;
52875 +
52876 + read_lock(&tasklist_lock);
52877 + if (current->real_parent) {
52878 + p = current->real_parent->role->rolename;
52879 + i = current->real_parent->acl_role_id;
52880 + }
52881 + read_unlock(&tasklist_lock);
52882 +
52883 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
52884 + gr_set_acls(1);
52885 + } else {
52886 + error = -EPERM;
52887 + goto out;
52888 + }
52889 + break;
52890 + default:
52891 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
52892 + error = -EINVAL;
52893 + break;
52894 + }
52895 +
52896 + if (error != -EPERM)
52897 + goto out;
52898 +
52899 + if(!(gr_auth_attempts++))
52900 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
52901 +
52902 + out:
52903 + mutex_unlock(&gr_dev_mutex);
52904 + return error;
52905 +}
52906 +
52907 +/* must be called with
52908 + rcu_read_lock();
52909 + read_lock(&tasklist_lock);
52910 + read_lock(&grsec_exec_file_lock);
52911 +*/
52912 +int gr_apply_subject_to_task(struct task_struct *task)
52913 +{
52914 + struct acl_object_label *obj;
52915 + char *tmpname;
52916 + struct acl_subject_label *tmpsubj;
52917 + struct file *filp;
52918 + struct name_entry *nmatch;
52919 +
52920 + filp = task->exec_file;
52921 + if (filp == NULL)
52922 + return 0;
52923 +
52924 + /* the following is to apply the correct subject
52925 + on binaries running when the RBAC system
52926 + is enabled, when the binaries have been
52927 + replaced or deleted since their execution
52928 + -----
52929 + when the RBAC system starts, the inode/dev
52930 + from exec_file will be one the RBAC system
52931 + is unaware of. It only knows the inode/dev
52932 + of the present file on disk, or the absence
52933 + of it.
52934 + */
52935 + preempt_disable();
52936 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
52937 +
52938 + nmatch = lookup_name_entry(tmpname);
52939 + preempt_enable();
52940 + tmpsubj = NULL;
52941 + if (nmatch) {
52942 + if (nmatch->deleted)
52943 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
52944 + else
52945 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
52946 + if (tmpsubj != NULL)
52947 + task->acl = tmpsubj;
52948 + }
52949 + if (tmpsubj == NULL)
52950 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
52951 + task->role);
52952 + if (task->acl) {
52953 + task->is_writable = 0;
52954 + /* ignore additional mmap checks for processes that are writable
52955 + by the default ACL */
52956 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
52957 + if (unlikely(obj->mode & GR_WRITE))
52958 + task->is_writable = 1;
52959 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
52960 + if (unlikely(obj->mode & GR_WRITE))
52961 + task->is_writable = 1;
52962 +
52963 + gr_set_proc_res(task);
52964 +
52965 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
52966 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
52967 +#endif
52968 + } else {
52969 + return 1;
52970 + }
52971 +
52972 + return 0;
52973 +}
52974 +
52975 +int
52976 +gr_set_acls(const int type)
52977 +{
52978 + struct task_struct *task, *task2;
52979 + struct acl_role_label *role = current->role;
52980 + __u16 acl_role_id = current->acl_role_id;
52981 + const struct cred *cred;
52982 + int ret;
52983 +
52984 + rcu_read_lock();
52985 + read_lock(&tasklist_lock);
52986 + read_lock(&grsec_exec_file_lock);
52987 + do_each_thread(task2, task) {
52988 + /* check to see if we're called from the exit handler,
52989 + if so, only replace ACLs that have inherited the admin
52990 + ACL */
52991 +
52992 + if (type && (task->role != role ||
52993 + task->acl_role_id != acl_role_id))
52994 + continue;
52995 +
52996 + task->acl_role_id = 0;
52997 + task->acl_sp_role = 0;
52998 +
52999 + if (task->exec_file) {
53000 + cred = __task_cred(task);
53001 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
53002 + ret = gr_apply_subject_to_task(task);
53003 + if (ret) {
53004 + read_unlock(&grsec_exec_file_lock);
53005 + read_unlock(&tasklist_lock);
53006 + rcu_read_unlock();
53007 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
53008 + return ret;
53009 + }
53010 + } else {
53011 + // it's a kernel process
53012 + task->role = kernel_role;
53013 + task->acl = kernel_role->root_label;
53014 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
53015 + task->acl->mode &= ~GR_PROCFIND;
53016 +#endif
53017 + }
53018 + } while_each_thread(task2, task);
53019 + read_unlock(&grsec_exec_file_lock);
53020 + read_unlock(&tasklist_lock);
53021 + rcu_read_unlock();
53022 +
53023 + return 0;
53024 +}
53025 +
53026 +void
53027 +gr_learn_resource(const struct task_struct *task,
53028 + const int res, const unsigned long wanted, const int gt)
53029 +{
53030 + struct acl_subject_label *acl;
53031 + const struct cred *cred;
53032 +
53033 + if (unlikely((gr_status & GR_READY) &&
53034 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
53035 + goto skip_reslog;
53036 +
53037 +#ifdef CONFIG_GRKERNSEC_RESLOG
53038 + gr_log_resource(task, res, wanted, gt);
53039 +#endif
53040 + skip_reslog:
53041 +
53042 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
53043 + return;
53044 +
53045 + acl = task->acl;
53046 +
53047 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
53048 + !(acl->resmask & (1 << (unsigned short) res))))
53049 + return;
53050 +
53051 + if (wanted >= acl->res[res].rlim_cur) {
53052 + unsigned long res_add;
53053 +
53054 + res_add = wanted;
53055 + switch (res) {
53056 + case RLIMIT_CPU:
53057 + res_add += GR_RLIM_CPU_BUMP;
53058 + break;
53059 + case RLIMIT_FSIZE:
53060 + res_add += GR_RLIM_FSIZE_BUMP;
53061 + break;
53062 + case RLIMIT_DATA:
53063 + res_add += GR_RLIM_DATA_BUMP;
53064 + break;
53065 + case RLIMIT_STACK:
53066 + res_add += GR_RLIM_STACK_BUMP;
53067 + break;
53068 + case RLIMIT_CORE:
53069 + res_add += GR_RLIM_CORE_BUMP;
53070 + break;
53071 + case RLIMIT_RSS:
53072 + res_add += GR_RLIM_RSS_BUMP;
53073 + break;
53074 + case RLIMIT_NPROC:
53075 + res_add += GR_RLIM_NPROC_BUMP;
53076 + break;
53077 + case RLIMIT_NOFILE:
53078 + res_add += GR_RLIM_NOFILE_BUMP;
53079 + break;
53080 + case RLIMIT_MEMLOCK:
53081 + res_add += GR_RLIM_MEMLOCK_BUMP;
53082 + break;
53083 + case RLIMIT_AS:
53084 + res_add += GR_RLIM_AS_BUMP;
53085 + break;
53086 + case RLIMIT_LOCKS:
53087 + res_add += GR_RLIM_LOCKS_BUMP;
53088 + break;
53089 + case RLIMIT_SIGPENDING:
53090 + res_add += GR_RLIM_SIGPENDING_BUMP;
53091 + break;
53092 + case RLIMIT_MSGQUEUE:
53093 + res_add += GR_RLIM_MSGQUEUE_BUMP;
53094 + break;
53095 + case RLIMIT_NICE:
53096 + res_add += GR_RLIM_NICE_BUMP;
53097 + break;
53098 + case RLIMIT_RTPRIO:
53099 + res_add += GR_RLIM_RTPRIO_BUMP;
53100 + break;
53101 + case RLIMIT_RTTIME:
53102 + res_add += GR_RLIM_RTTIME_BUMP;
53103 + break;
53104 + }
53105 +
53106 + acl->res[res].rlim_cur = res_add;
53107 +
53108 + if (wanted > acl->res[res].rlim_max)
53109 + acl->res[res].rlim_max = res_add;
53110 +
53111 + /* only log the subject filename, since resource logging is supported for
53112 + single-subject learning only */
53113 + rcu_read_lock();
53114 + cred = __task_cred(task);
53115 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
53116 + task->role->roletype, cred->uid, cred->gid, acl->filename,
53117 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
53118 + "", (unsigned long) res, &task->signal->saved_ip);
53119 + rcu_read_unlock();
53120 + }
53121 +
53122 + return;
53123 +}
53124 +
53125 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
53126 +void
53127 +pax_set_initial_flags(struct linux_binprm *bprm)
53128 +{
53129 + struct task_struct *task = current;
53130 + struct acl_subject_label *proc;
53131 + unsigned long flags;
53132 +
53133 + if (unlikely(!(gr_status & GR_READY)))
53134 + return;
53135 +
53136 + flags = pax_get_flags(task);
53137 +
53138 + proc = task->acl;
53139 +
53140 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
53141 + flags &= ~MF_PAX_PAGEEXEC;
53142 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
53143 + flags &= ~MF_PAX_SEGMEXEC;
53144 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
53145 + flags &= ~MF_PAX_RANDMMAP;
53146 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
53147 + flags &= ~MF_PAX_EMUTRAMP;
53148 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
53149 + flags &= ~MF_PAX_MPROTECT;
53150 +
53151 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
53152 + flags |= MF_PAX_PAGEEXEC;
53153 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
53154 + flags |= MF_PAX_SEGMEXEC;
53155 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
53156 + flags |= MF_PAX_RANDMMAP;
53157 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
53158 + flags |= MF_PAX_EMUTRAMP;
53159 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
53160 + flags |= MF_PAX_MPROTECT;
53161 +
53162 + pax_set_flags(task, flags);
53163 +
53164 + return;
53165 +}
53166 +#endif
53167 +
53168 +#ifdef CONFIG_SYSCTL
53169 +/* Eric Biederman likes breaking userland ABI and every inode-based security
53170 + system to save 35kb of memory */
53171 +
53172 +/* we modify the passed in filename, but adjust it back before returning */
53173 +static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
53174 +{
53175 + struct name_entry *nmatch;
53176 + char *p, *lastp = NULL;
53177 + struct acl_object_label *obj = NULL, *tmp;
53178 + struct acl_subject_label *tmpsubj;
53179 + char c = '\0';
53180 +
53181 + read_lock(&gr_inode_lock);
53182 +
53183 + p = name + len - 1;
53184 + do {
53185 + nmatch = lookup_name_entry(name);
53186 + if (lastp != NULL)
53187 + *lastp = c;
53188 +
53189 + if (nmatch == NULL)
53190 + goto next_component;
53191 + tmpsubj = current->acl;
53192 + do {
53193 + obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
53194 + if (obj != NULL) {
53195 + tmp = obj->globbed;
53196 + while (tmp) {
53197 + if (!glob_match(tmp->filename, name)) {
53198 + obj = tmp;
53199 + goto found_obj;
53200 + }
53201 + tmp = tmp->next;
53202 + }
53203 + goto found_obj;
53204 + }
53205 + } while ((tmpsubj = tmpsubj->parent_subject));
53206 +next_component:
53207 + /* end case */
53208 + if (p == name)
53209 + break;
53210 +
53211 + while (*p != '/')
53212 + p--;
53213 + if (p == name)
53214 + lastp = p + 1;
53215 + else {
53216 + lastp = p;
53217 + p--;
53218 + }
53219 + c = *lastp;
53220 + *lastp = '\0';
53221 + } while (1);
53222 +found_obj:
53223 + read_unlock(&gr_inode_lock);
53224 + /* obj returned will always be non-null */
53225 + return obj;
53226 +}
53227 +
53228 +/* returns 0 when allowing, non-zero on error
53229 + op of 0 is used for readdir, so we don't log the names of hidden files
53230 +*/
53231 +__u32
53232 +gr_handle_sysctl(const struct ctl_table *table, const int op)
53233 +{
53234 + struct ctl_table *tmp;
53235 + const char *proc_sys = "/proc/sys";
53236 + char *path;
53237 + struct acl_object_label *obj;
53238 + unsigned short len = 0, pos = 0, depth = 0, i;
53239 + __u32 err = 0;
53240 + __u32 mode = 0;
53241 +
53242 + if (unlikely(!(gr_status & GR_READY)))
53243 + return 0;
53244 +
53245 + /* for now, ignore operations on non-sysctl entries if it's not a
53246 + readdir*/
53247 + if (table->child != NULL && op != 0)
53248 + return 0;
53249 +
53250 + mode |= GR_FIND;
53251 + /* it's only a read if it's an entry, read on dirs is for readdir */
53252 + if (op & MAY_READ)
53253 + mode |= GR_READ;
53254 + if (op & MAY_WRITE)
53255 + mode |= GR_WRITE;
53256 +
53257 + preempt_disable();
53258 +
53259 + path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
53260 +
53261 + /* it's only a read/write if it's an actual entry, not a dir
53262 + (which are opened for readdir)
53263 + */
53264 +
53265 + /* convert the requested sysctl entry into a pathname */
53266 +
53267 + for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
53268 + len += strlen(tmp->procname);
53269 + len++;
53270 + depth++;
53271 + }
53272 +
53273 + if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
53274 + /* deny */
53275 + goto out;
53276 + }
53277 +
53278 + memset(path, 0, PAGE_SIZE);
53279 +
53280 + memcpy(path, proc_sys, strlen(proc_sys));
53281 +
53282 + pos += strlen(proc_sys);
53283 +
53284 + for (; depth > 0; depth--) {
53285 + path[pos] = '/';
53286 + pos++;
53287 + for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
53288 + if (depth == i) {
53289 + memcpy(path + pos, tmp->procname,
53290 + strlen(tmp->procname));
53291 + pos += strlen(tmp->procname);
53292 + }
53293 + i++;
53294 + }
53295 + }
53296 +
53297 + obj = gr_lookup_by_name(path, pos);
53298 + err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
53299 +
53300 + if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
53301 + ((err & mode) != mode))) {
53302 + __u32 new_mode = mode;
53303 +
53304 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
53305 +
53306 + err = 0;
53307 + gr_log_learn_sysctl(path, new_mode);
53308 + } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
53309 + gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
53310 + err = -ENOENT;
53311 + } else if (!(err & GR_FIND)) {
53312 + err = -ENOENT;
53313 + } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
53314 + gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
53315 + path, (mode & GR_READ) ? " reading" : "",
53316 + (mode & GR_WRITE) ? " writing" : "");
53317 + err = -EACCES;
53318 + } else if ((err & mode) != mode) {
53319 + err = -EACCES;
53320 + } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
53321 + gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
53322 + path, (mode & GR_READ) ? " reading" : "",
53323 + (mode & GR_WRITE) ? " writing" : "");
53324 + err = 0;
53325 + } else
53326 + err = 0;
53327 +
53328 + out:
53329 + preempt_enable();
53330 +
53331 + return err;
53332 +}
53333 +#endif
53334 +
53335 +int
53336 +gr_handle_proc_ptrace(struct task_struct *task)
53337 +{
53338 + struct file *filp;
53339 + struct task_struct *tmp = task;
53340 + struct task_struct *curtemp = current;
53341 + __u32 retmode;
53342 +
53343 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
53344 + if (unlikely(!(gr_status & GR_READY)))
53345 + return 0;
53346 +#endif
53347 +
53348 + read_lock(&tasklist_lock);
53349 + read_lock(&grsec_exec_file_lock);
53350 + filp = task->exec_file;
53351 +
53352 + while (tmp->pid > 0) {
53353 + if (tmp == curtemp)
53354 + break;
53355 + tmp = tmp->real_parent;
53356 + }
53357 +
53358 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
53359 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
53360 + read_unlock(&grsec_exec_file_lock);
53361 + read_unlock(&tasklist_lock);
53362 + return 1;
53363 + }
53364 +
53365 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
53366 + if (!(gr_status & GR_READY)) {
53367 + read_unlock(&grsec_exec_file_lock);
53368 + read_unlock(&tasklist_lock);
53369 + return 0;
53370 + }
53371 +#endif
53372 +
53373 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
53374 + read_unlock(&grsec_exec_file_lock);
53375 + read_unlock(&tasklist_lock);
53376 +
53377 + if (retmode & GR_NOPTRACE)
53378 + return 1;
53379 +
53380 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
53381 + && (current->acl != task->acl || (current->acl != current->role->root_label
53382 + && current->pid != task->pid)))
53383 + return 1;
53384 +
53385 + return 0;
53386 +}
53387 +
53388 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
53389 +{
53390 + if (unlikely(!(gr_status & GR_READY)))
53391 + return;
53392 +
53393 + if (!(current->role->roletype & GR_ROLE_GOD))
53394 + return;
53395 +
53396 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
53397 + p->role->rolename, gr_task_roletype_to_char(p),
53398 + p->acl->filename);
53399 +}
53400 +
53401 +int
53402 +gr_handle_ptrace(struct task_struct *task, const long request)
53403 +{
53404 + struct task_struct *tmp = task;
53405 + struct task_struct *curtemp = current;
53406 + __u32 retmode;
53407 +
53408 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
53409 + if (unlikely(!(gr_status & GR_READY)))
53410 + return 0;
53411 +#endif
53412 +
53413 + read_lock(&tasklist_lock);
53414 + while (tmp->pid > 0) {
53415 + if (tmp == curtemp)
53416 + break;
53417 + tmp = tmp->real_parent;
53418 + }
53419 +
53420 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
53421 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
53422 + read_unlock(&tasklist_lock);
53423 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
53424 + return 1;
53425 + }
53426 + read_unlock(&tasklist_lock);
53427 +
53428 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
53429 + if (!(gr_status & GR_READY))
53430 + return 0;
53431 +#endif
53432 +
53433 + read_lock(&grsec_exec_file_lock);
53434 + if (unlikely(!task->exec_file)) {
53435 + read_unlock(&grsec_exec_file_lock);
53436 + return 0;
53437 + }
53438 +
53439 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
53440 + read_unlock(&grsec_exec_file_lock);
53441 +
53442 + if (retmode & GR_NOPTRACE) {
53443 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
53444 + return 1;
53445 + }
53446 +
53447 + if (retmode & GR_PTRACERD) {
53448 + switch (request) {
53449 + case PTRACE_SEIZE:
53450 + case PTRACE_POKETEXT:
53451 + case PTRACE_POKEDATA:
53452 + case PTRACE_POKEUSR:
53453 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
53454 + case PTRACE_SETREGS:
53455 + case PTRACE_SETFPREGS:
53456 +#endif
53457 +#ifdef CONFIG_X86
53458 + case PTRACE_SETFPXREGS:
53459 +#endif
53460 +#ifdef CONFIG_ALTIVEC
53461 + case PTRACE_SETVRREGS:
53462 +#endif
53463 + return 1;
53464 + default:
53465 + return 0;
53466 + }
53467 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
53468 + !(current->role->roletype & GR_ROLE_GOD) &&
53469 + (current->acl != task->acl)) {
53470 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
53471 + return 1;
53472 + }
53473 +
53474 + return 0;
53475 +}
53476 +
53477 +static int is_writable_mmap(const struct file *filp)
53478 +{
53479 + struct task_struct *task = current;
53480 + struct acl_object_label *obj, *obj2;
53481 +
53482 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
53483 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
53484 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
53485 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
53486 + task->role->root_label);
53487 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
53488 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
53489 + return 1;
53490 + }
53491 + }
53492 + return 0;
53493 +}
53494 +
53495 +int
53496 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
53497 +{
53498 + __u32 mode;
53499 +
53500 + if (unlikely(!file || !(prot & PROT_EXEC)))
53501 + return 1;
53502 +
53503 + if (is_writable_mmap(file))
53504 + return 0;
53505 +
53506 + mode =
53507 + gr_search_file(file->f_path.dentry,
53508 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
53509 + file->f_path.mnt);
53510 +
53511 + if (!gr_tpe_allow(file))
53512 + return 0;
53513 +
53514 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
53515 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
53516 + return 0;
53517 + } else if (unlikely(!(mode & GR_EXEC))) {
53518 + return 0;
53519 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
53520 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
53521 + return 1;
53522 + }
53523 +
53524 + return 1;
53525 +}
53526 +
53527 +int
53528 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
53529 +{
53530 + __u32 mode;
53531 +
53532 + if (unlikely(!file || !(prot & PROT_EXEC)))
53533 + return 1;
53534 +
53535 + if (is_writable_mmap(file))
53536 + return 0;
53537 +
53538 + mode =
53539 + gr_search_file(file->f_path.dentry,
53540 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
53541 + file->f_path.mnt);
53542 +
53543 + if (!gr_tpe_allow(file))
53544 + return 0;
53545 +
53546 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
53547 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
53548 + return 0;
53549 + } else if (unlikely(!(mode & GR_EXEC))) {
53550 + return 0;
53551 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
53552 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
53553 + return 1;
53554 + }
53555 +
53556 + return 1;
53557 +}
53558 +
53559 +void
53560 +gr_acl_handle_psacct(struct task_struct *task, const long code)
53561 +{
53562 + unsigned long runtime;
53563 + unsigned long cputime;
53564 + unsigned int wday, cday;
53565 + __u8 whr, chr;
53566 + __u8 wmin, cmin;
53567 + __u8 wsec, csec;
53568 + struct timespec timeval;
53569 +
53570 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
53571 + !(task->acl->mode & GR_PROCACCT)))
53572 + return;
53573 +
53574 + do_posix_clock_monotonic_gettime(&timeval);
53575 + runtime = timeval.tv_sec - task->start_time.tv_sec;
53576 + wday = runtime / (3600 * 24);
53577 + runtime -= wday * (3600 * 24);
53578 + whr = runtime / 3600;
53579 + runtime -= whr * 3600;
53580 + wmin = runtime / 60;
53581 + runtime -= wmin * 60;
53582 + wsec = runtime;
53583 +
53584 + cputime = (task->utime + task->stime) / HZ;
53585 + cday = cputime / (3600 * 24);
53586 + cputime -= cday * (3600 * 24);
53587 + chr = cputime / 3600;
53588 + cputime -= chr * 3600;
53589 + cmin = cputime / 60;
53590 + cputime -= cmin * 60;
53591 + csec = cputime;
53592 +
53593 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
53594 +
53595 + return;
53596 +}
53597 +
53598 +void gr_set_kernel_label(struct task_struct *task)
53599 +{
53600 + if (gr_status & GR_READY) {
53601 + task->role = kernel_role;
53602 + task->acl = kernel_role->root_label;
53603 + }
53604 + return;
53605 +}
53606 +
53607 +#ifdef CONFIG_TASKSTATS
53608 +int gr_is_taskstats_denied(int pid)
53609 +{
53610 + struct task_struct *task;
53611 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53612 + const struct cred *cred;
53613 +#endif
53614 + int ret = 0;
53615 +
53616 + /* restrict taskstats viewing to un-chrooted root users
53617 + who have the 'view' subject flag if the RBAC system is enabled
53618 + */
53619 +
53620 + rcu_read_lock();
53621 + read_lock(&tasklist_lock);
53622 + task = find_task_by_vpid(pid);
53623 + if (task) {
53624 +#ifdef CONFIG_GRKERNSEC_CHROOT
53625 + if (proc_is_chrooted(task))
53626 + ret = -EACCES;
53627 +#endif
53628 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53629 + cred = __task_cred(task);
53630 +#ifdef CONFIG_GRKERNSEC_PROC_USER
53631 + if (cred->uid != 0)
53632 + ret = -EACCES;
53633 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53634 + if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
53635 + ret = -EACCES;
53636 +#endif
53637 +#endif
53638 + if (gr_status & GR_READY) {
53639 + if (!(task->acl->mode & GR_VIEW))
53640 + ret = -EACCES;
53641 + }
53642 + } else
53643 + ret = -ENOENT;
53644 +
53645 + read_unlock(&tasklist_lock);
53646 + rcu_read_unlock();
53647 +
53648 + return ret;
53649 +}
53650 +#endif
53651 +
53652 +/* AUXV entries are filled via a descendant of search_binary_handler
53653 + after we've already applied the subject for the target
53654 +*/
53655 +int gr_acl_enable_at_secure(void)
53656 +{
53657 + if (unlikely(!(gr_status & GR_READY)))
53658 + return 0;
53659 +
53660 + if (current->acl->mode & GR_ATSECURE)
53661 + return 1;
53662 +
53663 + return 0;
53664 +}
53665 +
53666 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
53667 +{
53668 + struct task_struct *task = current;
53669 + struct dentry *dentry = file->f_path.dentry;
53670 + struct vfsmount *mnt = file->f_path.mnt;
53671 + struct acl_object_label *obj, *tmp;
53672 + struct acl_subject_label *subj;
53673 + unsigned int bufsize;
53674 + int is_not_root;
53675 + char *path;
53676 + dev_t dev = __get_dev(dentry);
53677 +
53678 + if (unlikely(!(gr_status & GR_READY)))
53679 + return 1;
53680 +
53681 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
53682 + return 1;
53683 +
53684 + /* ignore Eric Biederman */
53685 + if (IS_PRIVATE(dentry->d_inode))
53686 + return 1;
53687 +
53688 + subj = task->acl;
53689 + do {
53690 + obj = lookup_acl_obj_label(ino, dev, subj);
53691 + if (obj != NULL)
53692 + return (obj->mode & GR_FIND) ? 1 : 0;
53693 + } while ((subj = subj->parent_subject));
53694 +
53695 + /* this is purely an optimization since we're looking for an object
53696 + for the directory we're doing a readdir on
53697 + if it's possible for any globbed object to match the entry we're
53698 + filling into the directory, then the object we find here will be
53699 + an anchor point with attached globbed objects
53700 + */
53701 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
53702 + if (obj->globbed == NULL)
53703 + return (obj->mode & GR_FIND) ? 1 : 0;
53704 +
53705 + is_not_root = ((obj->filename[0] == '/') &&
53706 + (obj->filename[1] == '\0')) ? 0 : 1;
53707 + bufsize = PAGE_SIZE - namelen - is_not_root;
53708 +
53709 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
53710 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
53711 + return 1;
53712 +
53713 + preempt_disable();
53714 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
53715 + bufsize);
53716 +
53717 + bufsize = strlen(path);
53718 +
53719 + /* if base is "/", don't append an additional slash */
53720 + if (is_not_root)
53721 + *(path + bufsize) = '/';
53722 + memcpy(path + bufsize + is_not_root, name, namelen);
53723 + *(path + bufsize + namelen + is_not_root) = '\0';
53724 +
53725 + tmp = obj->globbed;
53726 + while (tmp) {
53727 + if (!glob_match(tmp->filename, path)) {
53728 + preempt_enable();
53729 + return (tmp->mode & GR_FIND) ? 1 : 0;
53730 + }
53731 + tmp = tmp->next;
53732 + }
53733 + preempt_enable();
53734 + return (obj->mode & GR_FIND) ? 1 : 0;
53735 +}
53736 +
53737 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
53738 +EXPORT_SYMBOL(gr_acl_is_enabled);
53739 +#endif
53740 +EXPORT_SYMBOL(gr_learn_resource);
53741 +EXPORT_SYMBOL(gr_set_kernel_label);
53742 +#ifdef CONFIG_SECURITY
53743 +EXPORT_SYMBOL(gr_check_user_change);
53744 +EXPORT_SYMBOL(gr_check_group_change);
53745 +#endif
53746 +
53747 diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
53748 new file mode 100644
53749 index 0000000..34fefda
53750 --- /dev/null
53751 +++ b/grsecurity/gracl_alloc.c
53752 @@ -0,0 +1,105 @@
53753 +#include <linux/kernel.h>
53754 +#include <linux/mm.h>
53755 +#include <linux/slab.h>
53756 +#include <linux/vmalloc.h>
53757 +#include <linux/gracl.h>
53758 +#include <linux/grsecurity.h>
53759 +
53760 +static unsigned long alloc_stack_next = 1;
53761 +static unsigned long alloc_stack_size = 1;
53762 +static void **alloc_stack;
53763 +
53764 +static __inline__ int
53765 +alloc_pop(void)
53766 +{
53767 + if (alloc_stack_next == 1)
53768 + return 0;
53769 +
53770 + kfree(alloc_stack[alloc_stack_next - 2]);
53771 +
53772 + alloc_stack_next--;
53773 +
53774 + return 1;
53775 +}
53776 +
53777 +static __inline__ int
53778 +alloc_push(void *buf)
53779 +{
53780 + if (alloc_stack_next >= alloc_stack_size)
53781 + return 1;
53782 +
53783 + alloc_stack[alloc_stack_next - 1] = buf;
53784 +
53785 + alloc_stack_next++;
53786 +
53787 + return 0;
53788 +}
53789 +
53790 +void *
53791 +acl_alloc(unsigned long len)
53792 +{
53793 + void *ret = NULL;
53794 +
53795 + if (!len || len > PAGE_SIZE)
53796 + goto out;
53797 +
53798 + ret = kmalloc(len, GFP_KERNEL);
53799 +
53800 + if (ret) {
53801 + if (alloc_push(ret)) {
53802 + kfree(ret);
53803 + ret = NULL;
53804 + }
53805 + }
53806 +
53807 +out:
53808 + return ret;
53809 +}
53810 +
53811 +void *
53812 +acl_alloc_num(unsigned long num, unsigned long len)
53813 +{
53814 + if (!len || (num > (PAGE_SIZE / len)))
53815 + return NULL;
53816 +
53817 + return acl_alloc(num * len);
53818 +}
53819 +
53820 +void
53821 +acl_free_all(void)
53822 +{
53823 + if (gr_acl_is_enabled() || !alloc_stack)
53824 + return;
53825 +
53826 + while (alloc_pop()) ;
53827 +
53828 + if (alloc_stack) {
53829 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
53830 + kfree(alloc_stack);
53831 + else
53832 + vfree(alloc_stack);
53833 + }
53834 +
53835 + alloc_stack = NULL;
53836 + alloc_stack_size = 1;
53837 + alloc_stack_next = 1;
53838 +
53839 + return;
53840 +}
53841 +
53842 +int
53843 +acl_alloc_stack_init(unsigned long size)
53844 +{
53845 + if ((size * sizeof (void *)) <= PAGE_SIZE)
53846 + alloc_stack =
53847 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
53848 + else
53849 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
53850 +
53851 + alloc_stack_size = size;
53852 +
53853 + if (!alloc_stack)
53854 + return 0;
53855 + else
53856 + return 1;
53857 +}
53858 diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
53859 new file mode 100644
53860 index 0000000..6d21049
53861 --- /dev/null
53862 +++ b/grsecurity/gracl_cap.c
53863 @@ -0,0 +1,110 @@
53864 +#include <linux/kernel.h>
53865 +#include <linux/module.h>
53866 +#include <linux/sched.h>
53867 +#include <linux/gracl.h>
53868 +#include <linux/grsecurity.h>
53869 +#include <linux/grinternal.h>
53870 +
53871 +extern const char *captab_log[];
53872 +extern int captab_log_entries;
53873 +
53874 +int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
53875 +{
53876 + struct acl_subject_label *curracl;
53877 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
53878 + kernel_cap_t cap_audit = __cap_empty_set;
53879 +
53880 + if (!gr_acl_is_enabled())
53881 + return 1;
53882 +
53883 + curracl = task->acl;
53884 +
53885 + cap_drop = curracl->cap_lower;
53886 + cap_mask = curracl->cap_mask;
53887 + cap_audit = curracl->cap_invert_audit;
53888 +
53889 + while ((curracl = curracl->parent_subject)) {
53890 + /* if the cap isn't specified in the current computed mask but is specified in the
53891 + current level subject, and is lowered in the current level subject, then add
53892 + it to the set of dropped capabilities
53893 + otherwise, add the current level subject's mask to the current computed mask
53894 + */
53895 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
53896 + cap_raise(cap_mask, cap);
53897 + if (cap_raised(curracl->cap_lower, cap))
53898 + cap_raise(cap_drop, cap);
53899 + if (cap_raised(curracl->cap_invert_audit, cap))
53900 + cap_raise(cap_audit, cap);
53901 + }
53902 + }
53903 +
53904 + if (!cap_raised(cap_drop, cap)) {
53905 + if (cap_raised(cap_audit, cap))
53906 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
53907 + return 1;
53908 + }
53909 +
53910 + curracl = task->acl;
53911 +
53912 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
53913 + && cap_raised(cred->cap_effective, cap)) {
53914 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
53915 + task->role->roletype, cred->uid,
53916 + cred->gid, task->exec_file ?
53917 + gr_to_filename(task->exec_file->f_path.dentry,
53918 + task->exec_file->f_path.mnt) : curracl->filename,
53919 + curracl->filename, 0UL,
53920 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
53921 + return 1;
53922 + }
53923 +
53924 + if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
53925 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
53926 +
53927 + return 0;
53928 +}
53929 +
53930 +int
53931 +gr_acl_is_capable(const int cap)
53932 +{
53933 + return gr_task_acl_is_capable(current, current_cred(), cap);
53934 +}
53935 +
53936 +int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
53937 +{
53938 + struct acl_subject_label *curracl;
53939 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
53940 +
53941 + if (!gr_acl_is_enabled())
53942 + return 1;
53943 +
53944 + curracl = task->acl;
53945 +
53946 + cap_drop = curracl->cap_lower;
53947 + cap_mask = curracl->cap_mask;
53948 +
53949 + while ((curracl = curracl->parent_subject)) {
53950 + /* if the cap isn't specified in the current computed mask but is specified in the
53951 + current level subject, and is lowered in the current level subject, then add
53952 + it to the set of dropped capabilities
53953 + otherwise, add the current level subject's mask to the current computed mask
53954 + */
53955 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
53956 + cap_raise(cap_mask, cap);
53957 + if (cap_raised(curracl->cap_lower, cap))
53958 + cap_raise(cap_drop, cap);
53959 + }
53960 + }
53961 +
53962 + if (!cap_raised(cap_drop, cap))
53963 + return 1;
53964 +
53965 + return 0;
53966 +}
53967 +
53968 +int
53969 +gr_acl_is_capable_nolog(const int cap)
53970 +{
53971 + return gr_task_acl_is_capable_nolog(current, cap);
53972 +}
53973 +
53974 diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
53975 new file mode 100644
53976 index 0000000..88d0e87
53977 --- /dev/null
53978 +++ b/grsecurity/gracl_fs.c
53979 @@ -0,0 +1,435 @@
53980 +#include <linux/kernel.h>
53981 +#include <linux/sched.h>
53982 +#include <linux/types.h>
53983 +#include <linux/fs.h>
53984 +#include <linux/file.h>
53985 +#include <linux/stat.h>
53986 +#include <linux/grsecurity.h>
53987 +#include <linux/grinternal.h>
53988 +#include <linux/gracl.h>
53989 +
53990 +umode_t
53991 +gr_acl_umask(void)
53992 +{
53993 + if (unlikely(!gr_acl_is_enabled()))
53994 + return 0;
53995 +
53996 + return current->role->umask;
53997 +}
53998 +
53999 +__u32
54000 +gr_acl_handle_hidden_file(const struct dentry * dentry,
54001 + const struct vfsmount * mnt)
54002 +{
54003 + __u32 mode;
54004 +
54005 + if (unlikely(!dentry->d_inode))
54006 + return GR_FIND;
54007 +
54008 + mode =
54009 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
54010 +
54011 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
54012 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
54013 + return mode;
54014 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
54015 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
54016 + return 0;
54017 + } else if (unlikely(!(mode & GR_FIND)))
54018 + return 0;
54019 +
54020 + return GR_FIND;
54021 +}
54022 +
54023 +__u32
54024 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
54025 + int acc_mode)
54026 +{
54027 + __u32 reqmode = GR_FIND;
54028 + __u32 mode;
54029 +
54030 + if (unlikely(!dentry->d_inode))
54031 + return reqmode;
54032 +
54033 + if (acc_mode & MAY_APPEND)
54034 + reqmode |= GR_APPEND;
54035 + else if (acc_mode & MAY_WRITE)
54036 + reqmode |= GR_WRITE;
54037 + if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
54038 + reqmode |= GR_READ;
54039 +
54040 + mode =
54041 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
54042 + mnt);
54043 +
54044 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
54045 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
54046 + reqmode & GR_READ ? " reading" : "",
54047 + reqmode & GR_WRITE ? " writing" : reqmode &
54048 + GR_APPEND ? " appending" : "");
54049 + return reqmode;
54050 + } else
54051 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
54052 + {
54053 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
54054 + reqmode & GR_READ ? " reading" : "",
54055 + reqmode & GR_WRITE ? " writing" : reqmode &
54056 + GR_APPEND ? " appending" : "");
54057 + return 0;
54058 + } else if (unlikely((mode & reqmode) != reqmode))
54059 + return 0;
54060 +
54061 + return reqmode;
54062 +}
54063 +
54064 +__u32
54065 +gr_acl_handle_creat(const struct dentry * dentry,
54066 + const struct dentry * p_dentry,
54067 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
54068 + const int imode)
54069 +{
54070 + __u32 reqmode = GR_WRITE | GR_CREATE;
54071 + __u32 mode;
54072 +
54073 + if (acc_mode & MAY_APPEND)
54074 + reqmode |= GR_APPEND;
54075 + // if a directory was required or the directory already exists, then
54076 + // don't count this open as a read
54077 + if ((acc_mode & MAY_READ) &&
54078 + !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
54079 + reqmode |= GR_READ;
54080 + if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
54081 + reqmode |= GR_SETID;
54082 +
54083 + mode =
54084 + gr_check_create(dentry, p_dentry, p_mnt,
54085 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
54086 +
54087 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
54088 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
54089 + reqmode & GR_READ ? " reading" : "",
54090 + reqmode & GR_WRITE ? " writing" : reqmode &
54091 + GR_APPEND ? " appending" : "");
54092 + return reqmode;
54093 + } else
54094 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
54095 + {
54096 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
54097 + reqmode & GR_READ ? " reading" : "",
54098 + reqmode & GR_WRITE ? " writing" : reqmode &
54099 + GR_APPEND ? " appending" : "");
54100 + return 0;
54101 + } else if (unlikely((mode & reqmode) != reqmode))
54102 + return 0;
54103 +
54104 + return reqmode;
54105 +}
54106 +
54107 +__u32
54108 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
54109 + const int fmode)
54110 +{
54111 + __u32 mode, reqmode = GR_FIND;
54112 +
54113 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
54114 + reqmode |= GR_EXEC;
54115 + if (fmode & S_IWOTH)
54116 + reqmode |= GR_WRITE;
54117 + if (fmode & S_IROTH)
54118 + reqmode |= GR_READ;
54119 +
54120 + mode =
54121 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
54122 + mnt);
54123 +
54124 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
54125 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
54126 + reqmode & GR_READ ? " reading" : "",
54127 + reqmode & GR_WRITE ? " writing" : "",
54128 + reqmode & GR_EXEC ? " executing" : "");
54129 + return reqmode;
54130 + } else
54131 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
54132 + {
54133 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
54134 + reqmode & GR_READ ? " reading" : "",
54135 + reqmode & GR_WRITE ? " writing" : "",
54136 + reqmode & GR_EXEC ? " executing" : "");
54137 + return 0;
54138 + } else if (unlikely((mode & reqmode) != reqmode))
54139 + return 0;
54140 +
54141 + return reqmode;
54142 +}
54143 +
54144 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
54145 +{
54146 + __u32 mode;
54147 +
54148 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
54149 +
54150 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
54151 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
54152 + return mode;
54153 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
54154 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
54155 + return 0;
54156 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
54157 + return 0;
54158 +
54159 + return (reqmode);
54160 +}
54161 +
54162 +__u32
54163 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
54164 +{
54165 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
54166 +}
54167 +
54168 +__u32
54169 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
54170 +{
54171 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
54172 +}
54173 +
54174 +__u32
54175 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
54176 +{
54177 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
54178 +}
54179 +
54180 +__u32
54181 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
54182 +{
54183 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
54184 +}
54185 +
54186 +__u32
54187 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
54188 + umode_t *modeptr)
54189 +{
54190 + umode_t mode;
54191 +
54192 + *modeptr &= ~gr_acl_umask();
54193 + mode = *modeptr;
54194 +
54195 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
54196 + return 1;
54197 +
54198 + if (unlikely(mode & (S_ISUID | S_ISGID))) {
54199 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
54200 + GR_CHMOD_ACL_MSG);
54201 + } else {
54202 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
54203 + }
54204 +}
54205 +
54206 +__u32
54207 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
54208 +{
54209 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
54210 +}
54211 +
54212 +__u32
54213 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
54214 +{
54215 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
54216 +}
54217 +
54218 +__u32
54219 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
54220 +{
54221 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
54222 +}
54223 +
54224 +__u32
54225 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
54226 +{
54227 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
54228 + GR_UNIXCONNECT_ACL_MSG);
54229 +}
54230 +
54231 +/* hardlinks require at minimum create and link permission,
54232 + any additional privilege required is based on the
54233 + privilege of the file being linked to
54234 +*/
54235 +__u32
54236 +gr_acl_handle_link(const struct dentry * new_dentry,
54237 + const struct dentry * parent_dentry,
54238 + const struct vfsmount * parent_mnt,
54239 + const struct dentry * old_dentry,
54240 + const struct vfsmount * old_mnt, const char *to)
54241 +{
54242 + __u32 mode;
54243 + __u32 needmode = GR_CREATE | GR_LINK;
54244 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
54245 +
54246 + mode =
54247 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
54248 + old_mnt);
54249 +
54250 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
54251 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
54252 + return mode;
54253 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
54254 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
54255 + return 0;
54256 + } else if (unlikely((mode & needmode) != needmode))
54257 + return 0;
54258 +
54259 + return 1;
54260 +}
54261 +
54262 +__u32
54263 +gr_acl_handle_symlink(const struct dentry * new_dentry,
54264 + const struct dentry * parent_dentry,
54265 + const struct vfsmount * parent_mnt, const char *from)
54266 +{
54267 + __u32 needmode = GR_WRITE | GR_CREATE;
54268 + __u32 mode;
54269 +
54270 + mode =
54271 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
54272 + GR_CREATE | GR_AUDIT_CREATE |
54273 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
54274 +
54275 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
54276 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
54277 + return mode;
54278 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
54279 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
54280 + return 0;
54281 + } else if (unlikely((mode & needmode) != needmode))
54282 + return 0;
54283 +
54284 + return (GR_WRITE | GR_CREATE);
54285 +}
54286 +
54287 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
54288 +{
54289 + __u32 mode;
54290 +
54291 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
54292 +
54293 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
54294 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
54295 + return mode;
54296 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
54297 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
54298 + return 0;
54299 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
54300 + return 0;
54301 +
54302 + return (reqmode);
54303 +}
54304 +
54305 +__u32
54306 +gr_acl_handle_mknod(const struct dentry * new_dentry,
54307 + const struct dentry * parent_dentry,
54308 + const struct vfsmount * parent_mnt,
54309 + const int mode)
54310 +{
54311 + __u32 reqmode = GR_WRITE | GR_CREATE;
54312 + if (unlikely(mode & (S_ISUID | S_ISGID)))
54313 + reqmode |= GR_SETID;
54314 +
54315 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
54316 + reqmode, GR_MKNOD_ACL_MSG);
54317 +}
54318 +
54319 +__u32
54320 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
54321 + const struct dentry *parent_dentry,
54322 + const struct vfsmount *parent_mnt)
54323 +{
54324 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
54325 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
54326 +}
54327 +
54328 +#define RENAME_CHECK_SUCCESS(old, new) \
54329 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
54330 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
54331 +
54332 +int
54333 +gr_acl_handle_rename(struct dentry *new_dentry,
54334 + struct dentry *parent_dentry,
54335 + const struct vfsmount *parent_mnt,
54336 + struct dentry *old_dentry,
54337 + struct inode *old_parent_inode,
54338 + struct vfsmount *old_mnt, const char *newname)
54339 +{
54340 + __u32 comp1, comp2;
54341 + int error = 0;
54342 +
54343 + if (unlikely(!gr_acl_is_enabled()))
54344 + return 0;
54345 +
54346 + if (!new_dentry->d_inode) {
54347 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
54348 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
54349 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
54350 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
54351 + GR_DELETE | GR_AUDIT_DELETE |
54352 + GR_AUDIT_READ | GR_AUDIT_WRITE |
54353 + GR_SUPPRESS, old_mnt);
54354 + } else {
54355 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
54356 + GR_CREATE | GR_DELETE |
54357 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
54358 + GR_AUDIT_READ | GR_AUDIT_WRITE |
54359 + GR_SUPPRESS, parent_mnt);
54360 + comp2 =
54361 + gr_search_file(old_dentry,
54362 + GR_READ | GR_WRITE | GR_AUDIT_READ |
54363 + GR_DELETE | GR_AUDIT_DELETE |
54364 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
54365 + }
54366 +
54367 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
54368 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
54369 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
54370 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
54371 + && !(comp2 & GR_SUPPRESS)) {
54372 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
54373 + error = -EACCES;
54374 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
54375 + error = -EACCES;
54376 +
54377 + return error;
54378 +}
54379 +
54380 +void
54381 +gr_acl_handle_exit(void)
54382 +{
54383 + u16 id;
54384 + char *rolename;
54385 + struct file *exec_file;
54386 +
54387 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
54388 + !(current->role->roletype & GR_ROLE_PERSIST))) {
54389 + id = current->acl_role_id;
54390 + rolename = current->role->rolename;
54391 + gr_set_acls(1);
54392 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
54393 + }
54394 +
54395 + write_lock(&grsec_exec_file_lock);
54396 + exec_file = current->exec_file;
54397 + current->exec_file = NULL;
54398 + write_unlock(&grsec_exec_file_lock);
54399 +
54400 + if (exec_file)
54401 + fput(exec_file);
54402 +}
54403 +
54404 +int
54405 +gr_acl_handle_procpidmem(const struct task_struct *task)
54406 +{
54407 + if (unlikely(!gr_acl_is_enabled()))
54408 + return 0;
54409 +
54410 + if (task != current && task->acl->mode & GR_PROTPROCFD)
54411 + return -EACCES;
54412 +
54413 + return 0;
54414 +}
54415 diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
54416 new file mode 100644
54417 index 0000000..17050ca
54418 --- /dev/null
54419 +++ b/grsecurity/gracl_ip.c
54420 @@ -0,0 +1,381 @@
54421 +#include <linux/kernel.h>
54422 +#include <asm/uaccess.h>
54423 +#include <asm/errno.h>
54424 +#include <net/sock.h>
54425 +#include <linux/file.h>
54426 +#include <linux/fs.h>
54427 +#include <linux/net.h>
54428 +#include <linux/in.h>
54429 +#include <linux/skbuff.h>
54430 +#include <linux/ip.h>
54431 +#include <linux/udp.h>
54432 +#include <linux/types.h>
54433 +#include <linux/sched.h>
54434 +#include <linux/netdevice.h>
54435 +#include <linux/inetdevice.h>
54436 +#include <linux/gracl.h>
54437 +#include <linux/grsecurity.h>
54438 +#include <linux/grinternal.h>
54439 +
54440 +#define GR_BIND 0x01
54441 +#define GR_CONNECT 0x02
54442 +#define GR_INVERT 0x04
54443 +#define GR_BINDOVERRIDE 0x08
54444 +#define GR_CONNECTOVERRIDE 0x10
54445 +#define GR_SOCK_FAMILY 0x20
54446 +
54447 +static const char * gr_protocols[IPPROTO_MAX] = {
54448 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
54449 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
54450 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
54451 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
54452 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
54453 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
54454 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
54455 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
54456 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
54457 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
54458 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
54459 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
54460 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
54461 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
54462 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
54463 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
54464 + "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
54465 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
54466 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
54467 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
54468 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
54469 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
54470 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
54471 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
54472 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
54473 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
54474 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
54475 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
54476 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
54477 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
54478 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
54479 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
54480 + };
54481 +
54482 +static const char * gr_socktypes[SOCK_MAX] = {
54483 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
54484 + "unknown:7", "unknown:8", "unknown:9", "packet"
54485 + };
54486 +
54487 +static const char * gr_sockfamilies[AF_MAX+1] = {
54488 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
54489 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
54490 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
54491 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
54492 + };
54493 +
54494 +const char *
54495 +gr_proto_to_name(unsigned char proto)
54496 +{
54497 + return gr_protocols[proto];
54498 +}
54499 +
54500 +const char *
54501 +gr_socktype_to_name(unsigned char type)
54502 +{
54503 + return gr_socktypes[type];
54504 +}
54505 +
54506 +const char *
54507 +gr_sockfamily_to_name(unsigned char family)
54508 +{
54509 + return gr_sockfamilies[family];
54510 +}
54511 +
54512 +int
54513 +gr_search_socket(const int domain, const int type, const int protocol)
54514 +{
54515 + struct acl_subject_label *curr;
54516 + const struct cred *cred = current_cred();
54517 +
54518 + if (unlikely(!gr_acl_is_enabled()))
54519 + goto exit;
54520 +
54521 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
54522 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
54523 + goto exit; // let the kernel handle it
54524 +
54525 + curr = current->acl;
54526 +
54527 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
54528 + /* the family is allowed, if this is PF_INET allow it only if
54529 + the extra sock type/protocol checks pass */
54530 + if (domain == PF_INET)
54531 + goto inet_check;
54532 + goto exit;
54533 + } else {
54534 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
54535 + __u32 fakeip = 0;
54536 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
54537 + current->role->roletype, cred->uid,
54538 + cred->gid, current->exec_file ?
54539 + gr_to_filename(current->exec_file->f_path.dentry,
54540 + current->exec_file->f_path.mnt) :
54541 + curr->filename, curr->filename,
54542 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
54543 + &current->signal->saved_ip);
54544 + goto exit;
54545 + }
54546 + goto exit_fail;
54547 + }
54548 +
54549 +inet_check:
54550 + /* the rest of this checking is for IPv4 only */
54551 + if (!curr->ips)
54552 + goto exit;
54553 +
54554 + if ((curr->ip_type & (1 << type)) &&
54555 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
54556 + goto exit;
54557 +
54558 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
54559 + /* we don't place acls on raw sockets , and sometimes
54560 + dgram/ip sockets are opened for ioctl and not
54561 + bind/connect, so we'll fake a bind learn log */
54562 + if (type == SOCK_RAW || type == SOCK_PACKET) {
54563 + __u32 fakeip = 0;
54564 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
54565 + current->role->roletype, cred->uid,
54566 + cred->gid, current->exec_file ?
54567 + gr_to_filename(current->exec_file->f_path.dentry,
54568 + current->exec_file->f_path.mnt) :
54569 + curr->filename, curr->filename,
54570 + &fakeip, 0, type,
54571 + protocol, GR_CONNECT, &current->signal->saved_ip);
54572 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
54573 + __u32 fakeip = 0;
54574 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
54575 + current->role->roletype, cred->uid,
54576 + cred->gid, current->exec_file ?
54577 + gr_to_filename(current->exec_file->f_path.dentry,
54578 + current->exec_file->f_path.mnt) :
54579 + curr->filename, curr->filename,
54580 + &fakeip, 0, type,
54581 + protocol, GR_BIND, &current->signal->saved_ip);
54582 + }
54583 + /* we'll log when they use connect or bind */
54584 + goto exit;
54585 + }
54586 +
54587 +exit_fail:
54588 + if (domain == PF_INET)
54589 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
54590 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
54591 + else
54592 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
54593 + gr_socktype_to_name(type), protocol);
54594 +
54595 + return 0;
54596 +exit:
54597 + return 1;
54598 +}
54599 +
54600 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
54601 +{
54602 + if ((ip->mode & mode) &&
54603 + (ip_port >= ip->low) &&
54604 + (ip_port <= ip->high) &&
54605 + ((ntohl(ip_addr) & our_netmask) ==
54606 + (ntohl(our_addr) & our_netmask))
54607 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
54608 + && (ip->type & (1 << type))) {
54609 + if (ip->mode & GR_INVERT)
54610 + return 2; // specifically denied
54611 + else
54612 + return 1; // allowed
54613 + }
54614 +
54615 + return 0; // not specifically allowed, may continue parsing
54616 +}
54617 +
54618 +static int
54619 +gr_search_connectbind(const int full_mode, struct sock *sk,
54620 + struct sockaddr_in *addr, const int type)
54621 +{
54622 + char iface[IFNAMSIZ] = {0};
54623 + struct acl_subject_label *curr;
54624 + struct acl_ip_label *ip;
54625 + struct inet_sock *isk;
54626 + struct net_device *dev;
54627 + struct in_device *idev;
54628 + unsigned long i;
54629 + int ret;
54630 + int mode = full_mode & (GR_BIND | GR_CONNECT);
54631 + __u32 ip_addr = 0;
54632 + __u32 our_addr;
54633 + __u32 our_netmask;
54634 + char *p;
54635 + __u16 ip_port = 0;
54636 + const struct cred *cred = current_cred();
54637 +
54638 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
54639 + return 0;
54640 +
54641 + curr = current->acl;
54642 + isk = inet_sk(sk);
54643 +
54644 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
54645 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
54646 + addr->sin_addr.s_addr = curr->inaddr_any_override;
54647 + if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
54648 + struct sockaddr_in saddr;
54649 + int err;
54650 +
54651 + saddr.sin_family = AF_INET;
54652 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
54653 + saddr.sin_port = isk->inet_sport;
54654 +
54655 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
54656 + if (err)
54657 + return err;
54658 +
54659 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
54660 + if (err)
54661 + return err;
54662 + }
54663 +
54664 + if (!curr->ips)
54665 + return 0;
54666 +
54667 + ip_addr = addr->sin_addr.s_addr;
54668 + ip_port = ntohs(addr->sin_port);
54669 +
54670 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
54671 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
54672 + current->role->roletype, cred->uid,
54673 + cred->gid, current->exec_file ?
54674 + gr_to_filename(current->exec_file->f_path.dentry,
54675 + current->exec_file->f_path.mnt) :
54676 + curr->filename, curr->filename,
54677 + &ip_addr, ip_port, type,
54678 + sk->sk_protocol, mode, &current->signal->saved_ip);
54679 + return 0;
54680 + }
54681 +
54682 + for (i = 0; i < curr->ip_num; i++) {
54683 + ip = *(curr->ips + i);
54684 + if (ip->iface != NULL) {
54685 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
54686 + p = strchr(iface, ':');
54687 + if (p != NULL)
54688 + *p = '\0';
54689 + dev = dev_get_by_name(sock_net(sk), iface);
54690 + if (dev == NULL)
54691 + continue;
54692 + idev = in_dev_get(dev);
54693 + if (idev == NULL) {
54694 + dev_put(dev);
54695 + continue;
54696 + }
54697 + rcu_read_lock();
54698 + for_ifa(idev) {
54699 + if (!strcmp(ip->iface, ifa->ifa_label)) {
54700 + our_addr = ifa->ifa_address;
54701 + our_netmask = 0xffffffff;
54702 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
54703 + if (ret == 1) {
54704 + rcu_read_unlock();
54705 + in_dev_put(idev);
54706 + dev_put(dev);
54707 + return 0;
54708 + } else if (ret == 2) {
54709 + rcu_read_unlock();
54710 + in_dev_put(idev);
54711 + dev_put(dev);
54712 + goto denied;
54713 + }
54714 + }
54715 + } endfor_ifa(idev);
54716 + rcu_read_unlock();
54717 + in_dev_put(idev);
54718 + dev_put(dev);
54719 + } else {
54720 + our_addr = ip->addr;
54721 + our_netmask = ip->netmask;
54722 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
54723 + if (ret == 1)
54724 + return 0;
54725 + else if (ret == 2)
54726 + goto denied;
54727 + }
54728 + }
54729 +
54730 +denied:
54731 + if (mode == GR_BIND)
54732 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
54733 + else if (mode == GR_CONNECT)
54734 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
54735 +
54736 + return -EACCES;
54737 +}
54738 +
54739 +int
54740 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
54741 +{
54742 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
54743 +}
54744 +
54745 +int
54746 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
54747 +{
54748 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
54749 +}
54750 +
54751 +int gr_search_listen(struct socket *sock)
54752 +{
54753 + struct sock *sk = sock->sk;
54754 + struct sockaddr_in addr;
54755 +
54756 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
54757 + addr.sin_port = inet_sk(sk)->inet_sport;
54758 +
54759 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
54760 +}
54761 +
54762 +int gr_search_accept(struct socket *sock)
54763 +{
54764 + struct sock *sk = sock->sk;
54765 + struct sockaddr_in addr;
54766 +
54767 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
54768 + addr.sin_port = inet_sk(sk)->inet_sport;
54769 +
54770 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
54771 +}
54772 +
54773 +int
54774 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
54775 +{
54776 + if (addr)
54777 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
54778 + else {
54779 + struct sockaddr_in sin;
54780 + const struct inet_sock *inet = inet_sk(sk);
54781 +
54782 + sin.sin_addr.s_addr = inet->inet_daddr;
54783 + sin.sin_port = inet->inet_dport;
54784 +
54785 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
54786 + }
54787 +}
54788 +
54789 +int
54790 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
54791 +{
54792 + struct sockaddr_in sin;
54793 +
54794 + if (unlikely(skb->len < sizeof (struct udphdr)))
54795 + return 0; // skip this packet
54796 +
54797 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
54798 + sin.sin_port = udp_hdr(skb)->source;
54799 +
54800 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
54801 +}
54802 diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
54803 new file mode 100644
54804 index 0000000..25f54ef
54805 --- /dev/null
54806 +++ b/grsecurity/gracl_learn.c
54807 @@ -0,0 +1,207 @@
54808 +#include <linux/kernel.h>
54809 +#include <linux/mm.h>
54810 +#include <linux/sched.h>
54811 +#include <linux/poll.h>
54812 +#include <linux/string.h>
54813 +#include <linux/file.h>
54814 +#include <linux/types.h>
54815 +#include <linux/vmalloc.h>
54816 +#include <linux/grinternal.h>
54817 +
54818 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
54819 + size_t count, loff_t *ppos);
54820 +extern int gr_acl_is_enabled(void);
54821 +
54822 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
54823 +static int gr_learn_attached;
54824 +
54825 +/* use a 512k buffer */
54826 +#define LEARN_BUFFER_SIZE (512 * 1024)
54827 +
54828 +static DEFINE_SPINLOCK(gr_learn_lock);
54829 +static DEFINE_MUTEX(gr_learn_user_mutex);
54830 +
54831 +/* we need to maintain two buffers, so that the kernel context of grlearn
54832 + uses a semaphore around the userspace copying, and the other kernel contexts
54833 + use a spinlock when copying into the buffer, since they cannot sleep
54834 +*/
54835 +static char *learn_buffer;
54836 +static char *learn_buffer_user;
54837 +static int learn_buffer_len;
54838 +static int learn_buffer_user_len;
54839 +
54840 +static ssize_t
54841 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
54842 +{
54843 + DECLARE_WAITQUEUE(wait, current);
54844 + ssize_t retval = 0;
54845 +
54846 + add_wait_queue(&learn_wait, &wait);
54847 + set_current_state(TASK_INTERRUPTIBLE);
54848 + do {
54849 + mutex_lock(&gr_learn_user_mutex);
54850 + spin_lock(&gr_learn_lock);
54851 + if (learn_buffer_len)
54852 + break;
54853 + spin_unlock(&gr_learn_lock);
54854 + mutex_unlock(&gr_learn_user_mutex);
54855 + if (file->f_flags & O_NONBLOCK) {
54856 + retval = -EAGAIN;
54857 + goto out;
54858 + }
54859 + if (signal_pending(current)) {
54860 + retval = -ERESTARTSYS;
54861 + goto out;
54862 + }
54863 +
54864 + schedule();
54865 + } while (1);
54866 +
54867 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
54868 + learn_buffer_user_len = learn_buffer_len;
54869 + retval = learn_buffer_len;
54870 + learn_buffer_len = 0;
54871 +
54872 + spin_unlock(&gr_learn_lock);
54873 +
54874 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
54875 + retval = -EFAULT;
54876 +
54877 + mutex_unlock(&gr_learn_user_mutex);
54878 +out:
54879 + set_current_state(TASK_RUNNING);
54880 + remove_wait_queue(&learn_wait, &wait);
54881 + return retval;
54882 +}
54883 +
54884 +static unsigned int
54885 +poll_learn(struct file * file, poll_table * wait)
54886 +{
54887 + poll_wait(file, &learn_wait, wait);
54888 +
54889 + if (learn_buffer_len)
54890 + return (POLLIN | POLLRDNORM);
54891 +
54892 + return 0;
54893 +}
54894 +
54895 +void
54896 +gr_clear_learn_entries(void)
54897 +{
54898 + char *tmp;
54899 +
54900 + mutex_lock(&gr_learn_user_mutex);
54901 + spin_lock(&gr_learn_lock);
54902 + tmp = learn_buffer;
54903 + learn_buffer = NULL;
54904 + spin_unlock(&gr_learn_lock);
54905 + if (tmp)
54906 + vfree(tmp);
54907 + if (learn_buffer_user != NULL) {
54908 + vfree(learn_buffer_user);
54909 + learn_buffer_user = NULL;
54910 + }
54911 + learn_buffer_len = 0;
54912 + mutex_unlock(&gr_learn_user_mutex);
54913 +
54914 + return;
54915 +}
54916 +
54917 +void
54918 +gr_add_learn_entry(const char *fmt, ...)
54919 +{
54920 + va_list args;
54921 + unsigned int len;
54922 +
54923 + if (!gr_learn_attached)
54924 + return;
54925 +
54926 + spin_lock(&gr_learn_lock);
54927 +
54928 + /* leave a gap at the end so we know when it's "full" but don't have to
54929 + compute the exact length of the string we're trying to append
54930 + */
54931 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
54932 + spin_unlock(&gr_learn_lock);
54933 + wake_up_interruptible(&learn_wait);
54934 + return;
54935 + }
54936 + if (learn_buffer == NULL) {
54937 + spin_unlock(&gr_learn_lock);
54938 + return;
54939 + }
54940 +
54941 + va_start(args, fmt);
54942 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
54943 + va_end(args);
54944 +
54945 + learn_buffer_len += len + 1;
54946 +
54947 + spin_unlock(&gr_learn_lock);
54948 + wake_up_interruptible(&learn_wait);
54949 +
54950 + return;
54951 +}
54952 +
54953 +static int
54954 +open_learn(struct inode *inode, struct file *file)
54955 +{
54956 + if (file->f_mode & FMODE_READ && gr_learn_attached)
54957 + return -EBUSY;
54958 + if (file->f_mode & FMODE_READ) {
54959 + int retval = 0;
54960 + mutex_lock(&gr_learn_user_mutex);
54961 + if (learn_buffer == NULL)
54962 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
54963 + if (learn_buffer_user == NULL)
54964 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
54965 + if (learn_buffer == NULL) {
54966 + retval = -ENOMEM;
54967 + goto out_error;
54968 + }
54969 + if (learn_buffer_user == NULL) {
54970 + retval = -ENOMEM;
54971 + goto out_error;
54972 + }
54973 + learn_buffer_len = 0;
54974 + learn_buffer_user_len = 0;
54975 + gr_learn_attached = 1;
54976 +out_error:
54977 + mutex_unlock(&gr_learn_user_mutex);
54978 + return retval;
54979 + }
54980 + return 0;
54981 +}
54982 +
54983 +static int
54984 +close_learn(struct inode *inode, struct file *file)
54985 +{
54986 + if (file->f_mode & FMODE_READ) {
54987 + char *tmp = NULL;
54988 + mutex_lock(&gr_learn_user_mutex);
54989 + spin_lock(&gr_learn_lock);
54990 + tmp = learn_buffer;
54991 + learn_buffer = NULL;
54992 + spin_unlock(&gr_learn_lock);
54993 + if (tmp)
54994 + vfree(tmp);
54995 + if (learn_buffer_user != NULL) {
54996 + vfree(learn_buffer_user);
54997 + learn_buffer_user = NULL;
54998 + }
54999 + learn_buffer_len = 0;
55000 + learn_buffer_user_len = 0;
55001 + gr_learn_attached = 0;
55002 + mutex_unlock(&gr_learn_user_mutex);
55003 + }
55004 +
55005 + return 0;
55006 +}
55007 +
55008 +const struct file_operations grsec_fops = {
55009 + .read = read_learn,
55010 + .write = write_grsec_handler,
55011 + .open = open_learn,
55012 + .release = close_learn,
55013 + .poll = poll_learn,
55014 +};
55015 diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
55016 new file mode 100644
55017 index 0000000..39645c9
55018 --- /dev/null
55019 +++ b/grsecurity/gracl_res.c
55020 @@ -0,0 +1,68 @@
55021 +#include <linux/kernel.h>
55022 +#include <linux/sched.h>
55023 +#include <linux/gracl.h>
55024 +#include <linux/grinternal.h>
55025 +
55026 +static const char *restab_log[] = {
55027 + [RLIMIT_CPU] = "RLIMIT_CPU",
55028 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
55029 + [RLIMIT_DATA] = "RLIMIT_DATA",
55030 + [RLIMIT_STACK] = "RLIMIT_STACK",
55031 + [RLIMIT_CORE] = "RLIMIT_CORE",
55032 + [RLIMIT_RSS] = "RLIMIT_RSS",
55033 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
55034 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
55035 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
55036 + [RLIMIT_AS] = "RLIMIT_AS",
55037 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
55038 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
55039 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
55040 + [RLIMIT_NICE] = "RLIMIT_NICE",
55041 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
55042 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
55043 + [GR_CRASH_RES] = "RLIMIT_CRASH"
55044 +};
55045 +
55046 +void
55047 +gr_log_resource(const struct task_struct *task,
55048 + const int res, const unsigned long wanted, const int gt)
55049 +{
55050 + const struct cred *cred;
55051 + unsigned long rlim;
55052 +
55053 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
55054 + return;
55055 +
55056 + // not yet supported resource
55057 + if (unlikely(!restab_log[res]))
55058 + return;
55059 +
55060 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
55061 + rlim = task_rlimit_max(task, res);
55062 + else
55063 + rlim = task_rlimit(task, res);
55064 +
55065 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
55066 + return;
55067 +
55068 + rcu_read_lock();
55069 + cred = __task_cred(task);
55070 +
55071 + if (res == RLIMIT_NPROC &&
55072 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
55073 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
55074 + goto out_rcu_unlock;
55075 + else if (res == RLIMIT_MEMLOCK &&
55076 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
55077 + goto out_rcu_unlock;
55078 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
55079 + goto out_rcu_unlock;
55080 + rcu_read_unlock();
55081 +
55082 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
55083 +
55084 + return;
55085 +out_rcu_unlock:
55086 + rcu_read_unlock();
55087 + return;
55088 +}
55089 diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
55090 new file mode 100644
55091 index 0000000..5556be3
55092 --- /dev/null
55093 +++ b/grsecurity/gracl_segv.c
55094 @@ -0,0 +1,299 @@
55095 +#include <linux/kernel.h>
55096 +#include <linux/mm.h>
55097 +#include <asm/uaccess.h>
55098 +#include <asm/errno.h>
55099 +#include <asm/mman.h>
55100 +#include <net/sock.h>
55101 +#include <linux/file.h>
55102 +#include <linux/fs.h>
55103 +#include <linux/net.h>
55104 +#include <linux/in.h>
55105 +#include <linux/slab.h>
55106 +#include <linux/types.h>
55107 +#include <linux/sched.h>
55108 +#include <linux/timer.h>
55109 +#include <linux/gracl.h>
55110 +#include <linux/grsecurity.h>
55111 +#include <linux/grinternal.h>
55112 +
55113 +static struct crash_uid *uid_set;
55114 +static unsigned short uid_used;
55115 +static DEFINE_SPINLOCK(gr_uid_lock);
55116 +extern rwlock_t gr_inode_lock;
55117 +extern struct acl_subject_label *
55118 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
55119 + struct acl_role_label *role);
55120 +
55121 +#ifdef CONFIG_BTRFS_FS
55122 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
55123 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
55124 +#endif
55125 +
55126 +static inline dev_t __get_dev(const struct dentry *dentry)
55127 +{
55128 +#ifdef CONFIG_BTRFS_FS
55129 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
55130 + return get_btrfs_dev_from_inode(dentry->d_inode);
55131 + else
55132 +#endif
55133 + return dentry->d_inode->i_sb->s_dev;
55134 +}
55135 +
55136 +int
55137 +gr_init_uidset(void)
55138 +{
55139 + uid_set =
55140 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
55141 + uid_used = 0;
55142 +
55143 + return uid_set ? 1 : 0;
55144 +}
55145 +
55146 +void
55147 +gr_free_uidset(void)
55148 +{
55149 + if (uid_set)
55150 + kfree(uid_set);
55151 +
55152 + return;
55153 +}
55154 +
55155 +int
55156 +gr_find_uid(const uid_t uid)
55157 +{
55158 + struct crash_uid *tmp = uid_set;
55159 + uid_t buid;
55160 + int low = 0, high = uid_used - 1, mid;
55161 +
55162 + while (high >= low) {
55163 + mid = (low + high) >> 1;
55164 + buid = tmp[mid].uid;
55165 + if (buid == uid)
55166 + return mid;
55167 + if (buid > uid)
55168 + high = mid - 1;
55169 + if (buid < uid)
55170 + low = mid + 1;
55171 + }
55172 +
55173 + return -1;
55174 +}
55175 +
55176 +static __inline__ void
55177 +gr_insertsort(void)
55178 +{
55179 + unsigned short i, j;
55180 + struct crash_uid index;
55181 +
55182 + for (i = 1; i < uid_used; i++) {
55183 + index = uid_set[i];
55184 + j = i;
55185 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
55186 + uid_set[j] = uid_set[j - 1];
55187 + j--;
55188 + }
55189 + uid_set[j] = index;
55190 + }
55191 +
55192 + return;
55193 +}
55194 +
55195 +static __inline__ void
55196 +gr_insert_uid(const uid_t uid, const unsigned long expires)
55197 +{
55198 + int loc;
55199 +
55200 + if (uid_used == GR_UIDTABLE_MAX)
55201 + return;
55202 +
55203 + loc = gr_find_uid(uid);
55204 +
55205 + if (loc >= 0) {
55206 + uid_set[loc].expires = expires;
55207 + return;
55208 + }
55209 +
55210 + uid_set[uid_used].uid = uid;
55211 + uid_set[uid_used].expires = expires;
55212 + uid_used++;
55213 +
55214 + gr_insertsort();
55215 +
55216 + return;
55217 +}
55218 +
55219 +void
55220 +gr_remove_uid(const unsigned short loc)
55221 +{
55222 + unsigned short i;
55223 +
55224 + for (i = loc + 1; i < uid_used; i++)
55225 + uid_set[i - 1] = uid_set[i];
55226 +
55227 + uid_used--;
55228 +
55229 + return;
55230 +}
55231 +
55232 +int
55233 +gr_check_crash_uid(const uid_t uid)
55234 +{
55235 + int loc;
55236 + int ret = 0;
55237 +
55238 + if (unlikely(!gr_acl_is_enabled()))
55239 + return 0;
55240 +
55241 + spin_lock(&gr_uid_lock);
55242 + loc = gr_find_uid(uid);
55243 +
55244 + if (loc < 0)
55245 + goto out_unlock;
55246 +
55247 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
55248 + gr_remove_uid(loc);
55249 + else
55250 + ret = 1;
55251 +
55252 +out_unlock:
55253 + spin_unlock(&gr_uid_lock);
55254 + return ret;
55255 +}
55256 +
55257 +static __inline__ int
55258 +proc_is_setxid(const struct cred *cred)
55259 +{
55260 + if (cred->uid != cred->euid || cred->uid != cred->suid ||
55261 + cred->uid != cred->fsuid)
55262 + return 1;
55263 + if (cred->gid != cred->egid || cred->gid != cred->sgid ||
55264 + cred->gid != cred->fsgid)
55265 + return 1;
55266 +
55267 + return 0;
55268 +}
55269 +
55270 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
55271 +
55272 +void
55273 +gr_handle_crash(struct task_struct *task, const int sig)
55274 +{
55275 + struct acl_subject_label *curr;
55276 + struct task_struct *tsk, *tsk2;
55277 + const struct cred *cred;
55278 + const struct cred *cred2;
55279 +
55280 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
55281 + return;
55282 +
55283 + if (unlikely(!gr_acl_is_enabled()))
55284 + return;
55285 +
55286 + curr = task->acl;
55287 +
55288 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
55289 + return;
55290 +
55291 + if (time_before_eq(curr->expires, get_seconds())) {
55292 + curr->expires = 0;
55293 + curr->crashes = 0;
55294 + }
55295 +
55296 + curr->crashes++;
55297 +
55298 + if (!curr->expires)
55299 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
55300 +
55301 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
55302 + time_after(curr->expires, get_seconds())) {
55303 + rcu_read_lock();
55304 + cred = __task_cred(task);
55305 + if (cred->uid && proc_is_setxid(cred)) {
55306 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
55307 + spin_lock(&gr_uid_lock);
55308 + gr_insert_uid(cred->uid, curr->expires);
55309 + spin_unlock(&gr_uid_lock);
55310 + curr->expires = 0;
55311 + curr->crashes = 0;
55312 + read_lock(&tasklist_lock);
55313 + do_each_thread(tsk2, tsk) {
55314 + cred2 = __task_cred(tsk);
55315 + if (tsk != task && cred2->uid == cred->uid)
55316 + gr_fake_force_sig(SIGKILL, tsk);
55317 + } while_each_thread(tsk2, tsk);
55318 + read_unlock(&tasklist_lock);
55319 + } else {
55320 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
55321 + read_lock(&tasklist_lock);
55322 + read_lock(&grsec_exec_file_lock);
55323 + do_each_thread(tsk2, tsk) {
55324 + if (likely(tsk != task)) {
55325 + // if this thread has the same subject as the one that triggered
55326 + // RES_CRASH and it's the same binary, kill it
55327 + if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
55328 + gr_fake_force_sig(SIGKILL, tsk);
55329 + }
55330 + } while_each_thread(tsk2, tsk);
55331 + read_unlock(&grsec_exec_file_lock);
55332 + read_unlock(&tasklist_lock);
55333 + }
55334 + rcu_read_unlock();
55335 + }
55336 +
55337 + return;
55338 +}
55339 +
55340 +int
55341 +gr_check_crash_exec(const struct file *filp)
55342 +{
55343 + struct acl_subject_label *curr;
55344 +
55345 + if (unlikely(!gr_acl_is_enabled()))
55346 + return 0;
55347 +
55348 + read_lock(&gr_inode_lock);
55349 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
55350 + __get_dev(filp->f_path.dentry),
55351 + current->role);
55352 + read_unlock(&gr_inode_lock);
55353 +
55354 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
55355 + (!curr->crashes && !curr->expires))
55356 + return 0;
55357 +
55358 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
55359 + time_after(curr->expires, get_seconds()))
55360 + return 1;
55361 + else if (time_before_eq(curr->expires, get_seconds())) {
55362 + curr->crashes = 0;
55363 + curr->expires = 0;
55364 + }
55365 +
55366 + return 0;
55367 +}
55368 +
55369 +void
55370 +gr_handle_alertkill(struct task_struct *task)
55371 +{
55372 + struct acl_subject_label *curracl;
55373 + __u32 curr_ip;
55374 + struct task_struct *p, *p2;
55375 +
55376 + if (unlikely(!gr_acl_is_enabled()))
55377 + return;
55378 +
55379 + curracl = task->acl;
55380 + curr_ip = task->signal->curr_ip;
55381 +
55382 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
55383 + read_lock(&tasklist_lock);
55384 + do_each_thread(p2, p) {
55385 + if (p->signal->curr_ip == curr_ip)
55386 + gr_fake_force_sig(SIGKILL, p);
55387 + } while_each_thread(p2, p);
55388 + read_unlock(&tasklist_lock);
55389 + } else if (curracl->mode & GR_KILLPROC)
55390 + gr_fake_force_sig(SIGKILL, task);
55391 +
55392 + return;
55393 +}
55394 diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
55395 new file mode 100644
55396 index 0000000..9d83a69
55397 --- /dev/null
55398 +++ b/grsecurity/gracl_shm.c
55399 @@ -0,0 +1,40 @@
55400 +#include <linux/kernel.h>
55401 +#include <linux/mm.h>
55402 +#include <linux/sched.h>
55403 +#include <linux/file.h>
55404 +#include <linux/ipc.h>
55405 +#include <linux/gracl.h>
55406 +#include <linux/grsecurity.h>
55407 +#include <linux/grinternal.h>
55408 +
55409 +int
55410 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
55411 + const time_t shm_createtime, const uid_t cuid, const int shmid)
55412 +{
55413 + struct task_struct *task;
55414 +
55415 + if (!gr_acl_is_enabled())
55416 + return 1;
55417 +
55418 + rcu_read_lock();
55419 + read_lock(&tasklist_lock);
55420 +
55421 + task = find_task_by_vpid(shm_cprid);
55422 +
55423 + if (unlikely(!task))
55424 + task = find_task_by_vpid(shm_lapid);
55425 +
55426 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
55427 + (task->pid == shm_lapid)) &&
55428 + (task->acl->mode & GR_PROTSHM) &&
55429 + (task->acl != current->acl))) {
55430 + read_unlock(&tasklist_lock);
55431 + rcu_read_unlock();
55432 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
55433 + return 0;
55434 + }
55435 + read_unlock(&tasklist_lock);
55436 + rcu_read_unlock();
55437 +
55438 + return 1;
55439 +}
55440 diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
55441 new file mode 100644
55442 index 0000000..bc0be01
55443 --- /dev/null
55444 +++ b/grsecurity/grsec_chdir.c
55445 @@ -0,0 +1,19 @@
55446 +#include <linux/kernel.h>
55447 +#include <linux/sched.h>
55448 +#include <linux/fs.h>
55449 +#include <linux/file.h>
55450 +#include <linux/grsecurity.h>
55451 +#include <linux/grinternal.h>
55452 +
55453 +void
55454 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
55455 +{
55456 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
55457 + if ((grsec_enable_chdir && grsec_enable_group &&
55458 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
55459 + !grsec_enable_group)) {
55460 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
55461 + }
55462 +#endif
55463 + return;
55464 +}
55465 diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
55466 new file mode 100644
55467 index 0000000..9807ee2
55468 --- /dev/null
55469 +++ b/grsecurity/grsec_chroot.c
55470 @@ -0,0 +1,368 @@
55471 +#include <linux/kernel.h>
55472 +#include <linux/module.h>
55473 +#include <linux/sched.h>
55474 +#include <linux/file.h>
55475 +#include <linux/fs.h>
55476 +#include <linux/mount.h>
55477 +#include <linux/types.h>
55478 +#include "../fs/mount.h"
55479 +#include <linux/grsecurity.h>
55480 +#include <linux/grinternal.h>
55481 +
55482 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
55483 +{
55484 +#ifdef CONFIG_GRKERNSEC
55485 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
55486 + path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root)
55487 + task->gr_is_chrooted = 1;
55488 + else
55489 + task->gr_is_chrooted = 0;
55490 +
55491 + task->gr_chroot_dentry = path->dentry;
55492 +#endif
55493 + return;
55494 +}
55495 +
55496 +void gr_clear_chroot_entries(struct task_struct *task)
55497 +{
55498 +#ifdef CONFIG_GRKERNSEC
55499 + task->gr_is_chrooted = 0;
55500 + task->gr_chroot_dentry = NULL;
55501 +#endif
55502 + return;
55503 +}
55504 +
55505 +int
55506 +gr_handle_chroot_unix(const pid_t pid)
55507 +{
55508 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
55509 + struct task_struct *p;
55510 +
55511 + if (unlikely(!grsec_enable_chroot_unix))
55512 + return 1;
55513 +
55514 + if (likely(!proc_is_chrooted(current)))
55515 + return 1;
55516 +
55517 + rcu_read_lock();
55518 + read_lock(&tasklist_lock);
55519 + p = find_task_by_vpid_unrestricted(pid);
55520 + if (unlikely(p && !have_same_root(current, p))) {
55521 + read_unlock(&tasklist_lock);
55522 + rcu_read_unlock();
55523 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
55524 + return 0;
55525 + }
55526 + read_unlock(&tasklist_lock);
55527 + rcu_read_unlock();
55528 +#endif
55529 + return 1;
55530 +}
55531 +
55532 +int
55533 +gr_handle_chroot_nice(void)
55534 +{
55535 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
55536 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
55537 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
55538 + return -EPERM;
55539 + }
55540 +#endif
55541 + return 0;
55542 +}
55543 +
55544 +int
55545 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
55546 +{
55547 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
55548 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
55549 + && proc_is_chrooted(current)) {
55550 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
55551 + return -EACCES;
55552 + }
55553 +#endif
55554 + return 0;
55555 +}
55556 +
55557 +int
55558 +gr_handle_chroot_rawio(const struct inode *inode)
55559 +{
55560 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
55561 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
55562 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
55563 + return 1;
55564 +#endif
55565 + return 0;
55566 +}
55567 +
55568 +int
55569 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
55570 +{
55571 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
55572 + struct task_struct *p;
55573 + int ret = 0;
55574 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
55575 + return ret;
55576 +
55577 + read_lock(&tasklist_lock);
55578 + do_each_pid_task(pid, type, p) {
55579 + if (!have_same_root(current, p)) {
55580 + ret = 1;
55581 + goto out;
55582 + }
55583 + } while_each_pid_task(pid, type, p);
55584 +out:
55585 + read_unlock(&tasklist_lock);
55586 + return ret;
55587 +#endif
55588 + return 0;
55589 +}
55590 +
55591 +int
55592 +gr_pid_is_chrooted(struct task_struct *p)
55593 +{
55594 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
55595 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
55596 + return 0;
55597 +
55598 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
55599 + !have_same_root(current, p)) {
55600 + return 1;
55601 + }
55602 +#endif
55603 + return 0;
55604 +}
55605 +
55606 +EXPORT_SYMBOL(gr_pid_is_chrooted);
55607 +
55608 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
55609 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
55610 +{
55611 + struct path path, currentroot;
55612 + int ret = 0;
55613 +
55614 + path.dentry = (struct dentry *)u_dentry;
55615 + path.mnt = (struct vfsmount *)u_mnt;
55616 + get_fs_root(current->fs, &currentroot);
55617 + if (path_is_under(&path, &currentroot))
55618 + ret = 1;
55619 + path_put(&currentroot);
55620 +
55621 + return ret;
55622 +}
55623 +#endif
55624 +
55625 +int
55626 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
55627 +{
55628 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
55629 + if (!grsec_enable_chroot_fchdir)
55630 + return 1;
55631 +
55632 + if (!proc_is_chrooted(current))
55633 + return 1;
55634 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
55635 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
55636 + return 0;
55637 + }
55638 +#endif
55639 + return 1;
55640 +}
55641 +
55642 +int
55643 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
55644 + const time_t shm_createtime)
55645 +{
55646 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
55647 + struct task_struct *p;
55648 + time_t starttime;
55649 +
55650 + if (unlikely(!grsec_enable_chroot_shmat))
55651 + return 1;
55652 +
55653 + if (likely(!proc_is_chrooted(current)))
55654 + return 1;
55655 +
55656 + rcu_read_lock();
55657 + read_lock(&tasklist_lock);
55658 +
55659 + if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
55660 + starttime = p->start_time.tv_sec;
55661 + if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
55662 + if (have_same_root(current, p)) {
55663 + goto allow;
55664 + } else {
55665 + read_unlock(&tasklist_lock);
55666 + rcu_read_unlock();
55667 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
55668 + return 0;
55669 + }
55670 + }
55671 + /* creator exited, pid reuse, fall through to next check */
55672 + }
55673 + if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
55674 + if (unlikely(!have_same_root(current, p))) {
55675 + read_unlock(&tasklist_lock);
55676 + rcu_read_unlock();
55677 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
55678 + return 0;
55679 + }
55680 + }
55681 +
55682 +allow:
55683 + read_unlock(&tasklist_lock);
55684 + rcu_read_unlock();
55685 +#endif
55686 + return 1;
55687 +}
55688 +
55689 +void
55690 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
55691 +{
55692 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
55693 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
55694 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
55695 +#endif
55696 + return;
55697 +}
55698 +
55699 +int
55700 +gr_handle_chroot_mknod(const struct dentry *dentry,
55701 + const struct vfsmount *mnt, const int mode)
55702 +{
55703 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
55704 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
55705 + proc_is_chrooted(current)) {
55706 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
55707 + return -EPERM;
55708 + }
55709 +#endif
55710 + return 0;
55711 +}
55712 +
55713 +int
55714 +gr_handle_chroot_mount(const struct dentry *dentry,
55715 + const struct vfsmount *mnt, const char *dev_name)
55716 +{
55717 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
55718 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
55719 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
55720 + return -EPERM;
55721 + }
55722 +#endif
55723 + return 0;
55724 +}
55725 +
55726 +int
55727 +gr_handle_chroot_pivot(void)
55728 +{
55729 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
55730 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
55731 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
55732 + return -EPERM;
55733 + }
55734 +#endif
55735 + return 0;
55736 +}
55737 +
55738 +int
55739 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
55740 +{
55741 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
55742 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
55743 + !gr_is_outside_chroot(dentry, mnt)) {
55744 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
55745 + return -EPERM;
55746 + }
55747 +#endif
55748 + return 0;
55749 +}
55750 +
55751 +extern const char *captab_log[];
55752 +extern int captab_log_entries;
55753 +
55754 +int
55755 +gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
55756 +{
55757 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
55758 + if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
55759 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
55760 + if (cap_raised(chroot_caps, cap)) {
55761 + if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
55762 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
55763 + }
55764 + return 0;
55765 + }
55766 + }
55767 +#endif
55768 + return 1;
55769 +}
55770 +
55771 +int
55772 +gr_chroot_is_capable(const int cap)
55773 +{
55774 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
55775 + return gr_task_chroot_is_capable(current, current_cred(), cap);
55776 +#endif
55777 + return 1;
55778 +}
55779 +
55780 +int
55781 +gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
55782 +{
55783 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
55784 + if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
55785 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
55786 + if (cap_raised(chroot_caps, cap)) {
55787 + return 0;
55788 + }
55789 + }
55790 +#endif
55791 + return 1;
55792 +}
55793 +
55794 +int
55795 +gr_chroot_is_capable_nolog(const int cap)
55796 +{
55797 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
55798 + return gr_task_chroot_is_capable_nolog(current, cap);
55799 +#endif
55800 + return 1;
55801 +}
55802 +
55803 +int
55804 +gr_handle_chroot_sysctl(const int op)
55805 +{
55806 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
55807 + if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
55808 + proc_is_chrooted(current))
55809 + return -EACCES;
55810 +#endif
55811 + return 0;
55812 +}
55813 +
55814 +void
55815 +gr_handle_chroot_chdir(struct path *path)
55816 +{
55817 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
55818 + if (grsec_enable_chroot_chdir)
55819 + set_fs_pwd(current->fs, path);
55820 +#endif
55821 + return;
55822 +}
55823 +
55824 +int
55825 +gr_handle_chroot_chmod(const struct dentry *dentry,
55826 + const struct vfsmount *mnt, const int mode)
55827 +{
55828 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
55829 + /* allow chmod +s on directories, but not files */
55830 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
55831 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
55832 + proc_is_chrooted(current)) {
55833 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
55834 + return -EPERM;
55835 + }
55836 +#endif
55837 + return 0;
55838 +}
55839 diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
55840 new file mode 100644
55841 index 0000000..213ad8b
55842 --- /dev/null
55843 +++ b/grsecurity/grsec_disabled.c
55844 @@ -0,0 +1,437 @@
55845 +#include <linux/kernel.h>
55846 +#include <linux/module.h>
55847 +#include <linux/sched.h>
55848 +#include <linux/file.h>
55849 +#include <linux/fs.h>
55850 +#include <linux/kdev_t.h>
55851 +#include <linux/net.h>
55852 +#include <linux/in.h>
55853 +#include <linux/ip.h>
55854 +#include <linux/skbuff.h>
55855 +#include <linux/sysctl.h>
55856 +
55857 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
55858 +void
55859 +pax_set_initial_flags(struct linux_binprm *bprm)
55860 +{
55861 + return;
55862 +}
55863 +#endif
55864 +
55865 +#ifdef CONFIG_SYSCTL
55866 +__u32
55867 +gr_handle_sysctl(const struct ctl_table * table, const int op)
55868 +{
55869 + return 0;
55870 +}
55871 +#endif
55872 +
55873 +#ifdef CONFIG_TASKSTATS
55874 +int gr_is_taskstats_denied(int pid)
55875 +{
55876 + return 0;
55877 +}
55878 +#endif
55879 +
55880 +int
55881 +gr_acl_is_enabled(void)
55882 +{
55883 + return 0;
55884 +}
55885 +
55886 +void
55887 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
55888 +{
55889 + return;
55890 +}
55891 +
55892 +int
55893 +gr_handle_rawio(const struct inode *inode)
55894 +{
55895 + return 0;
55896 +}
55897 +
55898 +void
55899 +gr_acl_handle_psacct(struct task_struct *task, const long code)
55900 +{
55901 + return;
55902 +}
55903 +
55904 +int
55905 +gr_handle_ptrace(struct task_struct *task, const long request)
55906 +{
55907 + return 0;
55908 +}
55909 +
55910 +int
55911 +gr_handle_proc_ptrace(struct task_struct *task)
55912 +{
55913 + return 0;
55914 +}
55915 +
55916 +void
55917 +gr_learn_resource(const struct task_struct *task,
55918 + const int res, const unsigned long wanted, const int gt)
55919 +{
55920 + return;
55921 +}
55922 +
55923 +int
55924 +gr_set_acls(const int type)
55925 +{
55926 + return 0;
55927 +}
55928 +
55929 +int
55930 +gr_check_hidden_task(const struct task_struct *tsk)
55931 +{
55932 + return 0;
55933 +}
55934 +
55935 +int
55936 +gr_check_protected_task(const struct task_struct *task)
55937 +{
55938 + return 0;
55939 +}
55940 +
55941 +int
55942 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
55943 +{
55944 + return 0;
55945 +}
55946 +
55947 +void
55948 +gr_copy_label(struct task_struct *tsk)
55949 +{
55950 + return;
55951 +}
55952 +
55953 +void
55954 +gr_set_pax_flags(struct task_struct *task)
55955 +{
55956 + return;
55957 +}
55958 +
55959 +int
55960 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
55961 + const int unsafe_share)
55962 +{
55963 + return 0;
55964 +}
55965 +
55966 +void
55967 +gr_handle_delete(const ino_t ino, const dev_t dev)
55968 +{
55969 + return;
55970 +}
55971 +
55972 +void
55973 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
55974 +{
55975 + return;
55976 +}
55977 +
55978 +void
55979 +gr_handle_crash(struct task_struct *task, const int sig)
55980 +{
55981 + return;
55982 +}
55983 +
55984 +int
55985 +gr_check_crash_exec(const struct file *filp)
55986 +{
55987 + return 0;
55988 +}
55989 +
55990 +int
55991 +gr_check_crash_uid(const uid_t uid)
55992 +{
55993 + return 0;
55994 +}
55995 +
55996 +void
55997 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
55998 + struct dentry *old_dentry,
55999 + struct dentry *new_dentry,
56000 + struct vfsmount *mnt, const __u8 replace)
56001 +{
56002 + return;
56003 +}
56004 +
56005 +int
56006 +gr_search_socket(const int family, const int type, const int protocol)
56007 +{
56008 + return 1;
56009 +}
56010 +
56011 +int
56012 +gr_search_connectbind(const int mode, const struct socket *sock,
56013 + const struct sockaddr_in *addr)
56014 +{
56015 + return 0;
56016 +}
56017 +
56018 +void
56019 +gr_handle_alertkill(struct task_struct *task)
56020 +{
56021 + return;
56022 +}
56023 +
56024 +__u32
56025 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
56026 +{
56027 + return 1;
56028 +}
56029 +
56030 +__u32
56031 +gr_acl_handle_hidden_file(const struct dentry * dentry,
56032 + const struct vfsmount * mnt)
56033 +{
56034 + return 1;
56035 +}
56036 +
56037 +__u32
56038 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
56039 + int acc_mode)
56040 +{
56041 + return 1;
56042 +}
56043 +
56044 +__u32
56045 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
56046 +{
56047 + return 1;
56048 +}
56049 +
56050 +__u32
56051 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
56052 +{
56053 + return 1;
56054 +}
56055 +
56056 +int
56057 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
56058 + unsigned int *vm_flags)
56059 +{
56060 + return 1;
56061 +}
56062 +
56063 +__u32
56064 +gr_acl_handle_truncate(const struct dentry * dentry,
56065 + const struct vfsmount * mnt)
56066 +{
56067 + return 1;
56068 +}
56069 +
56070 +__u32
56071 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
56072 +{
56073 + return 1;
56074 +}
56075 +
56076 +__u32
56077 +gr_acl_handle_access(const struct dentry * dentry,
56078 + const struct vfsmount * mnt, const int fmode)
56079 +{
56080 + return 1;
56081 +}
56082 +
56083 +__u32
56084 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
56085 + umode_t *mode)
56086 +{
56087 + return 1;
56088 +}
56089 +
56090 +__u32
56091 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
56092 +{
56093 + return 1;
56094 +}
56095 +
56096 +__u32
56097 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
56098 +{
56099 + return 1;
56100 +}
56101 +
56102 +void
56103 +grsecurity_init(void)
56104 +{
56105 + return;
56106 +}
56107 +
56108 +umode_t gr_acl_umask(void)
56109 +{
56110 + return 0;
56111 +}
56112 +
56113 +__u32
56114 +gr_acl_handle_mknod(const struct dentry * new_dentry,
56115 + const struct dentry * parent_dentry,
56116 + const struct vfsmount * parent_mnt,
56117 + const int mode)
56118 +{
56119 + return 1;
56120 +}
56121 +
56122 +__u32
56123 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
56124 + const struct dentry * parent_dentry,
56125 + const struct vfsmount * parent_mnt)
56126 +{
56127 + return 1;
56128 +}
56129 +
56130 +__u32
56131 +gr_acl_handle_symlink(const struct dentry * new_dentry,
56132 + const struct dentry * parent_dentry,
56133 + const struct vfsmount * parent_mnt, const char *from)
56134 +{
56135 + return 1;
56136 +}
56137 +
56138 +__u32
56139 +gr_acl_handle_link(const struct dentry * new_dentry,
56140 + const struct dentry * parent_dentry,
56141 + const struct vfsmount * parent_mnt,
56142 + const struct dentry * old_dentry,
56143 + const struct vfsmount * old_mnt, const char *to)
56144 +{
56145 + return 1;
56146 +}
56147 +
56148 +int
56149 +gr_acl_handle_rename(const struct dentry *new_dentry,
56150 + const struct dentry *parent_dentry,
56151 + const struct vfsmount *parent_mnt,
56152 + const struct dentry *old_dentry,
56153 + const struct inode *old_parent_inode,
56154 + const struct vfsmount *old_mnt, const char *newname)
56155 +{
56156 + return 0;
56157 +}
56158 +
56159 +int
56160 +gr_acl_handle_filldir(const struct file *file, const char *name,
56161 + const int namelen, const ino_t ino)
56162 +{
56163 + return 1;
56164 +}
56165 +
56166 +int
56167 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
56168 + const time_t shm_createtime, const uid_t cuid, const int shmid)
56169 +{
56170 + return 1;
56171 +}
56172 +
56173 +int
56174 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
56175 +{
56176 + return 0;
56177 +}
56178 +
56179 +int
56180 +gr_search_accept(const struct socket *sock)
56181 +{
56182 + return 0;
56183 +}
56184 +
56185 +int
56186 +gr_search_listen(const struct socket *sock)
56187 +{
56188 + return 0;
56189 +}
56190 +
56191 +int
56192 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
56193 +{
56194 + return 0;
56195 +}
56196 +
56197 +__u32
56198 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
56199 +{
56200 + return 1;
56201 +}
56202 +
56203 +__u32
56204 +gr_acl_handle_creat(const struct dentry * dentry,
56205 + const struct dentry * p_dentry,
56206 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
56207 + const int imode)
56208 +{
56209 + return 1;
56210 +}
56211 +
56212 +void
56213 +gr_acl_handle_exit(void)
56214 +{
56215 + return;
56216 +}
56217 +
56218 +int
56219 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
56220 +{
56221 + return 1;
56222 +}
56223 +
56224 +void
56225 +gr_set_role_label(const uid_t uid, const gid_t gid)
56226 +{
56227 + return;
56228 +}
56229 +
56230 +int
56231 +gr_acl_handle_procpidmem(const struct task_struct *task)
56232 +{
56233 + return 0;
56234 +}
56235 +
56236 +int
56237 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
56238 +{
56239 + return 0;
56240 +}
56241 +
56242 +int
56243 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
56244 +{
56245 + return 0;
56246 +}
56247 +
56248 +void
56249 +gr_set_kernel_label(struct task_struct *task)
56250 +{
56251 + return;
56252 +}
56253 +
56254 +int
56255 +gr_check_user_change(int real, int effective, int fs)
56256 +{
56257 + return 0;
56258 +}
56259 +
56260 +int
56261 +gr_check_group_change(int real, int effective, int fs)
56262 +{
56263 + return 0;
56264 +}
56265 +
56266 +int gr_acl_enable_at_secure(void)
56267 +{
56268 + return 0;
56269 +}
56270 +
56271 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
56272 +{
56273 + return dentry->d_inode->i_sb->s_dev;
56274 +}
56275 +
56276 +EXPORT_SYMBOL(gr_learn_resource);
56277 +EXPORT_SYMBOL(gr_set_kernel_label);
56278 +#ifdef CONFIG_SECURITY
56279 +EXPORT_SYMBOL(gr_check_user_change);
56280 +EXPORT_SYMBOL(gr_check_group_change);
56281 +#endif
56282 diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
56283 new file mode 100644
56284 index 0000000..abfa971
56285 --- /dev/null
56286 +++ b/grsecurity/grsec_exec.c
56287 @@ -0,0 +1,174 @@
56288 +#include <linux/kernel.h>
56289 +#include <linux/sched.h>
56290 +#include <linux/file.h>
56291 +#include <linux/binfmts.h>
56292 +#include <linux/fs.h>
56293 +#include <linux/types.h>
56294 +#include <linux/grdefs.h>
56295 +#include <linux/grsecurity.h>
56296 +#include <linux/grinternal.h>
56297 +#include <linux/capability.h>
56298 +#include <linux/module.h>
56299 +
56300 +#include <asm/uaccess.h>
56301 +
56302 +#ifdef CONFIG_GRKERNSEC_EXECLOG
56303 +static char gr_exec_arg_buf[132];
56304 +static DEFINE_MUTEX(gr_exec_arg_mutex);
56305 +#endif
56306 +
56307 +extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
56308 +
56309 +void
56310 +gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
56311 +{
56312 +#ifdef CONFIG_GRKERNSEC_EXECLOG
56313 + char *grarg = gr_exec_arg_buf;
56314 + unsigned int i, x, execlen = 0;
56315 + char c;
56316 +
56317 + if (!((grsec_enable_execlog && grsec_enable_group &&
56318 + in_group_p(grsec_audit_gid))
56319 + || (grsec_enable_execlog && !grsec_enable_group)))
56320 + return;
56321 +
56322 + mutex_lock(&gr_exec_arg_mutex);
56323 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
56324 +
56325 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
56326 + const char __user *p;
56327 + unsigned int len;
56328 +
56329 + p = get_user_arg_ptr(argv, i);
56330 + if (IS_ERR(p))
56331 + goto log;
56332 +
56333 + len = strnlen_user(p, 128 - execlen);
56334 + if (len > 128 - execlen)
56335 + len = 128 - execlen;
56336 + else if (len > 0)
56337 + len--;
56338 + if (copy_from_user(grarg + execlen, p, len))
56339 + goto log;
56340 +
56341 + /* rewrite unprintable characters */
56342 + for (x = 0; x < len; x++) {
56343 + c = *(grarg + execlen + x);
56344 + if (c < 32 || c > 126)
56345 + *(grarg + execlen + x) = ' ';
56346 + }
56347 +
56348 + execlen += len;
56349 + *(grarg + execlen) = ' ';
56350 + *(grarg + execlen + 1) = '\0';
56351 + execlen++;
56352 + }
56353 +
56354 + log:
56355 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
56356 + bprm->file->f_path.mnt, grarg);
56357 + mutex_unlock(&gr_exec_arg_mutex);
56358 +#endif
56359 + return;
56360 +}
56361 +
56362 +#ifdef CONFIG_GRKERNSEC
56363 +extern int gr_acl_is_capable(const int cap);
56364 +extern int gr_acl_is_capable_nolog(const int cap);
56365 +extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
56366 +extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
56367 +extern int gr_chroot_is_capable(const int cap);
56368 +extern int gr_chroot_is_capable_nolog(const int cap);
56369 +extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
56370 +extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
56371 +#endif
56372 +
56373 +const char *captab_log[] = {
56374 + "CAP_CHOWN",
56375 + "CAP_DAC_OVERRIDE",
56376 + "CAP_DAC_READ_SEARCH",
56377 + "CAP_FOWNER",
56378 + "CAP_FSETID",
56379 + "CAP_KILL",
56380 + "CAP_SETGID",
56381 + "CAP_SETUID",
56382 + "CAP_SETPCAP",
56383 + "CAP_LINUX_IMMUTABLE",
56384 + "CAP_NET_BIND_SERVICE",
56385 + "CAP_NET_BROADCAST",
56386 + "CAP_NET_ADMIN",
56387 + "CAP_NET_RAW",
56388 + "CAP_IPC_LOCK",
56389 + "CAP_IPC_OWNER",
56390 + "CAP_SYS_MODULE",
56391 + "CAP_SYS_RAWIO",
56392 + "CAP_SYS_CHROOT",
56393 + "CAP_SYS_PTRACE",
56394 + "CAP_SYS_PACCT",
56395 + "CAP_SYS_ADMIN",
56396 + "CAP_SYS_BOOT",
56397 + "CAP_SYS_NICE",
56398 + "CAP_SYS_RESOURCE",
56399 + "CAP_SYS_TIME",
56400 + "CAP_SYS_TTY_CONFIG",
56401 + "CAP_MKNOD",
56402 + "CAP_LEASE",
56403 + "CAP_AUDIT_WRITE",
56404 + "CAP_AUDIT_CONTROL",
56405 + "CAP_SETFCAP",
56406 + "CAP_MAC_OVERRIDE",
56407 + "CAP_MAC_ADMIN",
56408 + "CAP_SYSLOG",
56409 + "CAP_WAKE_ALARM"
56410 +};
56411 +
56412 +int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
56413 +
56414 +int gr_is_capable(const int cap)
56415 +{
56416 +#ifdef CONFIG_GRKERNSEC
56417 + if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
56418 + return 1;
56419 + return 0;
56420 +#else
56421 + return 1;
56422 +#endif
56423 +}
56424 +
56425 +int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
56426 +{
56427 +#ifdef CONFIG_GRKERNSEC
56428 + if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
56429 + return 1;
56430 + return 0;
56431 +#else
56432 + return 1;
56433 +#endif
56434 +}
56435 +
56436 +int gr_is_capable_nolog(const int cap)
56437 +{
56438 +#ifdef CONFIG_GRKERNSEC
56439 + if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
56440 + return 1;
56441 + return 0;
56442 +#else
56443 + return 1;
56444 +#endif
56445 +}
56446 +
56447 +int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
56448 +{
56449 +#ifdef CONFIG_GRKERNSEC
56450 + if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
56451 + return 1;
56452 + return 0;
56453 +#else
56454 + return 1;
56455 +#endif
56456 +}
56457 +
56458 +EXPORT_SYMBOL(gr_is_capable);
56459 +EXPORT_SYMBOL(gr_is_capable_nolog);
56460 +EXPORT_SYMBOL(gr_task_is_capable);
56461 +EXPORT_SYMBOL(gr_task_is_capable_nolog);
56462 diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
56463 new file mode 100644
56464 index 0000000..d3ee748
56465 --- /dev/null
56466 +++ b/grsecurity/grsec_fifo.c
56467 @@ -0,0 +1,24 @@
56468 +#include <linux/kernel.h>
56469 +#include <linux/sched.h>
56470 +#include <linux/fs.h>
56471 +#include <linux/file.h>
56472 +#include <linux/grinternal.h>
56473 +
56474 +int
56475 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
56476 + const struct dentry *dir, const int flag, const int acc_mode)
56477 +{
56478 +#ifdef CONFIG_GRKERNSEC_FIFO
56479 + const struct cred *cred = current_cred();
56480 +
56481 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
56482 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
56483 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
56484 + (cred->fsuid != dentry->d_inode->i_uid)) {
56485 + if (!inode_permission(dentry->d_inode, acc_mode))
56486 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
56487 + return -EACCES;
56488 + }
56489 +#endif
56490 + return 0;
56491 +}
56492 diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
56493 new file mode 100644
56494 index 0000000..8ca18bf
56495 --- /dev/null
56496 +++ b/grsecurity/grsec_fork.c
56497 @@ -0,0 +1,23 @@
56498 +#include <linux/kernel.h>
56499 +#include <linux/sched.h>
56500 +#include <linux/grsecurity.h>
56501 +#include <linux/grinternal.h>
56502 +#include <linux/errno.h>
56503 +
56504 +void
56505 +gr_log_forkfail(const int retval)
56506 +{
56507 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
56508 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
56509 + switch (retval) {
56510 + case -EAGAIN:
56511 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
56512 + break;
56513 + case -ENOMEM:
56514 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
56515 + break;
56516 + }
56517 + }
56518 +#endif
56519 + return;
56520 +}
56521 diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
56522 new file mode 100644
56523 index 0000000..01ddde4
56524 --- /dev/null
56525 +++ b/grsecurity/grsec_init.c
56526 @@ -0,0 +1,277 @@
56527 +#include <linux/kernel.h>
56528 +#include <linux/sched.h>
56529 +#include <linux/mm.h>
56530 +#include <linux/gracl.h>
56531 +#include <linux/slab.h>
56532 +#include <linux/vmalloc.h>
56533 +#include <linux/percpu.h>
56534 +#include <linux/module.h>
56535 +
56536 +int grsec_enable_ptrace_readexec;
56537 +int grsec_enable_setxid;
56538 +int grsec_enable_brute;
56539 +int grsec_enable_link;
56540 +int grsec_enable_dmesg;
56541 +int grsec_enable_harden_ptrace;
56542 +int grsec_enable_fifo;
56543 +int grsec_enable_execlog;
56544 +int grsec_enable_signal;
56545 +int grsec_enable_forkfail;
56546 +int grsec_enable_audit_ptrace;
56547 +int grsec_enable_time;
56548 +int grsec_enable_audit_textrel;
56549 +int grsec_enable_group;
56550 +int grsec_audit_gid;
56551 +int grsec_enable_chdir;
56552 +int grsec_enable_mount;
56553 +int grsec_enable_rofs;
56554 +int grsec_enable_chroot_findtask;
56555 +int grsec_enable_chroot_mount;
56556 +int grsec_enable_chroot_shmat;
56557 +int grsec_enable_chroot_fchdir;
56558 +int grsec_enable_chroot_double;
56559 +int grsec_enable_chroot_pivot;
56560 +int grsec_enable_chroot_chdir;
56561 +int grsec_enable_chroot_chmod;
56562 +int grsec_enable_chroot_mknod;
56563 +int grsec_enable_chroot_nice;
56564 +int grsec_enable_chroot_execlog;
56565 +int grsec_enable_chroot_caps;
56566 +int grsec_enable_chroot_sysctl;
56567 +int grsec_enable_chroot_unix;
56568 +int grsec_enable_tpe;
56569 +int grsec_tpe_gid;
56570 +int grsec_enable_blackhole;
56571 +#ifdef CONFIG_IPV6_MODULE
56572 +EXPORT_SYMBOL(grsec_enable_blackhole);
56573 +#endif
56574 +int grsec_lastack_retries;
56575 +int grsec_enable_tpe_all;
56576 +int grsec_enable_tpe_invert;
56577 +int grsec_enable_socket_all;
56578 +int grsec_socket_all_gid;
56579 +int grsec_enable_socket_client;
56580 +int grsec_socket_client_gid;
56581 +int grsec_enable_socket_server;
56582 +int grsec_socket_server_gid;
56583 +int grsec_resource_logging;
56584 +int grsec_disable_privio;
56585 +int grsec_enable_log_rwxmaps;
56586 +int grsec_lock;
56587 +
56588 +DEFINE_SPINLOCK(grsec_alert_lock);
56589 +unsigned long grsec_alert_wtime = 0;
56590 +unsigned long grsec_alert_fyet = 0;
56591 +
56592 +DEFINE_SPINLOCK(grsec_audit_lock);
56593 +
56594 +DEFINE_RWLOCK(grsec_exec_file_lock);
56595 +
56596 +char *gr_shared_page[4];
56597 +
56598 +char *gr_alert_log_fmt;
56599 +char *gr_audit_log_fmt;
56600 +char *gr_alert_log_buf;
56601 +char *gr_audit_log_buf;
56602 +
56603 +extern struct gr_arg *gr_usermode;
56604 +extern unsigned char *gr_system_salt;
56605 +extern unsigned char *gr_system_sum;
56606 +
56607 +void __init
56608 +grsecurity_init(void)
56609 +{
56610 + int j;
56611 + /* create the per-cpu shared pages */
56612 +
56613 +#ifdef CONFIG_X86
56614 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
56615 +#endif
56616 +
56617 + for (j = 0; j < 4; j++) {
56618 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
56619 + if (gr_shared_page[j] == NULL) {
56620 + panic("Unable to allocate grsecurity shared page");
56621 + return;
56622 + }
56623 + }
56624 +
56625 + /* allocate log buffers */
56626 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
56627 + if (!gr_alert_log_fmt) {
56628 + panic("Unable to allocate grsecurity alert log format buffer");
56629 + return;
56630 + }
56631 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
56632 + if (!gr_audit_log_fmt) {
56633 + panic("Unable to allocate grsecurity audit log format buffer");
56634 + return;
56635 + }
56636 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
56637 + if (!gr_alert_log_buf) {
56638 + panic("Unable to allocate grsecurity alert log buffer");
56639 + return;
56640 + }
56641 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
56642 + if (!gr_audit_log_buf) {
56643 + panic("Unable to allocate grsecurity audit log buffer");
56644 + return;
56645 + }
56646 +
56647 + /* allocate memory for authentication structure */
56648 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
56649 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
56650 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
56651 +
56652 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
56653 + panic("Unable to allocate grsecurity authentication structure");
56654 + return;
56655 + }
56656 +
56657 +
56658 +#ifdef CONFIG_GRKERNSEC_IO
56659 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
56660 + grsec_disable_privio = 1;
56661 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
56662 + grsec_disable_privio = 1;
56663 +#else
56664 + grsec_disable_privio = 0;
56665 +#endif
56666 +#endif
56667 +
56668 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
56669 + /* for backward compatibility, tpe_invert always defaults to on if
56670 + enabled in the kernel
56671 + */
56672 + grsec_enable_tpe_invert = 1;
56673 +#endif
56674 +
56675 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
56676 +#ifndef CONFIG_GRKERNSEC_SYSCTL
56677 + grsec_lock = 1;
56678 +#endif
56679 +
56680 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
56681 + grsec_enable_audit_textrel = 1;
56682 +#endif
56683 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
56684 + grsec_enable_log_rwxmaps = 1;
56685 +#endif
56686 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
56687 + grsec_enable_group = 1;
56688 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
56689 +#endif
56690 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
56691 + grsec_enable_ptrace_readexec = 1;
56692 +#endif
56693 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
56694 + grsec_enable_chdir = 1;
56695 +#endif
56696 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
56697 + grsec_enable_harden_ptrace = 1;
56698 +#endif
56699 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
56700 + grsec_enable_mount = 1;
56701 +#endif
56702 +#ifdef CONFIG_GRKERNSEC_LINK
56703 + grsec_enable_link = 1;
56704 +#endif
56705 +#ifdef CONFIG_GRKERNSEC_BRUTE
56706 + grsec_enable_brute = 1;
56707 +#endif
56708 +#ifdef CONFIG_GRKERNSEC_DMESG
56709 + grsec_enable_dmesg = 1;
56710 +#endif
56711 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
56712 + grsec_enable_blackhole = 1;
56713 + grsec_lastack_retries = 4;
56714 +#endif
56715 +#ifdef CONFIG_GRKERNSEC_FIFO
56716 + grsec_enable_fifo = 1;
56717 +#endif
56718 +#ifdef CONFIG_GRKERNSEC_EXECLOG
56719 + grsec_enable_execlog = 1;
56720 +#endif
56721 +#ifdef CONFIG_GRKERNSEC_SETXID
56722 + grsec_enable_setxid = 1;
56723 +#endif
56724 +#ifdef CONFIG_GRKERNSEC_SIGNAL
56725 + grsec_enable_signal = 1;
56726 +#endif
56727 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
56728 + grsec_enable_forkfail = 1;
56729 +#endif
56730 +#ifdef CONFIG_GRKERNSEC_TIME
56731 + grsec_enable_time = 1;
56732 +#endif
56733 +#ifdef CONFIG_GRKERNSEC_RESLOG
56734 + grsec_resource_logging = 1;
56735 +#endif
56736 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
56737 + grsec_enable_chroot_findtask = 1;
56738 +#endif
56739 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
56740 + grsec_enable_chroot_unix = 1;
56741 +#endif
56742 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
56743 + grsec_enable_chroot_mount = 1;
56744 +#endif
56745 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
56746 + grsec_enable_chroot_fchdir = 1;
56747 +#endif
56748 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
56749 + grsec_enable_chroot_shmat = 1;
56750 +#endif
56751 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
56752 + grsec_enable_audit_ptrace = 1;
56753 +#endif
56754 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
56755 + grsec_enable_chroot_double = 1;
56756 +#endif
56757 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
56758 + grsec_enable_chroot_pivot = 1;
56759 +#endif
56760 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
56761 + grsec_enable_chroot_chdir = 1;
56762 +#endif
56763 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
56764 + grsec_enable_chroot_chmod = 1;
56765 +#endif
56766 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
56767 + grsec_enable_chroot_mknod = 1;
56768 +#endif
56769 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
56770 + grsec_enable_chroot_nice = 1;
56771 +#endif
56772 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
56773 + grsec_enable_chroot_execlog = 1;
56774 +#endif
56775 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56776 + grsec_enable_chroot_caps = 1;
56777 +#endif
56778 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
56779 + grsec_enable_chroot_sysctl = 1;
56780 +#endif
56781 +#ifdef CONFIG_GRKERNSEC_TPE
56782 + grsec_enable_tpe = 1;
56783 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
56784 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
56785 + grsec_enable_tpe_all = 1;
56786 +#endif
56787 +#endif
56788 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
56789 + grsec_enable_socket_all = 1;
56790 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
56791 +#endif
56792 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
56793 + grsec_enable_socket_client = 1;
56794 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
56795 +#endif
56796 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
56797 + grsec_enable_socket_server = 1;
56798 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
56799 +#endif
56800 +#endif
56801 +
56802 + return;
56803 +}
56804 diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
56805 new file mode 100644
56806 index 0000000..3efe141
56807 --- /dev/null
56808 +++ b/grsecurity/grsec_link.c
56809 @@ -0,0 +1,43 @@
56810 +#include <linux/kernel.h>
56811 +#include <linux/sched.h>
56812 +#include <linux/fs.h>
56813 +#include <linux/file.h>
56814 +#include <linux/grinternal.h>
56815 +
56816 +int
56817 +gr_handle_follow_link(const struct inode *parent,
56818 + const struct inode *inode,
56819 + const struct dentry *dentry, const struct vfsmount *mnt)
56820 +{
56821 +#ifdef CONFIG_GRKERNSEC_LINK
56822 + const struct cred *cred = current_cred();
56823 +
56824 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
56825 + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
56826 + (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
56827 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
56828 + return -EACCES;
56829 + }
56830 +#endif
56831 + return 0;
56832 +}
56833 +
56834 +int
56835 +gr_handle_hardlink(const struct dentry *dentry,
56836 + const struct vfsmount *mnt,
56837 + struct inode *inode, const int mode, const char *to)
56838 +{
56839 +#ifdef CONFIG_GRKERNSEC_LINK
56840 + const struct cred *cred = current_cred();
56841 +
56842 + if (grsec_enable_link && cred->fsuid != inode->i_uid &&
56843 + (!S_ISREG(mode) || (mode & S_ISUID) ||
56844 + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
56845 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
56846 + !capable(CAP_FOWNER) && cred->uid) {
56847 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
56848 + return -EPERM;
56849 + }
56850 +#endif
56851 + return 0;
56852 +}
56853 diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
56854 new file mode 100644
56855 index 0000000..a45d2e9
56856 --- /dev/null
56857 +++ b/grsecurity/grsec_log.c
56858 @@ -0,0 +1,322 @@
56859 +#include <linux/kernel.h>
56860 +#include <linux/sched.h>
56861 +#include <linux/file.h>
56862 +#include <linux/tty.h>
56863 +#include <linux/fs.h>
56864 +#include <linux/grinternal.h>
56865 +
56866 +#ifdef CONFIG_TREE_PREEMPT_RCU
56867 +#define DISABLE_PREEMPT() preempt_disable()
56868 +#define ENABLE_PREEMPT() preempt_enable()
56869 +#else
56870 +#define DISABLE_PREEMPT()
56871 +#define ENABLE_PREEMPT()
56872 +#endif
56873 +
56874 +#define BEGIN_LOCKS(x) \
56875 + DISABLE_PREEMPT(); \
56876 + rcu_read_lock(); \
56877 + read_lock(&tasklist_lock); \
56878 + read_lock(&grsec_exec_file_lock); \
56879 + if (x != GR_DO_AUDIT) \
56880 + spin_lock(&grsec_alert_lock); \
56881 + else \
56882 + spin_lock(&grsec_audit_lock)
56883 +
56884 +#define END_LOCKS(x) \
56885 + if (x != GR_DO_AUDIT) \
56886 + spin_unlock(&grsec_alert_lock); \
56887 + else \
56888 + spin_unlock(&grsec_audit_lock); \
56889 + read_unlock(&grsec_exec_file_lock); \
56890 + read_unlock(&tasklist_lock); \
56891 + rcu_read_unlock(); \
56892 + ENABLE_PREEMPT(); \
56893 + if (x == GR_DONT_AUDIT) \
56894 + gr_handle_alertkill(current)
56895 +
56896 +enum {
56897 + FLOODING,
56898 + NO_FLOODING
56899 +};
56900 +
56901 +extern char *gr_alert_log_fmt;
56902 +extern char *gr_audit_log_fmt;
56903 +extern char *gr_alert_log_buf;
56904 +extern char *gr_audit_log_buf;
56905 +
56906 +static int gr_log_start(int audit)
56907 +{
56908 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
56909 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
56910 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
56911 +#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
56912 + unsigned long curr_secs = get_seconds();
56913 +
56914 + if (audit == GR_DO_AUDIT)
56915 + goto set_fmt;
56916 +
56917 + if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
56918 + grsec_alert_wtime = curr_secs;
56919 + grsec_alert_fyet = 0;
56920 + } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
56921 + && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
56922 + grsec_alert_fyet++;
56923 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
56924 + grsec_alert_wtime = curr_secs;
56925 + grsec_alert_fyet++;
56926 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
56927 + return FLOODING;
56928 + }
56929 + else return FLOODING;
56930 +
56931 +set_fmt:
56932 +#endif
56933 + memset(buf, 0, PAGE_SIZE);
56934 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
56935 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
56936 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
56937 + } else if (current->signal->curr_ip) {
56938 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
56939 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
56940 + } else if (gr_acl_is_enabled()) {
56941 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
56942 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
56943 + } else {
56944 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
56945 + strcpy(buf, fmt);
56946 + }
56947 +
56948 + return NO_FLOODING;
56949 +}
56950 +
56951 +static void gr_log_middle(int audit, const char *msg, va_list ap)
56952 + __attribute__ ((format (printf, 2, 0)));
56953 +
56954 +static void gr_log_middle(int audit, const char *msg, va_list ap)
56955 +{
56956 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
56957 + unsigned int len = strlen(buf);
56958 +
56959 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
56960 +
56961 + return;
56962 +}
56963 +
56964 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
56965 + __attribute__ ((format (printf, 2, 3)));
56966 +
56967 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
56968 +{
56969 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
56970 + unsigned int len = strlen(buf);
56971 + va_list ap;
56972 +
56973 + va_start(ap, msg);
56974 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
56975 + va_end(ap);
56976 +
56977 + return;
56978 +}
56979 +
56980 +static void gr_log_end(int audit, int append_default)
56981 +{
56982 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
56983 +
56984 + if (append_default) {
56985 + unsigned int len = strlen(buf);
56986 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
56987 + }
56988 +
56989 + printk("%s\n", buf);
56990 +
56991 + return;
56992 +}
56993 +
56994 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
56995 +{
56996 + int logtype;
56997 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
56998 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
56999 + void *voidptr = NULL;
57000 + int num1 = 0, num2 = 0;
57001 + unsigned long ulong1 = 0, ulong2 = 0;
57002 + struct dentry *dentry = NULL;
57003 + struct vfsmount *mnt = NULL;
57004 + struct file *file = NULL;
57005 + struct task_struct *task = NULL;
57006 + const struct cred *cred, *pcred;
57007 + va_list ap;
57008 +
57009 + BEGIN_LOCKS(audit);
57010 + logtype = gr_log_start(audit);
57011 + if (logtype == FLOODING) {
57012 + END_LOCKS(audit);
57013 + return;
57014 + }
57015 + va_start(ap, argtypes);
57016 + switch (argtypes) {
57017 + case GR_TTYSNIFF:
57018 + task = va_arg(ap, struct task_struct *);
57019 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
57020 + break;
57021 + case GR_SYSCTL_HIDDEN:
57022 + str1 = va_arg(ap, char *);
57023 + gr_log_middle_varargs(audit, msg, result, str1);
57024 + break;
57025 + case GR_RBAC:
57026 + dentry = va_arg(ap, struct dentry *);
57027 + mnt = va_arg(ap, struct vfsmount *);
57028 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
57029 + break;
57030 + case GR_RBAC_STR:
57031 + dentry = va_arg(ap, struct dentry *);
57032 + mnt = va_arg(ap, struct vfsmount *);
57033 + str1 = va_arg(ap, char *);
57034 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
57035 + break;
57036 + case GR_STR_RBAC:
57037 + str1 = va_arg(ap, char *);
57038 + dentry = va_arg(ap, struct dentry *);
57039 + mnt = va_arg(ap, struct vfsmount *);
57040 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
57041 + break;
57042 + case GR_RBAC_MODE2:
57043 + dentry = va_arg(ap, struct dentry *);
57044 + mnt = va_arg(ap, struct vfsmount *);
57045 + str1 = va_arg(ap, char *);
57046 + str2 = va_arg(ap, char *);
57047 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
57048 + break;
57049 + case GR_RBAC_MODE3:
57050 + dentry = va_arg(ap, struct dentry *);
57051 + mnt = va_arg(ap, struct vfsmount *);
57052 + str1 = va_arg(ap, char *);
57053 + str2 = va_arg(ap, char *);
57054 + str3 = va_arg(ap, char *);
57055 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
57056 + break;
57057 + case GR_FILENAME:
57058 + dentry = va_arg(ap, struct dentry *);
57059 + mnt = va_arg(ap, struct vfsmount *);
57060 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
57061 + break;
57062 + case GR_STR_FILENAME:
57063 + str1 = va_arg(ap, char *);
57064 + dentry = va_arg(ap, struct dentry *);
57065 + mnt = va_arg(ap, struct vfsmount *);
57066 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
57067 + break;
57068 + case GR_FILENAME_STR:
57069 + dentry = va_arg(ap, struct dentry *);
57070 + mnt = va_arg(ap, struct vfsmount *);
57071 + str1 = va_arg(ap, char *);
57072 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
57073 + break;
57074 + case GR_FILENAME_TWO_INT:
57075 + dentry = va_arg(ap, struct dentry *);
57076 + mnt = va_arg(ap, struct vfsmount *);
57077 + num1 = va_arg(ap, int);
57078 + num2 = va_arg(ap, int);
57079 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
57080 + break;
57081 + case GR_FILENAME_TWO_INT_STR:
57082 + dentry = va_arg(ap, struct dentry *);
57083 + mnt = va_arg(ap, struct vfsmount *);
57084 + num1 = va_arg(ap, int);
57085 + num2 = va_arg(ap, int);
57086 + str1 = va_arg(ap, char *);
57087 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
57088 + break;
57089 + case GR_TEXTREL:
57090 + file = va_arg(ap, struct file *);
57091 + ulong1 = va_arg(ap, unsigned long);
57092 + ulong2 = va_arg(ap, unsigned long);
57093 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
57094 + break;
57095 + case GR_PTRACE:
57096 + task = va_arg(ap, struct task_struct *);
57097 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
57098 + break;
57099 + case GR_RESOURCE:
57100 + task = va_arg(ap, struct task_struct *);
57101 + cred = __task_cred(task);
57102 + pcred = __task_cred(task->real_parent);
57103 + ulong1 = va_arg(ap, unsigned long);
57104 + str1 = va_arg(ap, char *);
57105 + ulong2 = va_arg(ap, unsigned long);
57106 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
57107 + break;
57108 + case GR_CAP:
57109 + task = va_arg(ap, struct task_struct *);
57110 + cred = __task_cred(task);
57111 + pcred = __task_cred(task->real_parent);
57112 + str1 = va_arg(ap, char *);
57113 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
57114 + break;
57115 + case GR_SIG:
57116 + str1 = va_arg(ap, char *);
57117 + voidptr = va_arg(ap, void *);
57118 + gr_log_middle_varargs(audit, msg, str1, voidptr);
57119 + break;
57120 + case GR_SIG2:
57121 + task = va_arg(ap, struct task_struct *);
57122 + cred = __task_cred(task);
57123 + pcred = __task_cred(task->real_parent);
57124 + num1 = va_arg(ap, int);
57125 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
57126 + break;
57127 + case GR_CRASH1:
57128 + task = va_arg(ap, struct task_struct *);
57129 + cred = __task_cred(task);
57130 + pcred = __task_cred(task->real_parent);
57131 + ulong1 = va_arg(ap, unsigned long);
57132 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
57133 + break;
57134 + case GR_CRASH2:
57135 + task = va_arg(ap, struct task_struct *);
57136 + cred = __task_cred(task);
57137 + pcred = __task_cred(task->real_parent);
57138 + ulong1 = va_arg(ap, unsigned long);
57139 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
57140 + break;
57141 + case GR_RWXMAP:
57142 + file = va_arg(ap, struct file *);
57143 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
57144 + break;
57145 + case GR_PSACCT:
57146 + {
57147 + unsigned int wday, cday;
57148 + __u8 whr, chr;
57149 + __u8 wmin, cmin;
57150 + __u8 wsec, csec;
57151 + char cur_tty[64] = { 0 };
57152 + char parent_tty[64] = { 0 };
57153 +
57154 + task = va_arg(ap, struct task_struct *);
57155 + wday = va_arg(ap, unsigned int);
57156 + cday = va_arg(ap, unsigned int);
57157 + whr = va_arg(ap, int);
57158 + chr = va_arg(ap, int);
57159 + wmin = va_arg(ap, int);
57160 + cmin = va_arg(ap, int);
57161 + wsec = va_arg(ap, int);
57162 + csec = va_arg(ap, int);
57163 + ulong1 = va_arg(ap, unsigned long);
57164 + cred = __task_cred(task);
57165 + pcred = __task_cred(task->real_parent);
57166 +
57167 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
57168 + }
57169 + break;
57170 + default:
57171 + gr_log_middle(audit, msg, ap);
57172 + }
57173 + va_end(ap);
57174 + // these don't need DEFAULTSECARGS printed on the end
57175 + if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
57176 + gr_log_end(audit, 0);
57177 + else
57178 + gr_log_end(audit, 1);
57179 + END_LOCKS(audit);
57180 +}
57181 diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
57182 new file mode 100644
57183 index 0000000..f536303
57184 --- /dev/null
57185 +++ b/grsecurity/grsec_mem.c
57186 @@ -0,0 +1,40 @@
57187 +#include <linux/kernel.h>
57188 +#include <linux/sched.h>
57189 +#include <linux/mm.h>
57190 +#include <linux/mman.h>
57191 +#include <linux/grinternal.h>
57192 +
57193 +void
57194 +gr_handle_ioperm(void)
57195 +{
57196 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
57197 + return;
57198 +}
57199 +
57200 +void
57201 +gr_handle_iopl(void)
57202 +{
57203 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
57204 + return;
57205 +}
57206 +
57207 +void
57208 +gr_handle_mem_readwrite(u64 from, u64 to)
57209 +{
57210 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
57211 + return;
57212 +}
57213 +
57214 +void
57215 +gr_handle_vm86(void)
57216 +{
57217 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
57218 + return;
57219 +}
57220 +
57221 +void
57222 +gr_log_badprocpid(const char *entry)
57223 +{
57224 + gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
57225 + return;
57226 +}
57227 diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
57228 new file mode 100644
57229 index 0000000..2131422
57230 --- /dev/null
57231 +++ b/grsecurity/grsec_mount.c
57232 @@ -0,0 +1,62 @@
57233 +#include <linux/kernel.h>
57234 +#include <linux/sched.h>
57235 +#include <linux/mount.h>
57236 +#include <linux/grsecurity.h>
57237 +#include <linux/grinternal.h>
57238 +
57239 +void
57240 +gr_log_remount(const char *devname, const int retval)
57241 +{
57242 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
57243 + if (grsec_enable_mount && (retval >= 0))
57244 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
57245 +#endif
57246 + return;
57247 +}
57248 +
57249 +void
57250 +gr_log_unmount(const char *devname, const int retval)
57251 +{
57252 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
57253 + if (grsec_enable_mount && (retval >= 0))
57254 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
57255 +#endif
57256 + return;
57257 +}
57258 +
57259 +void
57260 +gr_log_mount(const char *from, const char *to, const int retval)
57261 +{
57262 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
57263 + if (grsec_enable_mount && (retval >= 0))
57264 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
57265 +#endif
57266 + return;
57267 +}
57268 +
57269 +int
57270 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
57271 +{
57272 +#ifdef CONFIG_GRKERNSEC_ROFS
57273 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
57274 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
57275 + return -EPERM;
57276 + } else
57277 + return 0;
57278 +#endif
57279 + return 0;
57280 +}
57281 +
57282 +int
57283 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
57284 +{
57285 +#ifdef CONFIG_GRKERNSEC_ROFS
57286 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
57287 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
57288 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
57289 + return -EPERM;
57290 + } else
57291 + return 0;
57292 +#endif
57293 + return 0;
57294 +}
57295 diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
57296 new file mode 100644
57297 index 0000000..a3b12a0
57298 --- /dev/null
57299 +++ b/grsecurity/grsec_pax.c
57300 @@ -0,0 +1,36 @@
57301 +#include <linux/kernel.h>
57302 +#include <linux/sched.h>
57303 +#include <linux/mm.h>
57304 +#include <linux/file.h>
57305 +#include <linux/grinternal.h>
57306 +#include <linux/grsecurity.h>
57307 +
57308 +void
57309 +gr_log_textrel(struct vm_area_struct * vma)
57310 +{
57311 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
57312 + if (grsec_enable_audit_textrel)
57313 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
57314 +#endif
57315 + return;
57316 +}
57317 +
57318 +void
57319 +gr_log_rwxmmap(struct file *file)
57320 +{
57321 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
57322 + if (grsec_enable_log_rwxmaps)
57323 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
57324 +#endif
57325 + return;
57326 +}
57327 +
57328 +void
57329 +gr_log_rwxmprotect(struct file *file)
57330 +{
57331 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
57332 + if (grsec_enable_log_rwxmaps)
57333 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
57334 +#endif
57335 + return;
57336 +}
57337 diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
57338 new file mode 100644
57339 index 0000000..f7f29aa
57340 --- /dev/null
57341 +++ b/grsecurity/grsec_ptrace.c
57342 @@ -0,0 +1,30 @@
57343 +#include <linux/kernel.h>
57344 +#include <linux/sched.h>
57345 +#include <linux/grinternal.h>
57346 +#include <linux/security.h>
57347 +
57348 +void
57349 +gr_audit_ptrace(struct task_struct *task)
57350 +{
57351 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
57352 + if (grsec_enable_audit_ptrace)
57353 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
57354 +#endif
57355 + return;
57356 +}
57357 +
57358 +int
57359 +gr_ptrace_readexec(struct file *file, int unsafe_flags)
57360 +{
57361 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
57362 + const struct dentry *dentry = file->f_path.dentry;
57363 + const struct vfsmount *mnt = file->f_path.mnt;
57364 +
57365 + if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
57366 + (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
57367 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
57368 + return -EACCES;
57369 + }
57370 +#endif
57371 + return 0;
57372 +}
57373 diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
57374 new file mode 100644
57375 index 0000000..7a5b2de
57376 --- /dev/null
57377 +++ b/grsecurity/grsec_sig.c
57378 @@ -0,0 +1,207 @@
57379 +#include <linux/kernel.h>
57380 +#include <linux/sched.h>
57381 +#include <linux/delay.h>
57382 +#include <linux/grsecurity.h>
57383 +#include <linux/grinternal.h>
57384 +#include <linux/hardirq.h>
57385 +
57386 +char *signames[] = {
57387 + [SIGSEGV] = "Segmentation fault",
57388 + [SIGILL] = "Illegal instruction",
57389 + [SIGABRT] = "Abort",
57390 + [SIGBUS] = "Invalid alignment/Bus error"
57391 +};
57392 +
57393 +void
57394 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
57395 +{
57396 +#ifdef CONFIG_GRKERNSEC_SIGNAL
57397 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
57398 + (sig == SIGABRT) || (sig == SIGBUS))) {
57399 + if (t->pid == current->pid) {
57400 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
57401 + } else {
57402 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
57403 + }
57404 + }
57405 +#endif
57406 + return;
57407 +}
57408 +
57409 +int
57410 +gr_handle_signal(const struct task_struct *p, const int sig)
57411 +{
57412 +#ifdef CONFIG_GRKERNSEC
57413 + /* ignore the 0 signal for protected task checks */
57414 + if (current->pid > 1 && sig && gr_check_protected_task(p)) {
57415 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
57416 + return -EPERM;
57417 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
57418 + return -EPERM;
57419 + }
57420 +#endif
57421 + return 0;
57422 +}
57423 +
57424 +#ifdef CONFIG_GRKERNSEC
57425 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
57426 +
57427 +int gr_fake_force_sig(int sig, struct task_struct *t)
57428 +{
57429 + unsigned long int flags;
57430 + int ret, blocked, ignored;
57431 + struct k_sigaction *action;
57432 +
57433 + spin_lock_irqsave(&t->sighand->siglock, flags);
57434 + action = &t->sighand->action[sig-1];
57435 + ignored = action->sa.sa_handler == SIG_IGN;
57436 + blocked = sigismember(&t->blocked, sig);
57437 + if (blocked || ignored) {
57438 + action->sa.sa_handler = SIG_DFL;
57439 + if (blocked) {
57440 + sigdelset(&t->blocked, sig);
57441 + recalc_sigpending_and_wake(t);
57442 + }
57443 + }
57444 + if (action->sa.sa_handler == SIG_DFL)
57445 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
57446 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
57447 +
57448 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
57449 +
57450 + return ret;
57451 +}
57452 +#endif
57453 +
57454 +#ifdef CONFIG_GRKERNSEC_BRUTE
57455 +#define GR_USER_BAN_TIME (15 * 60)
57456 +
57457 +static int __get_dumpable(unsigned long mm_flags)
57458 +{
57459 + int ret;
57460 +
57461 + ret = mm_flags & MMF_DUMPABLE_MASK;
57462 + return (ret >= 2) ? 2 : ret;
57463 +}
57464 +#endif
57465 +
57466 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
57467 +{
57468 +#ifdef CONFIG_GRKERNSEC_BRUTE
57469 + uid_t uid = 0;
57470 +
57471 + if (!grsec_enable_brute)
57472 + return;
57473 +
57474 + rcu_read_lock();
57475 + read_lock(&tasklist_lock);
57476 + read_lock(&grsec_exec_file_lock);
57477 + if (p->real_parent && p->real_parent->exec_file == p->exec_file)
57478 + p->real_parent->brute = 1;
57479 + else {
57480 + const struct cred *cred = __task_cred(p), *cred2;
57481 + struct task_struct *tsk, *tsk2;
57482 +
57483 + if (!__get_dumpable(mm_flags) && cred->uid) {
57484 + struct user_struct *user;
57485 +
57486 + uid = cred->uid;
57487 +
57488 + /* this is put upon execution past expiration */
57489 + user = find_user(uid);
57490 + if (user == NULL)
57491 + goto unlock;
57492 + user->banned = 1;
57493 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
57494 + if (user->ban_expires == ~0UL)
57495 + user->ban_expires--;
57496 +
57497 + do_each_thread(tsk2, tsk) {
57498 + cred2 = __task_cred(tsk);
57499 + if (tsk != p && cred2->uid == uid)
57500 + gr_fake_force_sig(SIGKILL, tsk);
57501 + } while_each_thread(tsk2, tsk);
57502 + }
57503 + }
57504 +unlock:
57505 + read_unlock(&grsec_exec_file_lock);
57506 + read_unlock(&tasklist_lock);
57507 + rcu_read_unlock();
57508 +
57509 + if (uid)
57510 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
57511 +
57512 +#endif
57513 + return;
57514 +}
57515 +
57516 +void gr_handle_brute_check(void)
57517 +{
57518 +#ifdef CONFIG_GRKERNSEC_BRUTE
57519 + if (current->brute)
57520 + msleep(30 * 1000);
57521 +#endif
57522 + return;
57523 +}
57524 +
57525 +void gr_handle_kernel_exploit(void)
57526 +{
57527 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
57528 + const struct cred *cred;
57529 + struct task_struct *tsk, *tsk2;
57530 + struct user_struct *user;
57531 + uid_t uid;
57532 +
57533 + if (in_irq() || in_serving_softirq() || in_nmi())
57534 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
57535 +
57536 + uid = current_uid();
57537 +
57538 + if (uid == 0)
57539 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
57540 + else {
57541 + /* kill all the processes of this user, hold a reference
57542 + to their creds struct, and prevent them from creating
57543 + another process until system reset
57544 + */
57545 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
57546 + /* we intentionally leak this ref */
57547 + user = get_uid(current->cred->user);
57548 + if (user) {
57549 + user->banned = 1;
57550 + user->ban_expires = ~0UL;
57551 + }
57552 +
57553 + read_lock(&tasklist_lock);
57554 + do_each_thread(tsk2, tsk) {
57555 + cred = __task_cred(tsk);
57556 + if (cred->uid == uid)
57557 + gr_fake_force_sig(SIGKILL, tsk);
57558 + } while_each_thread(tsk2, tsk);
57559 + read_unlock(&tasklist_lock);
57560 + }
57561 +#endif
57562 +}
57563 +
57564 +int __gr_process_user_ban(struct user_struct *user)
57565 +{
57566 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
57567 + if (unlikely(user->banned)) {
57568 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
57569 + user->banned = 0;
57570 + user->ban_expires = 0;
57571 + free_uid(user);
57572 + } else
57573 + return -EPERM;
57574 + }
57575 +#endif
57576 + return 0;
57577 +}
57578 +
57579 +int gr_process_user_ban(void)
57580 +{
57581 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
57582 + return __gr_process_user_ban(current->cred->user);
57583 +#endif
57584 + return 0;
57585 +}
57586 diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
57587 new file mode 100644
57588 index 0000000..4030d57
57589 --- /dev/null
57590 +++ b/grsecurity/grsec_sock.c
57591 @@ -0,0 +1,244 @@
57592 +#include <linux/kernel.h>
57593 +#include <linux/module.h>
57594 +#include <linux/sched.h>
57595 +#include <linux/file.h>
57596 +#include <linux/net.h>
57597 +#include <linux/in.h>
57598 +#include <linux/ip.h>
57599 +#include <net/sock.h>
57600 +#include <net/inet_sock.h>
57601 +#include <linux/grsecurity.h>
57602 +#include <linux/grinternal.h>
57603 +#include <linux/gracl.h>
57604 +
57605 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
57606 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
57607 +
57608 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
57609 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
57610 +
57611 +#ifdef CONFIG_UNIX_MODULE
57612 +EXPORT_SYMBOL(gr_acl_handle_unix);
57613 +EXPORT_SYMBOL(gr_acl_handle_mknod);
57614 +EXPORT_SYMBOL(gr_handle_chroot_unix);
57615 +EXPORT_SYMBOL(gr_handle_create);
57616 +#endif
57617 +
57618 +#ifdef CONFIG_GRKERNSEC
57619 +#define gr_conn_table_size 32749
57620 +struct conn_table_entry {
57621 + struct conn_table_entry *next;
57622 + struct signal_struct *sig;
57623 +};
57624 +
57625 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
57626 +DEFINE_SPINLOCK(gr_conn_table_lock);
57627 +
57628 +extern const char * gr_socktype_to_name(unsigned char type);
57629 +extern const char * gr_proto_to_name(unsigned char proto);
57630 +extern const char * gr_sockfamily_to_name(unsigned char family);
57631 +
57632 +static __inline__ int
57633 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
57634 +{
57635 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
57636 +}
57637 +
57638 +static __inline__ int
57639 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
57640 + __u16 sport, __u16 dport)
57641 +{
57642 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
57643 + sig->gr_sport == sport && sig->gr_dport == dport))
57644 + return 1;
57645 + else
57646 + return 0;
57647 +}
57648 +
57649 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
57650 +{
57651 + struct conn_table_entry **match;
57652 + unsigned int index;
57653 +
57654 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
57655 + sig->gr_sport, sig->gr_dport,
57656 + gr_conn_table_size);
57657 +
57658 + newent->sig = sig;
57659 +
57660 + match = &gr_conn_table[index];
57661 + newent->next = *match;
57662 + *match = newent;
57663 +
57664 + return;
57665 +}
57666 +
57667 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
57668 +{
57669 + struct conn_table_entry *match, *last = NULL;
57670 + unsigned int index;
57671 +
57672 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
57673 + sig->gr_sport, sig->gr_dport,
57674 + gr_conn_table_size);
57675 +
57676 + match = gr_conn_table[index];
57677 + while (match && !conn_match(match->sig,
57678 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
57679 + sig->gr_dport)) {
57680 + last = match;
57681 + match = match->next;
57682 + }
57683 +
57684 + if (match) {
57685 + if (last)
57686 + last->next = match->next;
57687 + else
57688 + gr_conn_table[index] = NULL;
57689 + kfree(match);
57690 + }
57691 +
57692 + return;
57693 +}
57694 +
57695 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
57696 + __u16 sport, __u16 dport)
57697 +{
57698 + struct conn_table_entry *match;
57699 + unsigned int index;
57700 +
57701 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
57702 +
57703 + match = gr_conn_table[index];
57704 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
57705 + match = match->next;
57706 +
57707 + if (match)
57708 + return match->sig;
57709 + else
57710 + return NULL;
57711 +}
57712 +
57713 +#endif
57714 +
57715 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
57716 +{
57717 +#ifdef CONFIG_GRKERNSEC
57718 + struct signal_struct *sig = task->signal;
57719 + struct conn_table_entry *newent;
57720 +
57721 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
57722 + if (newent == NULL)
57723 + return;
57724 + /* no bh lock needed since we are called with bh disabled */
57725 + spin_lock(&gr_conn_table_lock);
57726 + gr_del_task_from_ip_table_nolock(sig);
57727 + sig->gr_saddr = inet->inet_rcv_saddr;
57728 + sig->gr_daddr = inet->inet_daddr;
57729 + sig->gr_sport = inet->inet_sport;
57730 + sig->gr_dport = inet->inet_dport;
57731 + gr_add_to_task_ip_table_nolock(sig, newent);
57732 + spin_unlock(&gr_conn_table_lock);
57733 +#endif
57734 + return;
57735 +}
57736 +
57737 +void gr_del_task_from_ip_table(struct task_struct *task)
57738 +{
57739 +#ifdef CONFIG_GRKERNSEC
57740 + spin_lock_bh(&gr_conn_table_lock);
57741 + gr_del_task_from_ip_table_nolock(task->signal);
57742 + spin_unlock_bh(&gr_conn_table_lock);
57743 +#endif
57744 + return;
57745 +}
57746 +
57747 +void
57748 +gr_attach_curr_ip(const struct sock *sk)
57749 +{
57750 +#ifdef CONFIG_GRKERNSEC
57751 + struct signal_struct *p, *set;
57752 + const struct inet_sock *inet = inet_sk(sk);
57753 +
57754 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
57755 + return;
57756 +
57757 + set = current->signal;
57758 +
57759 + spin_lock_bh(&gr_conn_table_lock);
57760 + p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
57761 + inet->inet_dport, inet->inet_sport);
57762 + if (unlikely(p != NULL)) {
57763 + set->curr_ip = p->curr_ip;
57764 + set->used_accept = 1;
57765 + gr_del_task_from_ip_table_nolock(p);
57766 + spin_unlock_bh(&gr_conn_table_lock);
57767 + return;
57768 + }
57769 + spin_unlock_bh(&gr_conn_table_lock);
57770 +
57771 + set->curr_ip = inet->inet_daddr;
57772 + set->used_accept = 1;
57773 +#endif
57774 + return;
57775 +}
57776 +
57777 +int
57778 +gr_handle_sock_all(const int family, const int type, const int protocol)
57779 +{
57780 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
57781 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
57782 + (family != AF_UNIX)) {
57783 + if (family == AF_INET)
57784 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
57785 + else
57786 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
57787 + return -EACCES;
57788 + }
57789 +#endif
57790 + return 0;
57791 +}
57792 +
57793 +int
57794 +gr_handle_sock_server(const struct sockaddr *sck)
57795 +{
57796 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
57797 + if (grsec_enable_socket_server &&
57798 + in_group_p(grsec_socket_server_gid) &&
57799 + sck && (sck->sa_family != AF_UNIX) &&
57800 + (sck->sa_family != AF_LOCAL)) {
57801 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
57802 + return -EACCES;
57803 + }
57804 +#endif
57805 + return 0;
57806 +}
57807 +
57808 +int
57809 +gr_handle_sock_server_other(const struct sock *sck)
57810 +{
57811 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
57812 + if (grsec_enable_socket_server &&
57813 + in_group_p(grsec_socket_server_gid) &&
57814 + sck && (sck->sk_family != AF_UNIX) &&
57815 + (sck->sk_family != AF_LOCAL)) {
57816 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
57817 + return -EACCES;
57818 + }
57819 +#endif
57820 + return 0;
57821 +}
57822 +
57823 +int
57824 +gr_handle_sock_client(const struct sockaddr *sck)
57825 +{
57826 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
57827 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
57828 + sck && (sck->sa_family != AF_UNIX) &&
57829 + (sck->sa_family != AF_LOCAL)) {
57830 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
57831 + return -EACCES;
57832 + }
57833 +#endif
57834 + return 0;
57835 +}
57836 diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
57837 new file mode 100644
57838 index 0000000..a1aedd7
57839 --- /dev/null
57840 +++ b/grsecurity/grsec_sysctl.c
57841 @@ -0,0 +1,451 @@
57842 +#include <linux/kernel.h>
57843 +#include <linux/sched.h>
57844 +#include <linux/sysctl.h>
57845 +#include <linux/grsecurity.h>
57846 +#include <linux/grinternal.h>
57847 +
57848 +int
57849 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
57850 +{
57851 +#ifdef CONFIG_GRKERNSEC_SYSCTL
57852 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
57853 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
57854 + return -EACCES;
57855 + }
57856 +#endif
57857 + return 0;
57858 +}
57859 +
57860 +#ifdef CONFIG_GRKERNSEC_ROFS
57861 +static int __maybe_unused one = 1;
57862 +#endif
57863 +
57864 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
57865 +struct ctl_table grsecurity_table[] = {
57866 +#ifdef CONFIG_GRKERNSEC_SYSCTL
57867 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
57868 +#ifdef CONFIG_GRKERNSEC_IO
57869 + {
57870 + .procname = "disable_priv_io",
57871 + .data = &grsec_disable_privio,
57872 + .maxlen = sizeof(int),
57873 + .mode = 0600,
57874 + .proc_handler = &proc_dointvec,
57875 + },
57876 +#endif
57877 +#endif
57878 +#ifdef CONFIG_GRKERNSEC_LINK
57879 + {
57880 + .procname = "linking_restrictions",
57881 + .data = &grsec_enable_link,
57882 + .maxlen = sizeof(int),
57883 + .mode = 0600,
57884 + .proc_handler = &proc_dointvec,
57885 + },
57886 +#endif
57887 +#ifdef CONFIG_GRKERNSEC_BRUTE
57888 + {
57889 + .procname = "deter_bruteforce",
57890 + .data = &grsec_enable_brute,
57891 + .maxlen = sizeof(int),
57892 + .mode = 0600,
57893 + .proc_handler = &proc_dointvec,
57894 + },
57895 +#endif
57896 +#ifdef CONFIG_GRKERNSEC_FIFO
57897 + {
57898 + .procname = "fifo_restrictions",
57899 + .data = &grsec_enable_fifo,
57900 + .maxlen = sizeof(int),
57901 + .mode = 0600,
57902 + .proc_handler = &proc_dointvec,
57903 + },
57904 +#endif
57905 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
57906 + {
57907 + .procname = "ptrace_readexec",
57908 + .data = &grsec_enable_ptrace_readexec,
57909 + .maxlen = sizeof(int),
57910 + .mode = 0600,
57911 + .proc_handler = &proc_dointvec,
57912 + },
57913 +#endif
57914 +#ifdef CONFIG_GRKERNSEC_SETXID
57915 + {
57916 + .procname = "consistent_setxid",
57917 + .data = &grsec_enable_setxid,
57918 + .maxlen = sizeof(int),
57919 + .mode = 0600,
57920 + .proc_handler = &proc_dointvec,
57921 + },
57922 +#endif
57923 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
57924 + {
57925 + .procname = "ip_blackhole",
57926 + .data = &grsec_enable_blackhole,
57927 + .maxlen = sizeof(int),
57928 + .mode = 0600,
57929 + .proc_handler = &proc_dointvec,
57930 + },
57931 + {
57932 + .procname = "lastack_retries",
57933 + .data = &grsec_lastack_retries,
57934 + .maxlen = sizeof(int),
57935 + .mode = 0600,
57936 + .proc_handler = &proc_dointvec,
57937 + },
57938 +#endif
57939 +#ifdef CONFIG_GRKERNSEC_EXECLOG
57940 + {
57941 + .procname = "exec_logging",
57942 + .data = &grsec_enable_execlog,
57943 + .maxlen = sizeof(int),
57944 + .mode = 0600,
57945 + .proc_handler = &proc_dointvec,
57946 + },
57947 +#endif
57948 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
57949 + {
57950 + .procname = "rwxmap_logging",
57951 + .data = &grsec_enable_log_rwxmaps,
57952 + .maxlen = sizeof(int),
57953 + .mode = 0600,
57954 + .proc_handler = &proc_dointvec,
57955 + },
57956 +#endif
57957 +#ifdef CONFIG_GRKERNSEC_SIGNAL
57958 + {
57959 + .procname = "signal_logging",
57960 + .data = &grsec_enable_signal,
57961 + .maxlen = sizeof(int),
57962 + .mode = 0600,
57963 + .proc_handler = &proc_dointvec,
57964 + },
57965 +#endif
57966 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
57967 + {
57968 + .procname = "forkfail_logging",
57969 + .data = &grsec_enable_forkfail,
57970 + .maxlen = sizeof(int),
57971 + .mode = 0600,
57972 + .proc_handler = &proc_dointvec,
57973 + },
57974 +#endif
57975 +#ifdef CONFIG_GRKERNSEC_TIME
57976 + {
57977 + .procname = "timechange_logging",
57978 + .data = &grsec_enable_time,
57979 + .maxlen = sizeof(int),
57980 + .mode = 0600,
57981 + .proc_handler = &proc_dointvec,
57982 + },
57983 +#endif
57984 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
57985 + {
57986 + .procname = "chroot_deny_shmat",
57987 + .data = &grsec_enable_chroot_shmat,
57988 + .maxlen = sizeof(int),
57989 + .mode = 0600,
57990 + .proc_handler = &proc_dointvec,
57991 + },
57992 +#endif
57993 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
57994 + {
57995 + .procname = "chroot_deny_unix",
57996 + .data = &grsec_enable_chroot_unix,
57997 + .maxlen = sizeof(int),
57998 + .mode = 0600,
57999 + .proc_handler = &proc_dointvec,
58000 + },
58001 +#endif
58002 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
58003 + {
58004 + .procname = "chroot_deny_mount",
58005 + .data = &grsec_enable_chroot_mount,
58006 + .maxlen = sizeof(int),
58007 + .mode = 0600,
58008 + .proc_handler = &proc_dointvec,
58009 + },
58010 +#endif
58011 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
58012 + {
58013 + .procname = "chroot_deny_fchdir",
58014 + .data = &grsec_enable_chroot_fchdir,
58015 + .maxlen = sizeof(int),
58016 + .mode = 0600,
58017 + .proc_handler = &proc_dointvec,
58018 + },
58019 +#endif
58020 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
58021 + {
58022 + .procname = "chroot_deny_chroot",
58023 + .data = &grsec_enable_chroot_double,
58024 + .maxlen = sizeof(int),
58025 + .mode = 0600,
58026 + .proc_handler = &proc_dointvec,
58027 + },
58028 +#endif
58029 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
58030 + {
58031 + .procname = "chroot_deny_pivot",
58032 + .data = &grsec_enable_chroot_pivot,
58033 + .maxlen = sizeof(int),
58034 + .mode = 0600,
58035 + .proc_handler = &proc_dointvec,
58036 + },
58037 +#endif
58038 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
58039 + {
58040 + .procname = "chroot_enforce_chdir",
58041 + .data = &grsec_enable_chroot_chdir,
58042 + .maxlen = sizeof(int),
58043 + .mode = 0600,
58044 + .proc_handler = &proc_dointvec,
58045 + },
58046 +#endif
58047 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
58048 + {
58049 + .procname = "chroot_deny_chmod",
58050 + .data = &grsec_enable_chroot_chmod,
58051 + .maxlen = sizeof(int),
58052 + .mode = 0600,
58053 + .proc_handler = &proc_dointvec,
58054 + },
58055 +#endif
58056 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
58057 + {
58058 + .procname = "chroot_deny_mknod",
58059 + .data = &grsec_enable_chroot_mknod,
58060 + .maxlen = sizeof(int),
58061 + .mode = 0600,
58062 + .proc_handler = &proc_dointvec,
58063 + },
58064 +#endif
58065 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
58066 + {
58067 + .procname = "chroot_restrict_nice",
58068 + .data = &grsec_enable_chroot_nice,
58069 + .maxlen = sizeof(int),
58070 + .mode = 0600,
58071 + .proc_handler = &proc_dointvec,
58072 + },
58073 +#endif
58074 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
58075 + {
58076 + .procname = "chroot_execlog",
58077 + .data = &grsec_enable_chroot_execlog,
58078 + .maxlen = sizeof(int),
58079 + .mode = 0600,
58080 + .proc_handler = &proc_dointvec,
58081 + },
58082 +#endif
58083 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
58084 + {
58085 + .procname = "chroot_caps",
58086 + .data = &grsec_enable_chroot_caps,
58087 + .maxlen = sizeof(int),
58088 + .mode = 0600,
58089 + .proc_handler = &proc_dointvec,
58090 + },
58091 +#endif
58092 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
58093 + {
58094 + .procname = "chroot_deny_sysctl",
58095 + .data = &grsec_enable_chroot_sysctl,
58096 + .maxlen = sizeof(int),
58097 + .mode = 0600,
58098 + .proc_handler = &proc_dointvec,
58099 + },
58100 +#endif
58101 +#ifdef CONFIG_GRKERNSEC_TPE
58102 + {
58103 + .procname = "tpe",
58104 + .data = &grsec_enable_tpe,
58105 + .maxlen = sizeof(int),
58106 + .mode = 0600,
58107 + .proc_handler = &proc_dointvec,
58108 + },
58109 + {
58110 + .procname = "tpe_gid",
58111 + .data = &grsec_tpe_gid,
58112 + .maxlen = sizeof(int),
58113 + .mode = 0600,
58114 + .proc_handler = &proc_dointvec,
58115 + },
58116 +#endif
58117 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
58118 + {
58119 + .procname = "tpe_invert",
58120 + .data = &grsec_enable_tpe_invert,
58121 + .maxlen = sizeof(int),
58122 + .mode = 0600,
58123 + .proc_handler = &proc_dointvec,
58124 + },
58125 +#endif
58126 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
58127 + {
58128 + .procname = "tpe_restrict_all",
58129 + .data = &grsec_enable_tpe_all,
58130 + .maxlen = sizeof(int),
58131 + .mode = 0600,
58132 + .proc_handler = &proc_dointvec,
58133 + },
58134 +#endif
58135 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
58136 + {
58137 + .procname = "socket_all",
58138 + .data = &grsec_enable_socket_all,
58139 + .maxlen = sizeof(int),
58140 + .mode = 0600,
58141 + .proc_handler = &proc_dointvec,
58142 + },
58143 + {
58144 + .procname = "socket_all_gid",
58145 + .data = &grsec_socket_all_gid,
58146 + .maxlen = sizeof(int),
58147 + .mode = 0600,
58148 + .proc_handler = &proc_dointvec,
58149 + },
58150 +#endif
58151 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
58152 + {
58153 + .procname = "socket_client",
58154 + .data = &grsec_enable_socket_client,
58155 + .maxlen = sizeof(int),
58156 + .mode = 0600,
58157 + .proc_handler = &proc_dointvec,
58158 + },
58159 + {
58160 + .procname = "socket_client_gid",
58161 + .data = &grsec_socket_client_gid,
58162 + .maxlen = sizeof(int),
58163 + .mode = 0600,
58164 + .proc_handler = &proc_dointvec,
58165 + },
58166 +#endif
58167 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
58168 + {
58169 + .procname = "socket_server",
58170 + .data = &grsec_enable_socket_server,
58171 + .maxlen = sizeof(int),
58172 + .mode = 0600,
58173 + .proc_handler = &proc_dointvec,
58174 + },
58175 + {
58176 + .procname = "socket_server_gid",
58177 + .data = &grsec_socket_server_gid,
58178 + .maxlen = sizeof(int),
58179 + .mode = 0600,
58180 + .proc_handler = &proc_dointvec,
58181 + },
58182 +#endif
58183 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
58184 + {
58185 + .procname = "audit_group",
58186 + .data = &grsec_enable_group,
58187 + .maxlen = sizeof(int),
58188 + .mode = 0600,
58189 + .proc_handler = &proc_dointvec,
58190 + },
58191 + {
58192 + .procname = "audit_gid",
58193 + .data = &grsec_audit_gid,
58194 + .maxlen = sizeof(int),
58195 + .mode = 0600,
58196 + .proc_handler = &proc_dointvec,
58197 + },
58198 +#endif
58199 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
58200 + {
58201 + .procname = "audit_chdir",
58202 + .data = &grsec_enable_chdir,
58203 + .maxlen = sizeof(int),
58204 + .mode = 0600,
58205 + .proc_handler = &proc_dointvec,
58206 + },
58207 +#endif
58208 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
58209 + {
58210 + .procname = "audit_mount",
58211 + .data = &grsec_enable_mount,
58212 + .maxlen = sizeof(int),
58213 + .mode = 0600,
58214 + .proc_handler = &proc_dointvec,
58215 + },
58216 +#endif
58217 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
58218 + {
58219 + .procname = "audit_textrel",
58220 + .data = &grsec_enable_audit_textrel,
58221 + .maxlen = sizeof(int),
58222 + .mode = 0600,
58223 + .proc_handler = &proc_dointvec,
58224 + },
58225 +#endif
58226 +#ifdef CONFIG_GRKERNSEC_DMESG
58227 + {
58228 + .procname = "dmesg",
58229 + .data = &grsec_enable_dmesg,
58230 + .maxlen = sizeof(int),
58231 + .mode = 0600,
58232 + .proc_handler = &proc_dointvec,
58233 + },
58234 +#endif
58235 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
58236 + {
58237 + .procname = "chroot_findtask",
58238 + .data = &grsec_enable_chroot_findtask,
58239 + .maxlen = sizeof(int),
58240 + .mode = 0600,
58241 + .proc_handler = &proc_dointvec,
58242 + },
58243 +#endif
58244 +#ifdef CONFIG_GRKERNSEC_RESLOG
58245 + {
58246 + .procname = "resource_logging",
58247 + .data = &grsec_resource_logging,
58248 + .maxlen = sizeof(int),
58249 + .mode = 0600,
58250 + .proc_handler = &proc_dointvec,
58251 + },
58252 +#endif
58253 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
58254 + {
58255 + .procname = "audit_ptrace",
58256 + .data = &grsec_enable_audit_ptrace,
58257 + .maxlen = sizeof(int),
58258 + .mode = 0600,
58259 + .proc_handler = &proc_dointvec,
58260 + },
58261 +#endif
58262 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
58263 + {
58264 + .procname = "harden_ptrace",
58265 + .data = &grsec_enable_harden_ptrace,
58266 + .maxlen = sizeof(int),
58267 + .mode = 0600,
58268 + .proc_handler = &proc_dointvec,
58269 + },
58270 +#endif
58271 + {
58272 + .procname = "grsec_lock",
58273 + .data = &grsec_lock,
58274 + .maxlen = sizeof(int),
58275 + .mode = 0600,
58276 + .proc_handler = &proc_dointvec,
58277 + },
58278 +#endif
58279 +#ifdef CONFIG_GRKERNSEC_ROFS
58280 + {
58281 + .procname = "romount_protect",
58282 + .data = &grsec_enable_rofs,
58283 + .maxlen = sizeof(int),
58284 + .mode = 0600,
58285 + .proc_handler = &proc_dointvec_minmax,
58286 + .extra1 = &one,
58287 + .extra2 = &one,
58288 + },
58289 +#endif
58290 + { }
58291 +};
58292 +#endif
58293 diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
58294 new file mode 100644
58295 index 0000000..0dc13c3
58296 --- /dev/null
58297 +++ b/grsecurity/grsec_time.c
58298 @@ -0,0 +1,16 @@
58299 +#include <linux/kernel.h>
58300 +#include <linux/sched.h>
58301 +#include <linux/grinternal.h>
58302 +#include <linux/module.h>
58303 +
58304 +void
58305 +gr_log_timechange(void)
58306 +{
58307 +#ifdef CONFIG_GRKERNSEC_TIME
58308 + if (grsec_enable_time)
58309 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
58310 +#endif
58311 + return;
58312 +}
58313 +
58314 +EXPORT_SYMBOL(gr_log_timechange);
58315 diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
58316 new file mode 100644
58317 index 0000000..07e0dc0
58318 --- /dev/null
58319 +++ b/grsecurity/grsec_tpe.c
58320 @@ -0,0 +1,73 @@
58321 +#include <linux/kernel.h>
58322 +#include <linux/sched.h>
58323 +#include <linux/file.h>
58324 +#include <linux/fs.h>
58325 +#include <linux/grinternal.h>
58326 +
58327 +extern int gr_acl_tpe_check(void);
58328 +
58329 +int
58330 +gr_tpe_allow(const struct file *file)
58331 +{
58332 +#ifdef CONFIG_GRKERNSEC
58333 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
58334 + const struct cred *cred = current_cred();
58335 + char *msg = NULL;
58336 + char *msg2 = NULL;
58337 +
58338 + // never restrict root
58339 + if (!cred->uid)
58340 + return 1;
58341 +
58342 + if (grsec_enable_tpe) {
58343 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
58344 + if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
58345 + msg = "not being in trusted group";
58346 + else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
58347 + msg = "being in untrusted group";
58348 +#else
58349 + if (in_group_p(grsec_tpe_gid))
58350 + msg = "being in untrusted group";
58351 +#endif
58352 + }
58353 + if (!msg && gr_acl_tpe_check())
58354 + msg = "being in untrusted role";
58355 +
58356 + // not in any affected group/role
58357 + if (!msg)
58358 + goto next_check;
58359 +
58360 + if (inode->i_uid)
58361 + msg2 = "file in non-root-owned directory";
58362 + else if (inode->i_mode & S_IWOTH)
58363 + msg2 = "file in world-writable directory";
58364 + else if (inode->i_mode & S_IWGRP)
58365 + msg2 = "file in group-writable directory";
58366 +
58367 + if (msg && msg2) {
58368 + char fullmsg[70] = {0};
58369 + snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
58370 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
58371 + return 0;
58372 + }
58373 + msg = NULL;
58374 +next_check:
58375 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
58376 + if (!grsec_enable_tpe || !grsec_enable_tpe_all)
58377 + return 1;
58378 +
58379 + if (inode->i_uid && (inode->i_uid != cred->uid))
58380 + msg = "directory not owned by user";
58381 + else if (inode->i_mode & S_IWOTH)
58382 + msg = "file in world-writable directory";
58383 + else if (inode->i_mode & S_IWGRP)
58384 + msg = "file in group-writable directory";
58385 +
58386 + if (msg) {
58387 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
58388 + return 0;
58389 + }
58390 +#endif
58391 +#endif
58392 + return 1;
58393 +}
58394 diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
58395 new file mode 100644
58396 index 0000000..9f7b1ac
58397 --- /dev/null
58398 +++ b/grsecurity/grsum.c
58399 @@ -0,0 +1,61 @@
58400 +#include <linux/err.h>
58401 +#include <linux/kernel.h>
58402 +#include <linux/sched.h>
58403 +#include <linux/mm.h>
58404 +#include <linux/scatterlist.h>
58405 +#include <linux/crypto.h>
58406 +#include <linux/gracl.h>
58407 +
58408 +
58409 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
58410 +#error "crypto and sha256 must be built into the kernel"
58411 +#endif
58412 +
58413 +int
58414 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
58415 +{
58416 + char *p;
58417 + struct crypto_hash *tfm;
58418 + struct hash_desc desc;
58419 + struct scatterlist sg;
58420 + unsigned char temp_sum[GR_SHA_LEN];
58421 + volatile int retval = 0;
58422 + volatile int dummy = 0;
58423 + unsigned int i;
58424 +
58425 + sg_init_table(&sg, 1);
58426 +
58427 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
58428 + if (IS_ERR(tfm)) {
58429 + /* should never happen, since sha256 should be built in */
58430 + return 1;
58431 + }
58432 +
58433 + desc.tfm = tfm;
58434 + desc.flags = 0;
58435 +
58436 + crypto_hash_init(&desc);
58437 +
58438 + p = salt;
58439 + sg_set_buf(&sg, p, GR_SALT_LEN);
58440 + crypto_hash_update(&desc, &sg, sg.length);
58441 +
58442 + p = entry->pw;
58443 + sg_set_buf(&sg, p, strlen(p));
58444 +
58445 + crypto_hash_update(&desc, &sg, sg.length);
58446 +
58447 + crypto_hash_final(&desc, temp_sum);
58448 +
58449 + memset(entry->pw, 0, GR_PW_LEN);
58450 +
58451 + for (i = 0; i < GR_SHA_LEN; i++)
58452 + if (sum[i] != temp_sum[i])
58453 + retval = 1;
58454 + else
58455 + dummy = 1; // waste a cycle
58456 +
58457 + crypto_free_hash(tfm);
58458 +
58459 + return retval;
58460 +}
58461 diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
58462 index 6cd5b64..f620d2d 100644
58463 --- a/include/acpi/acpi_bus.h
58464 +++ b/include/acpi/acpi_bus.h
58465 @@ -107,7 +107,7 @@ struct acpi_device_ops {
58466 acpi_op_bind bind;
58467 acpi_op_unbind unbind;
58468 acpi_op_notify notify;
58469 -};
58470 +} __no_const;
58471
58472 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
58473
58474 diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
58475 index b7babf0..71e4e74 100644
58476 --- a/include/asm-generic/atomic-long.h
58477 +++ b/include/asm-generic/atomic-long.h
58478 @@ -22,6 +22,12 @@
58479
58480 typedef atomic64_t atomic_long_t;
58481
58482 +#ifdef CONFIG_PAX_REFCOUNT
58483 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
58484 +#else
58485 +typedef atomic64_t atomic_long_unchecked_t;
58486 +#endif
58487 +
58488 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
58489
58490 static inline long atomic_long_read(atomic_long_t *l)
58491 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
58492 return (long)atomic64_read(v);
58493 }
58494
58495 +#ifdef CONFIG_PAX_REFCOUNT
58496 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
58497 +{
58498 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58499 +
58500 + return (long)atomic64_read_unchecked(v);
58501 +}
58502 +#endif
58503 +
58504 static inline void atomic_long_set(atomic_long_t *l, long i)
58505 {
58506 atomic64_t *v = (atomic64_t *)l;
58507 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
58508 atomic64_set(v, i);
58509 }
58510
58511 +#ifdef CONFIG_PAX_REFCOUNT
58512 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
58513 +{
58514 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58515 +
58516 + atomic64_set_unchecked(v, i);
58517 +}
58518 +#endif
58519 +
58520 static inline void atomic_long_inc(atomic_long_t *l)
58521 {
58522 atomic64_t *v = (atomic64_t *)l;
58523 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
58524 atomic64_inc(v);
58525 }
58526
58527 +#ifdef CONFIG_PAX_REFCOUNT
58528 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
58529 +{
58530 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58531 +
58532 + atomic64_inc_unchecked(v);
58533 +}
58534 +#endif
58535 +
58536 static inline void atomic_long_dec(atomic_long_t *l)
58537 {
58538 atomic64_t *v = (atomic64_t *)l;
58539 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
58540 atomic64_dec(v);
58541 }
58542
58543 +#ifdef CONFIG_PAX_REFCOUNT
58544 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
58545 +{
58546 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58547 +
58548 + atomic64_dec_unchecked(v);
58549 +}
58550 +#endif
58551 +
58552 static inline void atomic_long_add(long i, atomic_long_t *l)
58553 {
58554 atomic64_t *v = (atomic64_t *)l;
58555 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
58556 atomic64_add(i, v);
58557 }
58558
58559 +#ifdef CONFIG_PAX_REFCOUNT
58560 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
58561 +{
58562 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58563 +
58564 + atomic64_add_unchecked(i, v);
58565 +}
58566 +#endif
58567 +
58568 static inline void atomic_long_sub(long i, atomic_long_t *l)
58569 {
58570 atomic64_t *v = (atomic64_t *)l;
58571 @@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
58572 atomic64_sub(i, v);
58573 }
58574
58575 +#ifdef CONFIG_PAX_REFCOUNT
58576 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
58577 +{
58578 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58579 +
58580 + atomic64_sub_unchecked(i, v);
58581 +}
58582 +#endif
58583 +
58584 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
58585 {
58586 atomic64_t *v = (atomic64_t *)l;
58587 @@ -115,6 +175,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
58588 return (long)atomic64_inc_return(v);
58589 }
58590
58591 +#ifdef CONFIG_PAX_REFCOUNT
58592 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
58593 +{
58594 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58595 +
58596 + return (long)atomic64_inc_return_unchecked(v);
58597 +}
58598 +#endif
58599 +
58600 static inline long atomic_long_dec_return(atomic_long_t *l)
58601 {
58602 atomic64_t *v = (atomic64_t *)l;
58603 @@ -140,6 +209,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
58604
58605 typedef atomic_t atomic_long_t;
58606
58607 +#ifdef CONFIG_PAX_REFCOUNT
58608 +typedef atomic_unchecked_t atomic_long_unchecked_t;
58609 +#else
58610 +typedef atomic_t atomic_long_unchecked_t;
58611 +#endif
58612 +
58613 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
58614 static inline long atomic_long_read(atomic_long_t *l)
58615 {
58616 @@ -148,6 +223,15 @@ static inline long atomic_long_read(atomic_long_t *l)
58617 return (long)atomic_read(v);
58618 }
58619
58620 +#ifdef CONFIG_PAX_REFCOUNT
58621 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
58622 +{
58623 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
58624 +
58625 + return (long)atomic_read_unchecked(v);
58626 +}
58627 +#endif
58628 +
58629 static inline void atomic_long_set(atomic_long_t *l, long i)
58630 {
58631 atomic_t *v = (atomic_t *)l;
58632 @@ -155,6 +239,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
58633 atomic_set(v, i);
58634 }
58635
58636 +#ifdef CONFIG_PAX_REFCOUNT
58637 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
58638 +{
58639 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
58640 +
58641 + atomic_set_unchecked(v, i);
58642 +}
58643 +#endif
58644 +
58645 static inline void atomic_long_inc(atomic_long_t *l)
58646 {
58647 atomic_t *v = (atomic_t *)l;
58648 @@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
58649 atomic_inc(v);
58650 }
58651
58652 +#ifdef CONFIG_PAX_REFCOUNT
58653 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
58654 +{
58655 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
58656 +
58657 + atomic_inc_unchecked(v);
58658 +}
58659 +#endif
58660 +
58661 static inline void atomic_long_dec(atomic_long_t *l)
58662 {
58663 atomic_t *v = (atomic_t *)l;
58664 @@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
58665 atomic_dec(v);
58666 }
58667
58668 +#ifdef CONFIG_PAX_REFCOUNT
58669 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
58670 +{
58671 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
58672 +
58673 + atomic_dec_unchecked(v);
58674 +}
58675 +#endif
58676 +
58677 static inline void atomic_long_add(long i, atomic_long_t *l)
58678 {
58679 atomic_t *v = (atomic_t *)l;
58680 @@ -176,6 +287,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
58681 atomic_add(i, v);
58682 }
58683
58684 +#ifdef CONFIG_PAX_REFCOUNT
58685 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
58686 +{
58687 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
58688 +
58689 + atomic_add_unchecked(i, v);
58690 +}
58691 +#endif
58692 +
58693 static inline void atomic_long_sub(long i, atomic_long_t *l)
58694 {
58695 atomic_t *v = (atomic_t *)l;
58696 @@ -183,6 +303,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
58697 atomic_sub(i, v);
58698 }
58699
58700 +#ifdef CONFIG_PAX_REFCOUNT
58701 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
58702 +{
58703 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
58704 +
58705 + atomic_sub_unchecked(i, v);
58706 +}
58707 +#endif
58708 +
58709 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
58710 {
58711 atomic_t *v = (atomic_t *)l;
58712 @@ -232,6 +361,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
58713 return (long)atomic_inc_return(v);
58714 }
58715
58716 +#ifdef CONFIG_PAX_REFCOUNT
58717 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
58718 +{
58719 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
58720 +
58721 + return (long)atomic_inc_return_unchecked(v);
58722 +}
58723 +#endif
58724 +
58725 static inline long atomic_long_dec_return(atomic_long_t *l)
58726 {
58727 atomic_t *v = (atomic_t *)l;
58728 @@ -255,4 +393,49 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
58729
58730 #endif /* BITS_PER_LONG == 64 */
58731
58732 +#ifdef CONFIG_PAX_REFCOUNT
58733 +static inline void pax_refcount_needs_these_functions(void)
58734 +{
58735 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
58736 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
58737 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
58738 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
58739 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
58740 + (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
58741 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
58742 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
58743 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
58744 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
58745 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
58746 +
58747 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
58748 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
58749 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
58750 + atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
58751 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
58752 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
58753 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
58754 +}
58755 +#else
58756 +#define atomic_read_unchecked(v) atomic_read(v)
58757 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
58758 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
58759 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
58760 +#define atomic_inc_unchecked(v) atomic_inc(v)
58761 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
58762 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
58763 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
58764 +#define atomic_dec_unchecked(v) atomic_dec(v)
58765 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
58766 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
58767 +
58768 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
58769 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
58770 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
58771 +#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
58772 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
58773 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
58774 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
58775 +#endif
58776 +
58777 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
58778 diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
58779 index b18ce4f..2ee2843 100644
58780 --- a/include/asm-generic/atomic64.h
58781 +++ b/include/asm-generic/atomic64.h
58782 @@ -16,6 +16,8 @@ typedef struct {
58783 long long counter;
58784 } atomic64_t;
58785
58786 +typedef atomic64_t atomic64_unchecked_t;
58787 +
58788 #define ATOMIC64_INIT(i) { (i) }
58789
58790 extern long long atomic64_read(const atomic64_t *v);
58791 @@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
58792 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
58793 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
58794
58795 +#define atomic64_read_unchecked(v) atomic64_read(v)
58796 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
58797 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
58798 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
58799 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
58800 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
58801 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
58802 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
58803 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
58804 +
58805 #endif /* _ASM_GENERIC_ATOMIC64_H */
58806 diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
58807 index 1bfcfe5..e04c5c9 100644
58808 --- a/include/asm-generic/cache.h
58809 +++ b/include/asm-generic/cache.h
58810 @@ -6,7 +6,7 @@
58811 * cache lines need to provide their own cache.h.
58812 */
58813
58814 -#define L1_CACHE_SHIFT 5
58815 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
58816 +#define L1_CACHE_SHIFT 5UL
58817 +#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
58818
58819 #endif /* __ASM_GENERIC_CACHE_H */
58820 diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
58821 index 0d68a1e..b74a761 100644
58822 --- a/include/asm-generic/emergency-restart.h
58823 +++ b/include/asm-generic/emergency-restart.h
58824 @@ -1,7 +1,7 @@
58825 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
58826 #define _ASM_GENERIC_EMERGENCY_RESTART_H
58827
58828 -static inline void machine_emergency_restart(void)
58829 +static inline __noreturn void machine_emergency_restart(void)
58830 {
58831 machine_restart(NULL);
58832 }
58833 diff --git a/include/asm-generic/int-l64.h b/include/asm-generic/int-l64.h
58834 index 1ca3efc..e3dc852 100644
58835 --- a/include/asm-generic/int-l64.h
58836 +++ b/include/asm-generic/int-l64.h
58837 @@ -46,6 +46,8 @@ typedef unsigned int u32;
58838 typedef signed long s64;
58839 typedef unsigned long u64;
58840
58841 +typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
58842 +
58843 #define S8_C(x) x
58844 #define U8_C(x) x ## U
58845 #define S16_C(x) x
58846 diff --git a/include/asm-generic/int-ll64.h b/include/asm-generic/int-ll64.h
58847 index f394147..b6152b9 100644
58848 --- a/include/asm-generic/int-ll64.h
58849 +++ b/include/asm-generic/int-ll64.h
58850 @@ -51,6 +51,8 @@ typedef unsigned int u32;
58851 typedef signed long long s64;
58852 typedef unsigned long long u64;
58853
58854 +typedef unsigned long long intoverflow_t;
58855 +
58856 #define S8_C(x) x
58857 #define U8_C(x) x ## U
58858 #define S16_C(x) x
58859 diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
58860 index 0232ccb..13d9165 100644
58861 --- a/include/asm-generic/kmap_types.h
58862 +++ b/include/asm-generic/kmap_types.h
58863 @@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
58864 KMAP_D(17) KM_NMI,
58865 KMAP_D(18) KM_NMI_PTE,
58866 KMAP_D(19) KM_KDB,
58867 +KMAP_D(20) KM_CLEARPAGE,
58868 /*
58869 * Remember to update debug_kmap_atomic() when adding new kmap types!
58870 */
58871 -KMAP_D(20) KM_TYPE_NR
58872 +KMAP_D(21) KM_TYPE_NR
58873 };
58874
58875 #undef KMAP_D
58876 diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
58877 index 9ceb03b..2efbcbd 100644
58878 --- a/include/asm-generic/local.h
58879 +++ b/include/asm-generic/local.h
58880 @@ -39,6 +39,7 @@ typedef struct
58881 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
58882 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
58883 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
58884 +#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
58885
58886 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
58887 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
58888 diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
58889 index 725612b..9cc513a 100644
58890 --- a/include/asm-generic/pgtable-nopmd.h
58891 +++ b/include/asm-generic/pgtable-nopmd.h
58892 @@ -1,14 +1,19 @@
58893 #ifndef _PGTABLE_NOPMD_H
58894 #define _PGTABLE_NOPMD_H
58895
58896 -#ifndef __ASSEMBLY__
58897 -
58898 #include <asm-generic/pgtable-nopud.h>
58899
58900 -struct mm_struct;
58901 -
58902 #define __PAGETABLE_PMD_FOLDED
58903
58904 +#define PMD_SHIFT PUD_SHIFT
58905 +#define PTRS_PER_PMD 1
58906 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
58907 +#define PMD_MASK (~(PMD_SIZE-1))
58908 +
58909 +#ifndef __ASSEMBLY__
58910 +
58911 +struct mm_struct;
58912 +
58913 /*
58914 * Having the pmd type consist of a pud gets the size right, and allows
58915 * us to conceptually access the pud entry that this pmd is folded into
58916 @@ -16,11 +21,6 @@ struct mm_struct;
58917 */
58918 typedef struct { pud_t pud; } pmd_t;
58919
58920 -#define PMD_SHIFT PUD_SHIFT
58921 -#define PTRS_PER_PMD 1
58922 -#define PMD_SIZE (1UL << PMD_SHIFT)
58923 -#define PMD_MASK (~(PMD_SIZE-1))
58924 -
58925 /*
58926 * The "pud_xxx()" functions here are trivial for a folded two-level
58927 * setup: the pmd is never bad, and a pmd always exists (as it's folded
58928 diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
58929 index 810431d..ccc3638 100644
58930 --- a/include/asm-generic/pgtable-nopud.h
58931 +++ b/include/asm-generic/pgtable-nopud.h
58932 @@ -1,10 +1,15 @@
58933 #ifndef _PGTABLE_NOPUD_H
58934 #define _PGTABLE_NOPUD_H
58935
58936 -#ifndef __ASSEMBLY__
58937 -
58938 #define __PAGETABLE_PUD_FOLDED
58939
58940 +#define PUD_SHIFT PGDIR_SHIFT
58941 +#define PTRS_PER_PUD 1
58942 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
58943 +#define PUD_MASK (~(PUD_SIZE-1))
58944 +
58945 +#ifndef __ASSEMBLY__
58946 +
58947 /*
58948 * Having the pud type consist of a pgd gets the size right, and allows
58949 * us to conceptually access the pgd entry that this pud is folded into
58950 @@ -12,11 +17,6 @@
58951 */
58952 typedef struct { pgd_t pgd; } pud_t;
58953
58954 -#define PUD_SHIFT PGDIR_SHIFT
58955 -#define PTRS_PER_PUD 1
58956 -#define PUD_SIZE (1UL << PUD_SHIFT)
58957 -#define PUD_MASK (~(PUD_SIZE-1))
58958 -
58959 /*
58960 * The "pgd_xxx()" functions here are trivial for a folded two-level
58961 * setup: the pud is never bad, and a pud always exists (as it's folded
58962 diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
58963 index 76bff2b..c7a14e2 100644
58964 --- a/include/asm-generic/pgtable.h
58965 +++ b/include/asm-generic/pgtable.h
58966 @@ -443,6 +443,14 @@ static inline int pmd_write(pmd_t pmd)
58967 #endif /* __HAVE_ARCH_PMD_WRITE */
58968 #endif
58969
58970 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
58971 +static inline unsigned long pax_open_kernel(void) { return 0; }
58972 +#endif
58973 +
58974 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
58975 +static inline unsigned long pax_close_kernel(void) { return 0; }
58976 +#endif
58977 +
58978 #endif /* !__ASSEMBLY__ */
58979
58980 #endif /* _ASM_GENERIC_PGTABLE_H */
58981 diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
58982 index b5e2e4c..6a5373e 100644
58983 --- a/include/asm-generic/vmlinux.lds.h
58984 +++ b/include/asm-generic/vmlinux.lds.h
58985 @@ -217,6 +217,7 @@
58986 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
58987 VMLINUX_SYMBOL(__start_rodata) = .; \
58988 *(.rodata) *(.rodata.*) \
58989 + *(.data..read_only) \
58990 *(__vermagic) /* Kernel version magic */ \
58991 . = ALIGN(8); \
58992 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
58993 @@ -722,17 +723,18 @@
58994 * section in the linker script will go there too. @phdr should have
58995 * a leading colon.
58996 *
58997 - * Note that this macros defines __per_cpu_load as an absolute symbol.
58998 + * Note that this macros defines per_cpu_load as an absolute symbol.
58999 * If there is no need to put the percpu section at a predetermined
59000 * address, use PERCPU_SECTION.
59001 */
59002 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
59003 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
59004 - .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
59005 + per_cpu_load = .; \
59006 + .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
59007 - LOAD_OFFSET) { \
59008 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
59009 PERCPU_INPUT(cacheline) \
59010 } phdr \
59011 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
59012 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
59013
59014 /**
59015 * PERCPU_SECTION - define output section for percpu area, simple version
59016 diff --git a/include/drm/drmP.h b/include/drm/drmP.h
59017 index 92f0981..d44a37c 100644
59018 --- a/include/drm/drmP.h
59019 +++ b/include/drm/drmP.h
59020 @@ -72,6 +72,7 @@
59021 #include <linux/workqueue.h>
59022 #include <linux/poll.h>
59023 #include <asm/pgalloc.h>
59024 +#include <asm/local.h>
59025 #include "drm.h"
59026
59027 #include <linux/idr.h>
59028 @@ -1038,7 +1039,7 @@ struct drm_device {
59029
59030 /** \name Usage Counters */
59031 /*@{ */
59032 - int open_count; /**< Outstanding files open */
59033 + local_t open_count; /**< Outstanding files open */
59034 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
59035 atomic_t vma_count; /**< Outstanding vma areas open */
59036 int buf_use; /**< Buffers in use -- cannot alloc */
59037 @@ -1049,7 +1050,7 @@ struct drm_device {
59038 /*@{ */
59039 unsigned long counters;
59040 enum drm_stat_type types[15];
59041 - atomic_t counts[15];
59042 + atomic_unchecked_t counts[15];
59043 /*@} */
59044
59045 struct list_head filelist;
59046 diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
59047 index 37515d1..34fa8b0 100644
59048 --- a/include/drm/drm_crtc_helper.h
59049 +++ b/include/drm/drm_crtc_helper.h
59050 @@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
59051
59052 /* disable crtc when not in use - more explicit than dpms off */
59053 void (*disable)(struct drm_crtc *crtc);
59054 -};
59055 +} __no_const;
59056
59057 struct drm_encoder_helper_funcs {
59058 void (*dpms)(struct drm_encoder *encoder, int mode);
59059 @@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
59060 struct drm_connector *connector);
59061 /* disable encoder when not in use - more explicit than dpms off */
59062 void (*disable)(struct drm_encoder *encoder);
59063 -};
59064 +} __no_const;
59065
59066 struct drm_connector_helper_funcs {
59067 int (*get_modes)(struct drm_connector *connector);
59068 diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
59069 index 26c1f78..6722682 100644
59070 --- a/include/drm/ttm/ttm_memory.h
59071 +++ b/include/drm/ttm/ttm_memory.h
59072 @@ -47,7 +47,7 @@
59073
59074 struct ttm_mem_shrink {
59075 int (*do_shrink) (struct ttm_mem_shrink *);
59076 -};
59077 +} __no_const;
59078
59079 /**
59080 * struct ttm_mem_global - Global memory accounting structure.
59081 diff --git a/include/linux/a.out.h b/include/linux/a.out.h
59082 index e86dfca..40cc55f 100644
59083 --- a/include/linux/a.out.h
59084 +++ b/include/linux/a.out.h
59085 @@ -39,6 +39,14 @@ enum machine_type {
59086 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
59087 };
59088
59089 +/* Constants for the N_FLAGS field */
59090 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
59091 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
59092 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
59093 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
59094 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
59095 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
59096 +
59097 #if !defined (N_MAGIC)
59098 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
59099 #endif
59100 diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
59101 index f4ff882..84b53a6 100644
59102 --- a/include/linux/atmdev.h
59103 +++ b/include/linux/atmdev.h
59104 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
59105 #endif
59106
59107 struct k_atm_aal_stats {
59108 -#define __HANDLE_ITEM(i) atomic_t i
59109 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
59110 __AAL_STAT_ITEMS
59111 #undef __HANDLE_ITEM
59112 };
59113 diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
59114 index 0092102..8a801b4 100644
59115 --- a/include/linux/binfmts.h
59116 +++ b/include/linux/binfmts.h
59117 @@ -89,6 +89,7 @@ struct linux_binfmt {
59118 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
59119 int (*load_shlib)(struct file *);
59120 int (*core_dump)(struct coredump_params *cprm);
59121 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
59122 unsigned long min_coredump; /* minimal dump size */
59123 };
59124
59125 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
59126 index 606cf33..b72c577 100644
59127 --- a/include/linux/blkdev.h
59128 +++ b/include/linux/blkdev.h
59129 @@ -1379,7 +1379,7 @@ struct block_device_operations {
59130 /* this callback is with swap_lock and sometimes page table lock held */
59131 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
59132 struct module *owner;
59133 -};
59134 +} __do_const;
59135
59136 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
59137 unsigned long);
59138 diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
59139 index 4d1a074..88f929a 100644
59140 --- a/include/linux/blktrace_api.h
59141 +++ b/include/linux/blktrace_api.h
59142 @@ -162,7 +162,7 @@ struct blk_trace {
59143 struct dentry *dir;
59144 struct dentry *dropped_file;
59145 struct dentry *msg_file;
59146 - atomic_t dropped;
59147 + atomic_unchecked_t dropped;
59148 };
59149
59150 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
59151 diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
59152 index 83195fb..0b0f77d 100644
59153 --- a/include/linux/byteorder/little_endian.h
59154 +++ b/include/linux/byteorder/little_endian.h
59155 @@ -42,51 +42,51 @@
59156
59157 static inline __le64 __cpu_to_le64p(const __u64 *p)
59158 {
59159 - return (__force __le64)*p;
59160 + return (__force const __le64)*p;
59161 }
59162 static inline __u64 __le64_to_cpup(const __le64 *p)
59163 {
59164 - return (__force __u64)*p;
59165 + return (__force const __u64)*p;
59166 }
59167 static inline __le32 __cpu_to_le32p(const __u32 *p)
59168 {
59169 - return (__force __le32)*p;
59170 + return (__force const __le32)*p;
59171 }
59172 static inline __u32 __le32_to_cpup(const __le32 *p)
59173 {
59174 - return (__force __u32)*p;
59175 + return (__force const __u32)*p;
59176 }
59177 static inline __le16 __cpu_to_le16p(const __u16 *p)
59178 {
59179 - return (__force __le16)*p;
59180 + return (__force const __le16)*p;
59181 }
59182 static inline __u16 __le16_to_cpup(const __le16 *p)
59183 {
59184 - return (__force __u16)*p;
59185 + return (__force const __u16)*p;
59186 }
59187 static inline __be64 __cpu_to_be64p(const __u64 *p)
59188 {
59189 - return (__force __be64)__swab64p(p);
59190 + return (__force const __be64)__swab64p(p);
59191 }
59192 static inline __u64 __be64_to_cpup(const __be64 *p)
59193 {
59194 - return __swab64p((__u64 *)p);
59195 + return __swab64p((const __u64 *)p);
59196 }
59197 static inline __be32 __cpu_to_be32p(const __u32 *p)
59198 {
59199 - return (__force __be32)__swab32p(p);
59200 + return (__force const __be32)__swab32p(p);
59201 }
59202 static inline __u32 __be32_to_cpup(const __be32 *p)
59203 {
59204 - return __swab32p((__u32 *)p);
59205 + return __swab32p((const __u32 *)p);
59206 }
59207 static inline __be16 __cpu_to_be16p(const __u16 *p)
59208 {
59209 - return (__force __be16)__swab16p(p);
59210 + return (__force const __be16)__swab16p(p);
59211 }
59212 static inline __u16 __be16_to_cpup(const __be16 *p)
59213 {
59214 - return __swab16p((__u16 *)p);
59215 + return __swab16p((const __u16 *)p);
59216 }
59217 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
59218 #define __le64_to_cpus(x) do { (void)(x); } while (0)
59219 diff --git a/include/linux/cache.h b/include/linux/cache.h
59220 index 4c57065..4307975 100644
59221 --- a/include/linux/cache.h
59222 +++ b/include/linux/cache.h
59223 @@ -16,6 +16,10 @@
59224 #define __read_mostly
59225 #endif
59226
59227 +#ifndef __read_only
59228 +#define __read_only __read_mostly
59229 +#endif
59230 +
59231 #ifndef ____cacheline_aligned
59232 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
59233 #endif
59234 diff --git a/include/linux/capability.h b/include/linux/capability.h
59235 index 12d52de..b5f7fa7 100644
59236 --- a/include/linux/capability.h
59237 +++ b/include/linux/capability.h
59238 @@ -548,6 +548,8 @@ extern bool has_ns_capability_noaudit(struct task_struct *t,
59239 extern bool capable(int cap);
59240 extern bool ns_capable(struct user_namespace *ns, int cap);
59241 extern bool nsown_capable(int cap);
59242 +extern bool capable_nolog(int cap);
59243 +extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
59244
59245 /* audit system wants to get cap info from files as well */
59246 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
59247 diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
59248 index 04ffb2e..6799180 100644
59249 --- a/include/linux/cleancache.h
59250 +++ b/include/linux/cleancache.h
59251 @@ -31,7 +31,7 @@ struct cleancache_ops {
59252 void (*flush_page)(int, struct cleancache_filekey, pgoff_t);
59253 void (*flush_inode)(int, struct cleancache_filekey);
59254 void (*flush_fs)(int);
59255 -};
59256 +} __no_const;
59257
59258 extern struct cleancache_ops
59259 cleancache_register_ops(struct cleancache_ops *ops);
59260 diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
59261 index 2f40791..89a56fd 100644
59262 --- a/include/linux/compiler-gcc4.h
59263 +++ b/include/linux/compiler-gcc4.h
59264 @@ -32,6 +32,12 @@
59265 #define __linktime_error(message) __attribute__((__error__(message)))
59266
59267 #if __GNUC_MINOR__ >= 5
59268 +
59269 +#ifdef CONSTIFY_PLUGIN
59270 +#define __no_const __attribute__((no_const))
59271 +#define __do_const __attribute__((do_const))
59272 +#endif
59273 +
59274 /*
59275 * Mark a position in code as unreachable. This can be used to
59276 * suppress control flow warnings after asm blocks that transfer
59277 @@ -47,6 +53,11 @@
59278 #define __noclone __attribute__((__noclone__))
59279
59280 #endif
59281 +
59282 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
59283 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
59284 +#define __bos0(ptr) __bos((ptr), 0)
59285 +#define __bos1(ptr) __bos((ptr), 1)
59286 #endif
59287
59288 #if __GNUC_MINOR__ > 0
59289 diff --git a/include/linux/compiler.h b/include/linux/compiler.h
59290 index 4a24354..9570c1b 100644
59291 --- a/include/linux/compiler.h
59292 +++ b/include/linux/compiler.h
59293 @@ -5,31 +5,62 @@
59294
59295 #ifdef __CHECKER__
59296 # define __user __attribute__((noderef, address_space(1)))
59297 +# define __force_user __force __user
59298 # define __kernel __attribute__((address_space(0)))
59299 +# define __force_kernel __force __kernel
59300 # define __safe __attribute__((safe))
59301 # define __force __attribute__((force))
59302 # define __nocast __attribute__((nocast))
59303 # define __iomem __attribute__((noderef, address_space(2)))
59304 +# define __force_iomem __force __iomem
59305 # define __acquires(x) __attribute__((context(x,0,1)))
59306 # define __releases(x) __attribute__((context(x,1,0)))
59307 # define __acquire(x) __context__(x,1)
59308 # define __release(x) __context__(x,-1)
59309 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
59310 # define __percpu __attribute__((noderef, address_space(3)))
59311 +# define __force_percpu __force __percpu
59312 #ifdef CONFIG_SPARSE_RCU_POINTER
59313 # define __rcu __attribute__((noderef, address_space(4)))
59314 +# define __force_rcu __force __rcu
59315 #else
59316 # define __rcu
59317 +# define __force_rcu
59318 #endif
59319 extern void __chk_user_ptr(const volatile void __user *);
59320 extern void __chk_io_ptr(const volatile void __iomem *);
59321 +#elif defined(CHECKER_PLUGIN)
59322 +//# define __user
59323 +//# define __force_user
59324 +//# define __kernel
59325 +//# define __force_kernel
59326 +# define __safe
59327 +# define __force
59328 +# define __nocast
59329 +# define __iomem
59330 +# define __force_iomem
59331 +# define __chk_user_ptr(x) (void)0
59332 +# define __chk_io_ptr(x) (void)0
59333 +# define __builtin_warning(x, y...) (1)
59334 +# define __acquires(x)
59335 +# define __releases(x)
59336 +# define __acquire(x) (void)0
59337 +# define __release(x) (void)0
59338 +# define __cond_lock(x,c) (c)
59339 +# define __percpu
59340 +# define __force_percpu
59341 +# define __rcu
59342 +# define __force_rcu
59343 #else
59344 # define __user
59345 +# define __force_user
59346 # define __kernel
59347 +# define __force_kernel
59348 # define __safe
59349 # define __force
59350 # define __nocast
59351 # define __iomem
59352 +# define __force_iomem
59353 # define __chk_user_ptr(x) (void)0
59354 # define __chk_io_ptr(x) (void)0
59355 # define __builtin_warning(x, y...) (1)
59356 @@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
59357 # define __release(x) (void)0
59358 # define __cond_lock(x,c) (c)
59359 # define __percpu
59360 +# define __force_percpu
59361 # define __rcu
59362 +# define __force_rcu
59363 #endif
59364
59365 #ifdef __KERNEL__
59366 @@ -264,6 +297,14 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
59367 # define __attribute_const__ /* unimplemented */
59368 #endif
59369
59370 +#ifndef __no_const
59371 +# define __no_const
59372 +#endif
59373 +
59374 +#ifndef __do_const
59375 +# define __do_const
59376 +#endif
59377 +
59378 /*
59379 * Tell gcc if a function is cold. The compiler will assume any path
59380 * directly leading to the call is unlikely.
59381 @@ -273,6 +314,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
59382 #define __cold
59383 #endif
59384
59385 +#ifndef __alloc_size
59386 +#define __alloc_size(...)
59387 +#endif
59388 +
59389 +#ifndef __bos
59390 +#define __bos(ptr, arg)
59391 +#endif
59392 +
59393 +#ifndef __bos0
59394 +#define __bos0(ptr)
59395 +#endif
59396 +
59397 +#ifndef __bos1
59398 +#define __bos1(ptr)
59399 +#endif
59400 +
59401 /* Simple shorthand for a section definition */
59402 #ifndef __section
59403 # define __section(S) __attribute__ ((__section__(#S)))
59404 @@ -308,6 +365,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
59405 * use is to mediate communication between process-level code and irq/NMI
59406 * handlers, all running on the same CPU.
59407 */
59408 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
59409 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
59410 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
59411
59412 #endif /* __LINUX_COMPILER_H */
59413 diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
59414 index e9eaec5..bfeb9bb 100644
59415 --- a/include/linux/cpuset.h
59416 +++ b/include/linux/cpuset.h
59417 @@ -118,7 +118,7 @@ static inline void put_mems_allowed(void)
59418 * nodemask.
59419 */
59420 smp_mb();
59421 - --ACCESS_ONCE(current->mems_allowed_change_disable);
59422 + --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
59423 }
59424
59425 static inline void set_mems_allowed(nodemask_t nodemask)
59426 diff --git a/include/linux/cred.h b/include/linux/cred.h
59427 index adadf71..6af5560 100644
59428 --- a/include/linux/cred.h
59429 +++ b/include/linux/cred.h
59430 @@ -207,6 +207,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
59431 static inline void validate_process_creds(void)
59432 {
59433 }
59434 +static inline void validate_task_creds(struct task_struct *task)
59435 +{
59436 +}
59437 #endif
59438
59439 /**
59440 diff --git a/include/linux/crypto.h b/include/linux/crypto.h
59441 index 8a94217..15d49e3 100644
59442 --- a/include/linux/crypto.h
59443 +++ b/include/linux/crypto.h
59444 @@ -365,7 +365,7 @@ struct cipher_tfm {
59445 const u8 *key, unsigned int keylen);
59446 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
59447 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
59448 -};
59449 +} __no_const;
59450
59451 struct hash_tfm {
59452 int (*init)(struct hash_desc *desc);
59453 @@ -386,13 +386,13 @@ struct compress_tfm {
59454 int (*cot_decompress)(struct crypto_tfm *tfm,
59455 const u8 *src, unsigned int slen,
59456 u8 *dst, unsigned int *dlen);
59457 -};
59458 +} __no_const;
59459
59460 struct rng_tfm {
59461 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
59462 unsigned int dlen);
59463 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
59464 -};
59465 +} __no_const;
59466
59467 #define crt_ablkcipher crt_u.ablkcipher
59468 #define crt_aead crt_u.aead
59469 diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
59470 index 7925bf0..d5143d2 100644
59471 --- a/include/linux/decompress/mm.h
59472 +++ b/include/linux/decompress/mm.h
59473 @@ -77,7 +77,7 @@ static void free(void *where)
59474 * warnings when not needed (indeed large_malloc / large_free are not
59475 * needed by inflate */
59476
59477 -#define malloc(a) kmalloc(a, GFP_KERNEL)
59478 +#define malloc(a) kmalloc((a), GFP_KERNEL)
59479 #define free(a) kfree(a)
59480
59481 #define large_malloc(a) vmalloc(a)
59482 diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
59483 index e13117c..e9fc938 100644
59484 --- a/include/linux/dma-mapping.h
59485 +++ b/include/linux/dma-mapping.h
59486 @@ -46,7 +46,7 @@ struct dma_map_ops {
59487 u64 (*get_required_mask)(struct device *dev);
59488 #endif
59489 int is_phys;
59490 -};
59491 +} __do_const;
59492
59493 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
59494
59495 diff --git a/include/linux/efi.h b/include/linux/efi.h
59496 index 37c3007..92ab679 100644
59497 --- a/include/linux/efi.h
59498 +++ b/include/linux/efi.h
59499 @@ -580,7 +580,7 @@ struct efivar_operations {
59500 efi_get_variable_t *get_variable;
59501 efi_get_next_variable_t *get_next_variable;
59502 efi_set_variable_t *set_variable;
59503 -};
59504 +} __no_const;
59505
59506 struct efivars {
59507 /*
59508 diff --git a/include/linux/elf.h b/include/linux/elf.h
59509 index 999b4f5..57753b4 100644
59510 --- a/include/linux/elf.h
59511 +++ b/include/linux/elf.h
59512 @@ -40,6 +40,17 @@ typedef __s64 Elf64_Sxword;
59513 #define PT_GNU_EH_FRAME 0x6474e550
59514
59515 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
59516 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
59517 +
59518 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
59519 +
59520 +/* Constants for the e_flags field */
59521 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
59522 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
59523 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
59524 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
59525 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
59526 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
59527
59528 /*
59529 * Extended Numbering
59530 @@ -97,6 +108,8 @@ typedef __s64 Elf64_Sxword;
59531 #define DT_DEBUG 21
59532 #define DT_TEXTREL 22
59533 #define DT_JMPREL 23
59534 +#define DT_FLAGS 30
59535 + #define DF_TEXTREL 0x00000004
59536 #define DT_ENCODING 32
59537 #define OLD_DT_LOOS 0x60000000
59538 #define DT_LOOS 0x6000000d
59539 @@ -243,6 +256,19 @@ typedef struct elf64_hdr {
59540 #define PF_W 0x2
59541 #define PF_X 0x1
59542
59543 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
59544 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
59545 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
59546 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
59547 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
59548 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
59549 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
59550 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
59551 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
59552 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
59553 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
59554 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
59555 +
59556 typedef struct elf32_phdr{
59557 Elf32_Word p_type;
59558 Elf32_Off p_offset;
59559 @@ -335,6 +361,8 @@ typedef struct elf64_shdr {
59560 #define EI_OSABI 7
59561 #define EI_PAD 8
59562
59563 +#define EI_PAX 14
59564 +
59565 #define ELFMAG0 0x7f /* EI_MAG */
59566 #define ELFMAG1 'E'
59567 #define ELFMAG2 'L'
59568 @@ -421,6 +449,7 @@ extern Elf32_Dyn _DYNAMIC [];
59569 #define elf_note elf32_note
59570 #define elf_addr_t Elf32_Off
59571 #define Elf_Half Elf32_Half
59572 +#define elf_dyn Elf32_Dyn
59573
59574 #else
59575
59576 @@ -431,6 +460,7 @@ extern Elf64_Dyn _DYNAMIC [];
59577 #define elf_note elf64_note
59578 #define elf_addr_t Elf64_Off
59579 #define Elf_Half Elf64_Half
59580 +#define elf_dyn Elf64_Dyn
59581
59582 #endif
59583
59584 diff --git a/include/linux/filter.h b/include/linux/filter.h
59585 index 8eeb205..d59bfa2 100644
59586 --- a/include/linux/filter.h
59587 +++ b/include/linux/filter.h
59588 @@ -134,6 +134,7 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
59589
59590 struct sk_buff;
59591 struct sock;
59592 +struct bpf_jit_work;
59593
59594 struct sk_filter
59595 {
59596 @@ -141,6 +142,9 @@ struct sk_filter
59597 unsigned int len; /* Number of filter blocks */
59598 unsigned int (*bpf_func)(const struct sk_buff *skb,
59599 const struct sock_filter *filter);
59600 +#ifdef CONFIG_BPF_JIT
59601 + struct bpf_jit_work *work;
59602 +#endif
59603 struct rcu_head rcu;
59604 struct sock_filter insns[0];
59605 };
59606 diff --git a/include/linux/firewire.h b/include/linux/firewire.h
59607 index 84ccf8e..2e9b14c 100644
59608 --- a/include/linux/firewire.h
59609 +++ b/include/linux/firewire.h
59610 @@ -428,7 +428,7 @@ struct fw_iso_context {
59611 union {
59612 fw_iso_callback_t sc;
59613 fw_iso_mc_callback_t mc;
59614 - } callback;
59615 + } __no_const callback;
59616 void *callback_data;
59617 };
59618
59619 diff --git a/include/linux/fs.h b/include/linux/fs.h
59620 index 69cd5bb..58425c2 100644
59621 --- a/include/linux/fs.h
59622 +++ b/include/linux/fs.h
59623 @@ -1623,7 +1623,8 @@ struct file_operations {
59624 int (*setlease)(struct file *, long, struct file_lock **);
59625 long (*fallocate)(struct file *file, int mode, loff_t offset,
59626 loff_t len);
59627 -};
59628 +} __do_const;
59629 +typedef struct file_operations __no_const file_operations_no_const;
59630
59631 struct inode_operations {
59632 struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
59633 diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
59634 index 003dc0f..3c4ea97 100644
59635 --- a/include/linux/fs_struct.h
59636 +++ b/include/linux/fs_struct.h
59637 @@ -6,7 +6,7 @@
59638 #include <linux/seqlock.h>
59639
59640 struct fs_struct {
59641 - int users;
59642 + atomic_t users;
59643 spinlock_t lock;
59644 seqcount_t seq;
59645 int umask;
59646 diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
59647 index ce31408..b1ad003 100644
59648 --- a/include/linux/fscache-cache.h
59649 +++ b/include/linux/fscache-cache.h
59650 @@ -102,7 +102,7 @@ struct fscache_operation {
59651 fscache_operation_release_t release;
59652 };
59653
59654 -extern atomic_t fscache_op_debug_id;
59655 +extern atomic_unchecked_t fscache_op_debug_id;
59656 extern void fscache_op_work_func(struct work_struct *work);
59657
59658 extern void fscache_enqueue_operation(struct fscache_operation *);
59659 @@ -122,7 +122,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
59660 {
59661 INIT_WORK(&op->work, fscache_op_work_func);
59662 atomic_set(&op->usage, 1);
59663 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
59664 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
59665 op->processor = processor;
59666 op->release = release;
59667 INIT_LIST_HEAD(&op->pend_link);
59668 diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
59669 index 2a53f10..0187fdf 100644
59670 --- a/include/linux/fsnotify.h
59671 +++ b/include/linux/fsnotify.h
59672 @@ -314,7 +314,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
59673 */
59674 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
59675 {
59676 - return kstrdup(name, GFP_KERNEL);
59677 + return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
59678 }
59679
59680 /*
59681 diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
59682 index 91d0e0a3..035666b 100644
59683 --- a/include/linux/fsnotify_backend.h
59684 +++ b/include/linux/fsnotify_backend.h
59685 @@ -105,6 +105,7 @@ struct fsnotify_ops {
59686 void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
59687 void (*free_event_priv)(struct fsnotify_event_private_data *priv);
59688 };
59689 +typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
59690
59691 /*
59692 * A group is a "thing" that wants to receive notification about filesystem
59693 diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
59694 index c3da42d..c70e0df 100644
59695 --- a/include/linux/ftrace_event.h
59696 +++ b/include/linux/ftrace_event.h
59697 @@ -97,7 +97,7 @@ struct trace_event_functions {
59698 trace_print_func raw;
59699 trace_print_func hex;
59700 trace_print_func binary;
59701 -};
59702 +} __no_const;
59703
59704 struct trace_event {
59705 struct hlist_node node;
59706 @@ -254,7 +254,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
59707 extern int trace_add_event_call(struct ftrace_event_call *call);
59708 extern void trace_remove_event_call(struct ftrace_event_call *call);
59709
59710 -#define is_signed_type(type) (((type)(-1)) < 0)
59711 +#define is_signed_type(type) (((type)(-1)) < (type)1)
59712
59713 int trace_set_clr_event(const char *system, const char *event, int set);
59714
59715 diff --git a/include/linux/genhd.h b/include/linux/genhd.h
59716 index e61d319..0da8505 100644
59717 --- a/include/linux/genhd.h
59718 +++ b/include/linux/genhd.h
59719 @@ -185,7 +185,7 @@ struct gendisk {
59720 struct kobject *slave_dir;
59721
59722 struct timer_rand_state *random;
59723 - atomic_t sync_io; /* RAID */
59724 + atomic_unchecked_t sync_io; /* RAID */
59725 struct disk_events *ev;
59726 #ifdef CONFIG_BLK_DEV_INTEGRITY
59727 struct blk_integrity *integrity;
59728 diff --git a/include/linux/gracl.h b/include/linux/gracl.h
59729 new file mode 100644
59730 index 0000000..8a130b6
59731 --- /dev/null
59732 +++ b/include/linux/gracl.h
59733 @@ -0,0 +1,319 @@
59734 +#ifndef GR_ACL_H
59735 +#define GR_ACL_H
59736 +
59737 +#include <linux/grdefs.h>
59738 +#include <linux/resource.h>
59739 +#include <linux/capability.h>
59740 +#include <linux/dcache.h>
59741 +#include <asm/resource.h>
59742 +
59743 +/* Major status information */
59744 +
59745 +#define GR_VERSION "grsecurity 2.9"
59746 +#define GRSECURITY_VERSION 0x2900
59747 +
59748 +enum {
59749 + GR_SHUTDOWN = 0,
59750 + GR_ENABLE = 1,
59751 + GR_SPROLE = 2,
59752 + GR_RELOAD = 3,
59753 + GR_SEGVMOD = 4,
59754 + GR_STATUS = 5,
59755 + GR_UNSPROLE = 6,
59756 + GR_PASSSET = 7,
59757 + GR_SPROLEPAM = 8,
59758 +};
59759 +
59760 +/* Password setup definitions
59761 + * kernel/grhash.c */
59762 +enum {
59763 + GR_PW_LEN = 128,
59764 + GR_SALT_LEN = 16,
59765 + GR_SHA_LEN = 32,
59766 +};
59767 +
59768 +enum {
59769 + GR_SPROLE_LEN = 64,
59770 +};
59771 +
59772 +enum {
59773 + GR_NO_GLOB = 0,
59774 + GR_REG_GLOB,
59775 + GR_CREATE_GLOB
59776 +};
59777 +
59778 +#define GR_NLIMITS 32
59779 +
59780 +/* Begin Data Structures */
59781 +
59782 +struct sprole_pw {
59783 + unsigned char *rolename;
59784 + unsigned char salt[GR_SALT_LEN];
59785 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
59786 +};
59787 +
59788 +struct name_entry {
59789 + __u32 key;
59790 + ino_t inode;
59791 + dev_t device;
59792 + char *name;
59793 + __u16 len;
59794 + __u8 deleted;
59795 + struct name_entry *prev;
59796 + struct name_entry *next;
59797 +};
59798 +
59799 +struct inodev_entry {
59800 + struct name_entry *nentry;
59801 + struct inodev_entry *prev;
59802 + struct inodev_entry *next;
59803 +};
59804 +
59805 +struct acl_role_db {
59806 + struct acl_role_label **r_hash;
59807 + __u32 r_size;
59808 +};
59809 +
59810 +struct inodev_db {
59811 + struct inodev_entry **i_hash;
59812 + __u32 i_size;
59813 +};
59814 +
59815 +struct name_db {
59816 + struct name_entry **n_hash;
59817 + __u32 n_size;
59818 +};
59819 +
59820 +struct crash_uid {
59821 + uid_t uid;
59822 + unsigned long expires;
59823 +};
59824 +
59825 +struct gr_hash_struct {
59826 + void **table;
59827 + void **nametable;
59828 + void *first;
59829 + __u32 table_size;
59830 + __u32 used_size;
59831 + int type;
59832 +};
59833 +
59834 +/* Userspace Grsecurity ACL data structures */
59835 +
59836 +struct acl_subject_label {
59837 + char *filename;
59838 + ino_t inode;
59839 + dev_t device;
59840 + __u32 mode;
59841 + kernel_cap_t cap_mask;
59842 + kernel_cap_t cap_lower;
59843 + kernel_cap_t cap_invert_audit;
59844 +
59845 + struct rlimit res[GR_NLIMITS];
59846 + __u32 resmask;
59847 +
59848 + __u8 user_trans_type;
59849 + __u8 group_trans_type;
59850 + uid_t *user_transitions;
59851 + gid_t *group_transitions;
59852 + __u16 user_trans_num;
59853 + __u16 group_trans_num;
59854 +
59855 + __u32 sock_families[2];
59856 + __u32 ip_proto[8];
59857 + __u32 ip_type;
59858 + struct acl_ip_label **ips;
59859 + __u32 ip_num;
59860 + __u32 inaddr_any_override;
59861 +
59862 + __u32 crashes;
59863 + unsigned long expires;
59864 +
59865 + struct acl_subject_label *parent_subject;
59866 + struct gr_hash_struct *hash;
59867 + struct acl_subject_label *prev;
59868 + struct acl_subject_label *next;
59869 +
59870 + struct acl_object_label **obj_hash;
59871 + __u32 obj_hash_size;
59872 + __u16 pax_flags;
59873 +};
59874 +
59875 +struct role_allowed_ip {
59876 + __u32 addr;
59877 + __u32 netmask;
59878 +
59879 + struct role_allowed_ip *prev;
59880 + struct role_allowed_ip *next;
59881 +};
59882 +
59883 +struct role_transition {
59884 + char *rolename;
59885 +
59886 + struct role_transition *prev;
59887 + struct role_transition *next;
59888 +};
59889 +
59890 +struct acl_role_label {
59891 + char *rolename;
59892 + uid_t uidgid;
59893 + __u16 roletype;
59894 +
59895 + __u16 auth_attempts;
59896 + unsigned long expires;
59897 +
59898 + struct acl_subject_label *root_label;
59899 + struct gr_hash_struct *hash;
59900 +
59901 + struct acl_role_label *prev;
59902 + struct acl_role_label *next;
59903 +
59904 + struct role_transition *transitions;
59905 + struct role_allowed_ip *allowed_ips;
59906 + uid_t *domain_children;
59907 + __u16 domain_child_num;
59908 +
59909 + umode_t umask;
59910 +
59911 + struct acl_subject_label **subj_hash;
59912 + __u32 subj_hash_size;
59913 +};
59914 +
59915 +struct user_acl_role_db {
59916 + struct acl_role_label **r_table;
59917 + __u32 num_pointers; /* Number of allocations to track */
59918 + __u32 num_roles; /* Number of roles */
59919 + __u32 num_domain_children; /* Number of domain children */
59920 + __u32 num_subjects; /* Number of subjects */
59921 + __u32 num_objects; /* Number of objects */
59922 +};
59923 +
59924 +struct acl_object_label {
59925 + char *filename;
59926 + ino_t inode;
59927 + dev_t device;
59928 + __u32 mode;
59929 +
59930 + struct acl_subject_label *nested;
59931 + struct acl_object_label *globbed;
59932 +
59933 + /* next two structures not used */
59934 +
59935 + struct acl_object_label *prev;
59936 + struct acl_object_label *next;
59937 +};
59938 +
59939 +struct acl_ip_label {
59940 + char *iface;
59941 + __u32 addr;
59942 + __u32 netmask;
59943 + __u16 low, high;
59944 + __u8 mode;
59945 + __u32 type;
59946 + __u32 proto[8];
59947 +
59948 + /* next two structures not used */
59949 +
59950 + struct acl_ip_label *prev;
59951 + struct acl_ip_label *next;
59952 +};
59953 +
59954 +struct gr_arg {
59955 + struct user_acl_role_db role_db;
59956 + unsigned char pw[GR_PW_LEN];
59957 + unsigned char salt[GR_SALT_LEN];
59958 + unsigned char sum[GR_SHA_LEN];
59959 + unsigned char sp_role[GR_SPROLE_LEN];
59960 + struct sprole_pw *sprole_pws;
59961 + dev_t segv_device;
59962 + ino_t segv_inode;
59963 + uid_t segv_uid;
59964 + __u16 num_sprole_pws;
59965 + __u16 mode;
59966 +};
59967 +
59968 +struct gr_arg_wrapper {
59969 + struct gr_arg *arg;
59970 + __u32 version;
59971 + __u32 size;
59972 +};
59973 +
59974 +struct subject_map {
59975 + struct acl_subject_label *user;
59976 + struct acl_subject_label *kernel;
59977 + struct subject_map *prev;
59978 + struct subject_map *next;
59979 +};
59980 +
59981 +struct acl_subj_map_db {
59982 + struct subject_map **s_hash;
59983 + __u32 s_size;
59984 +};
59985 +
59986 +/* End Data Structures Section */
59987 +
59988 +/* Hash functions generated by empirical testing by Brad Spengler
59989 + Makes good use of the low bits of the inode. Generally 0-1 times
59990 + in loop for successful match. 0-3 for unsuccessful match.
59991 + Shift/add algorithm with modulus of table size and an XOR*/
59992 +
59993 +static __inline__ unsigned int
59994 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
59995 +{
59996 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
59997 +}
59998 +
59999 + static __inline__ unsigned int
60000 +shash(const struct acl_subject_label *userp, const unsigned int sz)
60001 +{
60002 + return ((const unsigned long)userp % sz);
60003 +}
60004 +
60005 +static __inline__ unsigned int
60006 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
60007 +{
60008 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
60009 +}
60010 +
60011 +static __inline__ unsigned int
60012 +nhash(const char *name, const __u16 len, const unsigned int sz)
60013 +{
60014 + return full_name_hash((const unsigned char *)name, len) % sz;
60015 +}
60016 +
60017 +#define FOR_EACH_ROLE_START(role) \
60018 + role = role_list; \
60019 + while (role) {
60020 +
60021 +#define FOR_EACH_ROLE_END(role) \
60022 + role = role->prev; \
60023 + }
60024 +
60025 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
60026 + subj = NULL; \
60027 + iter = 0; \
60028 + while (iter < role->subj_hash_size) { \
60029 + if (subj == NULL) \
60030 + subj = role->subj_hash[iter]; \
60031 + if (subj == NULL) { \
60032 + iter++; \
60033 + continue; \
60034 + }
60035 +
60036 +#define FOR_EACH_SUBJECT_END(subj,iter) \
60037 + subj = subj->next; \
60038 + if (subj == NULL) \
60039 + iter++; \
60040 + }
60041 +
60042 +
60043 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
60044 + subj = role->hash->first; \
60045 + while (subj != NULL) {
60046 +
60047 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
60048 + subj = subj->next; \
60049 + }
60050 +
60051 +#endif
60052 +
60053 diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
60054 new file mode 100644
60055 index 0000000..323ecf2
60056 --- /dev/null
60057 +++ b/include/linux/gralloc.h
60058 @@ -0,0 +1,9 @@
60059 +#ifndef __GRALLOC_H
60060 +#define __GRALLOC_H
60061 +
60062 +void acl_free_all(void);
60063 +int acl_alloc_stack_init(unsigned long size);
60064 +void *acl_alloc(unsigned long len);
60065 +void *acl_alloc_num(unsigned long num, unsigned long len);
60066 +
60067 +#endif
60068 diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
60069 new file mode 100644
60070 index 0000000..b30e9bc
60071 --- /dev/null
60072 +++ b/include/linux/grdefs.h
60073 @@ -0,0 +1,140 @@
60074 +#ifndef GRDEFS_H
60075 +#define GRDEFS_H
60076 +
60077 +/* Begin grsecurity status declarations */
60078 +
60079 +enum {
60080 + GR_READY = 0x01,
60081 + GR_STATUS_INIT = 0x00 // disabled state
60082 +};
60083 +
60084 +/* Begin ACL declarations */
60085 +
60086 +/* Role flags */
60087 +
60088 +enum {
60089 + GR_ROLE_USER = 0x0001,
60090 + GR_ROLE_GROUP = 0x0002,
60091 + GR_ROLE_DEFAULT = 0x0004,
60092 + GR_ROLE_SPECIAL = 0x0008,
60093 + GR_ROLE_AUTH = 0x0010,
60094 + GR_ROLE_NOPW = 0x0020,
60095 + GR_ROLE_GOD = 0x0040,
60096 + GR_ROLE_LEARN = 0x0080,
60097 + GR_ROLE_TPE = 0x0100,
60098 + GR_ROLE_DOMAIN = 0x0200,
60099 + GR_ROLE_PAM = 0x0400,
60100 + GR_ROLE_PERSIST = 0x0800
60101 +};
60102 +
60103 +/* ACL Subject and Object mode flags */
60104 +enum {
60105 + GR_DELETED = 0x80000000
60106 +};
60107 +
60108 +/* ACL Object-only mode flags */
60109 +enum {
60110 + GR_READ = 0x00000001,
60111 + GR_APPEND = 0x00000002,
60112 + GR_WRITE = 0x00000004,
60113 + GR_EXEC = 0x00000008,
60114 + GR_FIND = 0x00000010,
60115 + GR_INHERIT = 0x00000020,
60116 + GR_SETID = 0x00000040,
60117 + GR_CREATE = 0x00000080,
60118 + GR_DELETE = 0x00000100,
60119 + GR_LINK = 0x00000200,
60120 + GR_AUDIT_READ = 0x00000400,
60121 + GR_AUDIT_APPEND = 0x00000800,
60122 + GR_AUDIT_WRITE = 0x00001000,
60123 + GR_AUDIT_EXEC = 0x00002000,
60124 + GR_AUDIT_FIND = 0x00004000,
60125 + GR_AUDIT_INHERIT= 0x00008000,
60126 + GR_AUDIT_SETID = 0x00010000,
60127 + GR_AUDIT_CREATE = 0x00020000,
60128 + GR_AUDIT_DELETE = 0x00040000,
60129 + GR_AUDIT_LINK = 0x00080000,
60130 + GR_PTRACERD = 0x00100000,
60131 + GR_NOPTRACE = 0x00200000,
60132 + GR_SUPPRESS = 0x00400000,
60133 + GR_NOLEARN = 0x00800000,
60134 + GR_INIT_TRANSFER= 0x01000000
60135 +};
60136 +
60137 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
60138 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
60139 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
60140 +
60141 +/* ACL subject-only mode flags */
60142 +enum {
60143 + GR_KILL = 0x00000001,
60144 + GR_VIEW = 0x00000002,
60145 + GR_PROTECTED = 0x00000004,
60146 + GR_LEARN = 0x00000008,
60147 + GR_OVERRIDE = 0x00000010,
60148 + /* just a placeholder, this mode is only used in userspace */
60149 + GR_DUMMY = 0x00000020,
60150 + GR_PROTSHM = 0x00000040,
60151 + GR_KILLPROC = 0x00000080,
60152 + GR_KILLIPPROC = 0x00000100,
60153 + /* just a placeholder, this mode is only used in userspace */
60154 + GR_NOTROJAN = 0x00000200,
60155 + GR_PROTPROCFD = 0x00000400,
60156 + GR_PROCACCT = 0x00000800,
60157 + GR_RELAXPTRACE = 0x00001000,
60158 + GR_NESTED = 0x00002000,
60159 + GR_INHERITLEARN = 0x00004000,
60160 + GR_PROCFIND = 0x00008000,
60161 + GR_POVERRIDE = 0x00010000,
60162 + GR_KERNELAUTH = 0x00020000,
60163 + GR_ATSECURE = 0x00040000,
60164 + GR_SHMEXEC = 0x00080000
60165 +};
60166 +
60167 +enum {
60168 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
60169 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
60170 + GR_PAX_ENABLE_MPROTECT = 0x0004,
60171 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
60172 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
60173 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
60174 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
60175 + GR_PAX_DISABLE_MPROTECT = 0x0400,
60176 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
60177 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
60178 +};
60179 +
60180 +enum {
60181 + GR_ID_USER = 0x01,
60182 + GR_ID_GROUP = 0x02,
60183 +};
60184 +
60185 +enum {
60186 + GR_ID_ALLOW = 0x01,
60187 + GR_ID_DENY = 0x02,
60188 +};
60189 +
60190 +#define GR_CRASH_RES 31
60191 +#define GR_UIDTABLE_MAX 500
60192 +
60193 +/* begin resource learning section */
60194 +enum {
60195 + GR_RLIM_CPU_BUMP = 60,
60196 + GR_RLIM_FSIZE_BUMP = 50000,
60197 + GR_RLIM_DATA_BUMP = 10000,
60198 + GR_RLIM_STACK_BUMP = 1000,
60199 + GR_RLIM_CORE_BUMP = 10000,
60200 + GR_RLIM_RSS_BUMP = 500000,
60201 + GR_RLIM_NPROC_BUMP = 1,
60202 + GR_RLIM_NOFILE_BUMP = 5,
60203 + GR_RLIM_MEMLOCK_BUMP = 50000,
60204 + GR_RLIM_AS_BUMP = 500000,
60205 + GR_RLIM_LOCKS_BUMP = 2,
60206 + GR_RLIM_SIGPENDING_BUMP = 5,
60207 + GR_RLIM_MSGQUEUE_BUMP = 10000,
60208 + GR_RLIM_NICE_BUMP = 1,
60209 + GR_RLIM_RTPRIO_BUMP = 1,
60210 + GR_RLIM_RTTIME_BUMP = 1000000
60211 +};
60212 +
60213 +#endif
60214 diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
60215 new file mode 100644
60216 index 0000000..da390f1
60217 --- /dev/null
60218 +++ b/include/linux/grinternal.h
60219 @@ -0,0 +1,221 @@
60220 +#ifndef __GRINTERNAL_H
60221 +#define __GRINTERNAL_H
60222 +
60223 +#ifdef CONFIG_GRKERNSEC
60224 +
60225 +#include <linux/fs.h>
60226 +#include <linux/mnt_namespace.h>
60227 +#include <linux/nsproxy.h>
60228 +#include <linux/gracl.h>
60229 +#include <linux/grdefs.h>
60230 +#include <linux/grmsg.h>
60231 +
60232 +void gr_add_learn_entry(const char *fmt, ...)
60233 + __attribute__ ((format (printf, 1, 2)));
60234 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
60235 + const struct vfsmount *mnt);
60236 +__u32 gr_check_create(const struct dentry *new_dentry,
60237 + const struct dentry *parent,
60238 + const struct vfsmount *mnt, const __u32 mode);
60239 +int gr_check_protected_task(const struct task_struct *task);
60240 +__u32 to_gr_audit(const __u32 reqmode);
60241 +int gr_set_acls(const int type);
60242 +int gr_apply_subject_to_task(struct task_struct *task);
60243 +int gr_acl_is_enabled(void);
60244 +char gr_roletype_to_char(void);
60245 +
60246 +void gr_handle_alertkill(struct task_struct *task);
60247 +char *gr_to_filename(const struct dentry *dentry,
60248 + const struct vfsmount *mnt);
60249 +char *gr_to_filename1(const struct dentry *dentry,
60250 + const struct vfsmount *mnt);
60251 +char *gr_to_filename2(const struct dentry *dentry,
60252 + const struct vfsmount *mnt);
60253 +char *gr_to_filename3(const struct dentry *dentry,
60254 + const struct vfsmount *mnt);
60255 +
60256 +extern int grsec_enable_ptrace_readexec;
60257 +extern int grsec_enable_harden_ptrace;
60258 +extern int grsec_enable_link;
60259 +extern int grsec_enable_fifo;
60260 +extern int grsec_enable_execve;
60261 +extern int grsec_enable_shm;
60262 +extern int grsec_enable_execlog;
60263 +extern int grsec_enable_signal;
60264 +extern int grsec_enable_audit_ptrace;
60265 +extern int grsec_enable_forkfail;
60266 +extern int grsec_enable_time;
60267 +extern int grsec_enable_rofs;
60268 +extern int grsec_enable_chroot_shmat;
60269 +extern int grsec_enable_chroot_mount;
60270 +extern int grsec_enable_chroot_double;
60271 +extern int grsec_enable_chroot_pivot;
60272 +extern int grsec_enable_chroot_chdir;
60273 +extern int grsec_enable_chroot_chmod;
60274 +extern int grsec_enable_chroot_mknod;
60275 +extern int grsec_enable_chroot_fchdir;
60276 +extern int grsec_enable_chroot_nice;
60277 +extern int grsec_enable_chroot_execlog;
60278 +extern int grsec_enable_chroot_caps;
60279 +extern int grsec_enable_chroot_sysctl;
60280 +extern int grsec_enable_chroot_unix;
60281 +extern int grsec_enable_tpe;
60282 +extern int grsec_tpe_gid;
60283 +extern int grsec_enable_tpe_all;
60284 +extern int grsec_enable_tpe_invert;
60285 +extern int grsec_enable_socket_all;
60286 +extern int grsec_socket_all_gid;
60287 +extern int grsec_enable_socket_client;
60288 +extern int grsec_socket_client_gid;
60289 +extern int grsec_enable_socket_server;
60290 +extern int grsec_socket_server_gid;
60291 +extern int grsec_audit_gid;
60292 +extern int grsec_enable_group;
60293 +extern int grsec_enable_audit_textrel;
60294 +extern int grsec_enable_log_rwxmaps;
60295 +extern int grsec_enable_mount;
60296 +extern int grsec_enable_chdir;
60297 +extern int grsec_resource_logging;
60298 +extern int grsec_enable_blackhole;
60299 +extern int grsec_lastack_retries;
60300 +extern int grsec_enable_brute;
60301 +extern int grsec_lock;
60302 +
60303 +extern spinlock_t grsec_alert_lock;
60304 +extern unsigned long grsec_alert_wtime;
60305 +extern unsigned long grsec_alert_fyet;
60306 +
60307 +extern spinlock_t grsec_audit_lock;
60308 +
60309 +extern rwlock_t grsec_exec_file_lock;
60310 +
60311 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
60312 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
60313 + (tsk)->exec_file->f_vfsmnt) : "/")
60314 +
60315 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
60316 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
60317 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
60318 +
60319 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
60320 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
60321 + (tsk)->exec_file->f_vfsmnt) : "/")
60322 +
60323 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
60324 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
60325 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
60326 +
60327 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
60328 +
60329 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
60330 +
60331 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
60332 + (task)->pid, (cred)->uid, \
60333 + (cred)->euid, (cred)->gid, (cred)->egid, \
60334 + gr_parent_task_fullpath(task), \
60335 + (task)->real_parent->comm, (task)->real_parent->pid, \
60336 + (pcred)->uid, (pcred)->euid, \
60337 + (pcred)->gid, (pcred)->egid
60338 +
60339 +#define GR_CHROOT_CAPS {{ \
60340 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
60341 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
60342 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
60343 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
60344 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
60345 + CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
60346 + CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
60347 +
60348 +#define security_learn(normal_msg,args...) \
60349 +({ \
60350 + read_lock(&grsec_exec_file_lock); \
60351 + gr_add_learn_entry(normal_msg "\n", ## args); \
60352 + read_unlock(&grsec_exec_file_lock); \
60353 +})
60354 +
60355 +enum {
60356 + GR_DO_AUDIT,
60357 + GR_DONT_AUDIT,
60358 + /* used for non-audit messages that we shouldn't kill the task on */
60359 + GR_DONT_AUDIT_GOOD
60360 +};
60361 +
60362 +enum {
60363 + GR_TTYSNIFF,
60364 + GR_RBAC,
60365 + GR_RBAC_STR,
60366 + GR_STR_RBAC,
60367 + GR_RBAC_MODE2,
60368 + GR_RBAC_MODE3,
60369 + GR_FILENAME,
60370 + GR_SYSCTL_HIDDEN,
60371 + GR_NOARGS,
60372 + GR_ONE_INT,
60373 + GR_ONE_INT_TWO_STR,
60374 + GR_ONE_STR,
60375 + GR_STR_INT,
60376 + GR_TWO_STR_INT,
60377 + GR_TWO_INT,
60378 + GR_TWO_U64,
60379 + GR_THREE_INT,
60380 + GR_FIVE_INT_TWO_STR,
60381 + GR_TWO_STR,
60382 + GR_THREE_STR,
60383 + GR_FOUR_STR,
60384 + GR_STR_FILENAME,
60385 + GR_FILENAME_STR,
60386 + GR_FILENAME_TWO_INT,
60387 + GR_FILENAME_TWO_INT_STR,
60388 + GR_TEXTREL,
60389 + GR_PTRACE,
60390 + GR_RESOURCE,
60391 + GR_CAP,
60392 + GR_SIG,
60393 + GR_SIG2,
60394 + GR_CRASH1,
60395 + GR_CRASH2,
60396 + GR_PSACCT,
60397 + GR_RWXMAP
60398 +};
60399 +
60400 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
60401 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
60402 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
60403 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
60404 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
60405 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
60406 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
60407 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
60408 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
60409 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
60410 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
60411 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
60412 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
60413 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
60414 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
60415 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
60416 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
60417 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
60418 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
60419 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
60420 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
60421 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
60422 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
60423 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
60424 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
60425 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
60426 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
60427 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
60428 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
60429 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
60430 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
60431 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
60432 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
60433 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
60434 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
60435 +
60436 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
60437 +
60438 +#endif
60439 +
60440 +#endif
60441 diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
60442 new file mode 100644
60443 index 0000000..ae576a1
60444 --- /dev/null
60445 +++ b/include/linux/grmsg.h
60446 @@ -0,0 +1,109 @@
60447 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
60448 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
60449 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
60450 +#define GR_STOPMOD_MSG "denied modification of module state by "
60451 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
60452 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
60453 +#define GR_IOPERM_MSG "denied use of ioperm() by "
60454 +#define GR_IOPL_MSG "denied use of iopl() by "
60455 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
60456 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
60457 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
60458 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
60459 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
60460 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
60461 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
60462 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
60463 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
60464 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
60465 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
60466 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
60467 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
60468 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
60469 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
60470 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
60471 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
60472 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
60473 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
60474 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
60475 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
60476 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
60477 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
60478 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
60479 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
60480 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
60481 +#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
60482 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
60483 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
60484 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
60485 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
60486 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
60487 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
60488 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
60489 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
60490 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
60491 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
60492 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
60493 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
60494 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
60495 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
60496 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
60497 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
60498 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
60499 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
60500 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
60501 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
60502 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
60503 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
60504 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
60505 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
60506 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
60507 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
60508 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
60509 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
60510 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
60511 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
60512 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
60513 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
60514 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
60515 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
60516 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
60517 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
60518 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
60519 +#define GR_NICE_CHROOT_MSG "denied priority change by "
60520 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
60521 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
60522 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
60523 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
60524 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
60525 +#define GR_TIME_MSG "time set by "
60526 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
60527 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
60528 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
60529 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
60530 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
60531 +#define GR_BIND_MSG "denied bind() by "
60532 +#define GR_CONNECT_MSG "denied connect() by "
60533 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
60534 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
60535 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
60536 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
60537 +#define GR_CAP_ACL_MSG "use of %s denied for "
60538 +#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
60539 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
60540 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
60541 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
60542 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
60543 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
60544 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
60545 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
60546 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
60547 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
60548 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
60549 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
60550 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
60551 +#define GR_VM86_MSG "denied use of vm86 by "
60552 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
60553 +#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
60554 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
60555 +#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
60556 diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
60557 new file mode 100644
60558 index 0000000..acd05db
60559 --- /dev/null
60560 +++ b/include/linux/grsecurity.h
60561 @@ -0,0 +1,232 @@
60562 +#ifndef GR_SECURITY_H
60563 +#define GR_SECURITY_H
60564 +#include <linux/fs.h>
60565 +#include <linux/fs_struct.h>
60566 +#include <linux/binfmts.h>
60567 +#include <linux/gracl.h>
60568 +
60569 +/* notify of brain-dead configs */
60570 +#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60571 +#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
60572 +#endif
60573 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
60574 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
60575 +#endif
60576 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
60577 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
60578 +#endif
60579 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
60580 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
60581 +#endif
60582 +
60583 +#include <linux/compat.h>
60584 +
60585 +struct user_arg_ptr {
60586 +#ifdef CONFIG_COMPAT
60587 + bool is_compat;
60588 +#endif
60589 + union {
60590 + const char __user *const __user *native;
60591 +#ifdef CONFIG_COMPAT
60592 + compat_uptr_t __user *compat;
60593 +#endif
60594 + } ptr;
60595 +};
60596 +
60597 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
60598 +void gr_handle_brute_check(void);
60599 +void gr_handle_kernel_exploit(void);
60600 +int gr_process_user_ban(void);
60601 +
60602 +char gr_roletype_to_char(void);
60603 +
60604 +int gr_acl_enable_at_secure(void);
60605 +
60606 +int gr_check_user_change(int real, int effective, int fs);
60607 +int gr_check_group_change(int real, int effective, int fs);
60608 +
60609 +void gr_del_task_from_ip_table(struct task_struct *p);
60610 +
60611 +int gr_pid_is_chrooted(struct task_struct *p);
60612 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
60613 +int gr_handle_chroot_nice(void);
60614 +int gr_handle_chroot_sysctl(const int op);
60615 +int gr_handle_chroot_setpriority(struct task_struct *p,
60616 + const int niceval);
60617 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
60618 +int gr_handle_chroot_chroot(const struct dentry *dentry,
60619 + const struct vfsmount *mnt);
60620 +void gr_handle_chroot_chdir(struct path *path);
60621 +int gr_handle_chroot_chmod(const struct dentry *dentry,
60622 + const struct vfsmount *mnt, const int mode);
60623 +int gr_handle_chroot_mknod(const struct dentry *dentry,
60624 + const struct vfsmount *mnt, const int mode);
60625 +int gr_handle_chroot_mount(const struct dentry *dentry,
60626 + const struct vfsmount *mnt,
60627 + const char *dev_name);
60628 +int gr_handle_chroot_pivot(void);
60629 +int gr_handle_chroot_unix(const pid_t pid);
60630 +
60631 +int gr_handle_rawio(const struct inode *inode);
60632 +
60633 +void gr_handle_ioperm(void);
60634 +void gr_handle_iopl(void);
60635 +
60636 +umode_t gr_acl_umask(void);
60637 +
60638 +int gr_tpe_allow(const struct file *file);
60639 +
60640 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
60641 +void gr_clear_chroot_entries(struct task_struct *task);
60642 +
60643 +void gr_log_forkfail(const int retval);
60644 +void gr_log_timechange(void);
60645 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
60646 +void gr_log_chdir(const struct dentry *dentry,
60647 + const struct vfsmount *mnt);
60648 +void gr_log_chroot_exec(const struct dentry *dentry,
60649 + const struct vfsmount *mnt);
60650 +void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
60651 +void gr_log_remount(const char *devname, const int retval);
60652 +void gr_log_unmount(const char *devname, const int retval);
60653 +void gr_log_mount(const char *from, const char *to, const int retval);
60654 +void gr_log_textrel(struct vm_area_struct *vma);
60655 +void gr_log_rwxmmap(struct file *file);
60656 +void gr_log_rwxmprotect(struct file *file);
60657 +
60658 +int gr_handle_follow_link(const struct inode *parent,
60659 + const struct inode *inode,
60660 + const struct dentry *dentry,
60661 + const struct vfsmount *mnt);
60662 +int gr_handle_fifo(const struct dentry *dentry,
60663 + const struct vfsmount *mnt,
60664 + const struct dentry *dir, const int flag,
60665 + const int acc_mode);
60666 +int gr_handle_hardlink(const struct dentry *dentry,
60667 + const struct vfsmount *mnt,
60668 + struct inode *inode,
60669 + const int mode, const char *to);
60670 +
60671 +int gr_is_capable(const int cap);
60672 +int gr_is_capable_nolog(const int cap);
60673 +int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
60674 +int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
60675 +
60676 +void gr_learn_resource(const struct task_struct *task, const int limit,
60677 + const unsigned long wanted, const int gt);
60678 +void gr_copy_label(struct task_struct *tsk);
60679 +void gr_handle_crash(struct task_struct *task, const int sig);
60680 +int gr_handle_signal(const struct task_struct *p, const int sig);
60681 +int gr_check_crash_uid(const uid_t uid);
60682 +int gr_check_protected_task(const struct task_struct *task);
60683 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
60684 +int gr_acl_handle_mmap(const struct file *file,
60685 + const unsigned long prot);
60686 +int gr_acl_handle_mprotect(const struct file *file,
60687 + const unsigned long prot);
60688 +int gr_check_hidden_task(const struct task_struct *tsk);
60689 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
60690 + const struct vfsmount *mnt);
60691 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
60692 + const struct vfsmount *mnt);
60693 +__u32 gr_acl_handle_access(const struct dentry *dentry,
60694 + const struct vfsmount *mnt, const int fmode);
60695 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
60696 + const struct vfsmount *mnt, umode_t *mode);
60697 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
60698 + const struct vfsmount *mnt);
60699 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
60700 + const struct vfsmount *mnt);
60701 +int gr_handle_ptrace(struct task_struct *task, const long request);
60702 +int gr_handle_proc_ptrace(struct task_struct *task);
60703 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
60704 + const struct vfsmount *mnt);
60705 +int gr_check_crash_exec(const struct file *filp);
60706 +int gr_acl_is_enabled(void);
60707 +void gr_set_kernel_label(struct task_struct *task);
60708 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
60709 + const gid_t gid);
60710 +int gr_set_proc_label(const struct dentry *dentry,
60711 + const struct vfsmount *mnt,
60712 + const int unsafe_flags);
60713 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
60714 + const struct vfsmount *mnt);
60715 +__u32 gr_acl_handle_open(const struct dentry *dentry,
60716 + const struct vfsmount *mnt, int acc_mode);
60717 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
60718 + const struct dentry *p_dentry,
60719 + const struct vfsmount *p_mnt,
60720 + int open_flags, int acc_mode, const int imode);
60721 +void gr_handle_create(const struct dentry *dentry,
60722 + const struct vfsmount *mnt);
60723 +void gr_handle_proc_create(const struct dentry *dentry,
60724 + const struct inode *inode);
60725 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
60726 + const struct dentry *parent_dentry,
60727 + const struct vfsmount *parent_mnt,
60728 + const int mode);
60729 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
60730 + const struct dentry *parent_dentry,
60731 + const struct vfsmount *parent_mnt);
60732 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
60733 + const struct vfsmount *mnt);
60734 +void gr_handle_delete(const ino_t ino, const dev_t dev);
60735 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
60736 + const struct vfsmount *mnt);
60737 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
60738 + const struct dentry *parent_dentry,
60739 + const struct vfsmount *parent_mnt,
60740 + const char *from);
60741 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
60742 + const struct dentry *parent_dentry,
60743 + const struct vfsmount *parent_mnt,
60744 + const struct dentry *old_dentry,
60745 + const struct vfsmount *old_mnt, const char *to);
60746 +int gr_acl_handle_rename(struct dentry *new_dentry,
60747 + struct dentry *parent_dentry,
60748 + const struct vfsmount *parent_mnt,
60749 + struct dentry *old_dentry,
60750 + struct inode *old_parent_inode,
60751 + struct vfsmount *old_mnt, const char *newname);
60752 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
60753 + struct dentry *old_dentry,
60754 + struct dentry *new_dentry,
60755 + struct vfsmount *mnt, const __u8 replace);
60756 +__u32 gr_check_link(const struct dentry *new_dentry,
60757 + const struct dentry *parent_dentry,
60758 + const struct vfsmount *parent_mnt,
60759 + const struct dentry *old_dentry,
60760 + const struct vfsmount *old_mnt);
60761 +int gr_acl_handle_filldir(const struct file *file, const char *name,
60762 + const unsigned int namelen, const ino_t ino);
60763 +
60764 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
60765 + const struct vfsmount *mnt);
60766 +void gr_acl_handle_exit(void);
60767 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
60768 +int gr_acl_handle_procpidmem(const struct task_struct *task);
60769 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
60770 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
60771 +void gr_audit_ptrace(struct task_struct *task);
60772 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
60773 +
60774 +int gr_ptrace_readexec(struct file *file, int unsafe_flags);
60775 +
60776 +#ifdef CONFIG_GRKERNSEC
60777 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
60778 +void gr_handle_vm86(void);
60779 +void gr_handle_mem_readwrite(u64 from, u64 to);
60780 +
60781 +void gr_log_badprocpid(const char *entry);
60782 +
60783 +extern int grsec_enable_dmesg;
60784 +extern int grsec_disable_privio;
60785 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
60786 +extern int grsec_enable_chroot_findtask;
60787 +#endif
60788 +#ifdef CONFIG_GRKERNSEC_SETXID
60789 +extern int grsec_enable_setxid;
60790 +#endif
60791 +#endif
60792 +
60793 +#endif
60794 diff --git a/include/linux/grsock.h b/include/linux/grsock.h
60795 new file mode 100644
60796 index 0000000..e7ffaaf
60797 --- /dev/null
60798 +++ b/include/linux/grsock.h
60799 @@ -0,0 +1,19 @@
60800 +#ifndef __GRSOCK_H
60801 +#define __GRSOCK_H
60802 +
60803 +extern void gr_attach_curr_ip(const struct sock *sk);
60804 +extern int gr_handle_sock_all(const int family, const int type,
60805 + const int protocol);
60806 +extern int gr_handle_sock_server(const struct sockaddr *sck);
60807 +extern int gr_handle_sock_server_other(const struct sock *sck);
60808 +extern int gr_handle_sock_client(const struct sockaddr *sck);
60809 +extern int gr_search_connect(struct socket * sock,
60810 + struct sockaddr_in * addr);
60811 +extern int gr_search_bind(struct socket * sock,
60812 + struct sockaddr_in * addr);
60813 +extern int gr_search_listen(struct socket * sock);
60814 +extern int gr_search_accept(struct socket * sock);
60815 +extern int gr_search_socket(const int domain, const int type,
60816 + const int protocol);
60817 +
60818 +#endif
60819 diff --git a/include/linux/hid.h b/include/linux/hid.h
60820 index 3a95da6..51986f1 100644
60821 --- a/include/linux/hid.h
60822 +++ b/include/linux/hid.h
60823 @@ -696,7 +696,7 @@ struct hid_ll_driver {
60824 unsigned int code, int value);
60825
60826 int (*parse)(struct hid_device *hdev);
60827 -};
60828 +} __no_const;
60829
60830 #define PM_HINT_FULLON 1<<5
60831 #define PM_HINT_NORMAL 1<<1
60832 diff --git a/include/linux/highmem.h b/include/linux/highmem.h
60833 index 3a93f73..b19d0b3 100644
60834 --- a/include/linux/highmem.h
60835 +++ b/include/linux/highmem.h
60836 @@ -185,6 +185,18 @@ static inline void clear_highpage(struct page *page)
60837 kunmap_atomic(kaddr, KM_USER0);
60838 }
60839
60840 +static inline void sanitize_highpage(struct page *page)
60841 +{
60842 + void *kaddr;
60843 + unsigned long flags;
60844 +
60845 + local_irq_save(flags);
60846 + kaddr = kmap_atomic(page, KM_CLEARPAGE);
60847 + clear_page(kaddr);
60848 + kunmap_atomic(kaddr, KM_CLEARPAGE);
60849 + local_irq_restore(flags);
60850 +}
60851 +
60852 static inline void zero_user_segments(struct page *page,
60853 unsigned start1, unsigned end1,
60854 unsigned start2, unsigned end2)
60855 diff --git a/include/linux/i2c.h b/include/linux/i2c.h
60856 index 8e25a91..551b161 100644
60857 --- a/include/linux/i2c.h
60858 +++ b/include/linux/i2c.h
60859 @@ -364,6 +364,7 @@ struct i2c_algorithm {
60860 /* To determine what the adapter supports */
60861 u32 (*functionality) (struct i2c_adapter *);
60862 };
60863 +typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
60864
60865 /*
60866 * i2c_adapter is the structure used to identify a physical i2c bus along
60867 diff --git a/include/linux/i2o.h b/include/linux/i2o.h
60868 index a6deef4..c56a7f2 100644
60869 --- a/include/linux/i2o.h
60870 +++ b/include/linux/i2o.h
60871 @@ -564,7 +564,7 @@ struct i2o_controller {
60872 struct i2o_device *exec; /* Executive */
60873 #if BITS_PER_LONG == 64
60874 spinlock_t context_list_lock; /* lock for context_list */
60875 - atomic_t context_list_counter; /* needed for unique contexts */
60876 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
60877 struct list_head context_list; /* list of context id's
60878 and pointers */
60879 #endif
60880 diff --git a/include/linux/if_team.h b/include/linux/if_team.h
60881 index 58404b0..439ed95 100644
60882 --- a/include/linux/if_team.h
60883 +++ b/include/linux/if_team.h
60884 @@ -64,6 +64,7 @@ struct team_mode_ops {
60885 void (*port_leave)(struct team *team, struct team_port *port);
60886 void (*port_change_mac)(struct team *team, struct team_port *port);
60887 };
60888 +typedef struct team_mode_ops __no_const team_mode_ops_no_const;
60889
60890 enum team_option_type {
60891 TEAM_OPTION_TYPE_U32,
60892 @@ -112,7 +113,7 @@ struct team {
60893 struct list_head option_list;
60894
60895 const struct team_mode *mode;
60896 - struct team_mode_ops ops;
60897 + team_mode_ops_no_const ops;
60898 long mode_priv[TEAM_MODE_PRIV_LONGS];
60899 };
60900
60901 diff --git a/include/linux/init.h b/include/linux/init.h
60902 index 6b95109..4aca62c 100644
60903 --- a/include/linux/init.h
60904 +++ b/include/linux/init.h
60905 @@ -294,13 +294,13 @@ void __init parse_early_options(char *cmdline);
60906
60907 /* Each module must use one module_init(). */
60908 #define module_init(initfn) \
60909 - static inline initcall_t __inittest(void) \
60910 + static inline __used initcall_t __inittest(void) \
60911 { return initfn; } \
60912 int init_module(void) __attribute__((alias(#initfn)));
60913
60914 /* This is only required if you want to be unloadable. */
60915 #define module_exit(exitfn) \
60916 - static inline exitcall_t __exittest(void) \
60917 + static inline __used exitcall_t __exittest(void) \
60918 { return exitfn; } \
60919 void cleanup_module(void) __attribute__((alias(#exitfn)));
60920
60921 diff --git a/include/linux/init_task.h b/include/linux/init_task.h
60922 index 9c66b1a..a3fdded 100644
60923 --- a/include/linux/init_task.h
60924 +++ b/include/linux/init_task.h
60925 @@ -127,6 +127,12 @@ extern struct cred init_cred;
60926
60927 #define INIT_TASK_COMM "swapper"
60928
60929 +#ifdef CONFIG_X86
60930 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
60931 +#else
60932 +#define INIT_TASK_THREAD_INFO
60933 +#endif
60934 +
60935 /*
60936 * INIT_TASK is used to set up the first task table, touch at
60937 * your own risk!. Base=0, limit=0x1fffff (=2MB)
60938 @@ -165,6 +171,7 @@ extern struct cred init_cred;
60939 RCU_INIT_POINTER(.cred, &init_cred), \
60940 .comm = INIT_TASK_COMM, \
60941 .thread = INIT_THREAD, \
60942 + INIT_TASK_THREAD_INFO \
60943 .fs = &init_fs, \
60944 .files = &init_files, \
60945 .signal = &init_signals, \
60946 diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
60947 index e6ca56d..8583707 100644
60948 --- a/include/linux/intel-iommu.h
60949 +++ b/include/linux/intel-iommu.h
60950 @@ -296,7 +296,7 @@ struct iommu_flush {
60951 u8 fm, u64 type);
60952 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
60953 unsigned int size_order, u64 type);
60954 -};
60955 +} __no_const;
60956
60957 enum {
60958 SR_DMAR_FECTL_REG,
60959 diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
60960 index a64b00e..464d8bc 100644
60961 --- a/include/linux/interrupt.h
60962 +++ b/include/linux/interrupt.h
60963 @@ -441,7 +441,7 @@ enum
60964 /* map softirq index to softirq name. update 'softirq_to_name' in
60965 * kernel/softirq.c when adding a new softirq.
60966 */
60967 -extern char *softirq_to_name[NR_SOFTIRQS];
60968 +extern const char * const softirq_to_name[NR_SOFTIRQS];
60969
60970 /* softirq mask and active fields moved to irq_cpustat_t in
60971 * asm/hardirq.h to get better cache usage. KAO
60972 @@ -449,12 +449,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
60973
60974 struct softirq_action
60975 {
60976 - void (*action)(struct softirq_action *);
60977 + void (*action)(void);
60978 };
60979
60980 asmlinkage void do_softirq(void);
60981 asmlinkage void __do_softirq(void);
60982 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
60983 +extern void open_softirq(int nr, void (*action)(void));
60984 extern void softirq_init(void);
60985 static inline void __raise_softirq_irqoff(unsigned int nr)
60986 {
60987 diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
60988 index 3875719..4cd454c 100644
60989 --- a/include/linux/kallsyms.h
60990 +++ b/include/linux/kallsyms.h
60991 @@ -15,7 +15,8 @@
60992
60993 struct module;
60994
60995 -#ifdef CONFIG_KALLSYMS
60996 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
60997 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
60998 /* Lookup the address for a symbol. Returns 0 if not found. */
60999 unsigned long kallsyms_lookup_name(const char *name);
61000
61001 @@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
61002 /* Stupid that this does nothing, but I didn't create this mess. */
61003 #define __print_symbol(fmt, addr)
61004 #endif /*CONFIG_KALLSYMS*/
61005 +#else /* when included by kallsyms.c, vsnprintf.c, or
61006 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
61007 +extern void __print_symbol(const char *fmt, unsigned long address);
61008 +extern int sprint_backtrace(char *buffer, unsigned long address);
61009 +extern int sprint_symbol(char *buffer, unsigned long address);
61010 +const char *kallsyms_lookup(unsigned long addr,
61011 + unsigned long *symbolsize,
61012 + unsigned long *offset,
61013 + char **modname, char *namebuf);
61014 +#endif
61015
61016 /* This macro allows us to keep printk typechecking */
61017 static __printf(1, 2)
61018 diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
61019 index fa39183..40160be 100644
61020 --- a/include/linux/kgdb.h
61021 +++ b/include/linux/kgdb.h
61022 @@ -53,7 +53,7 @@ extern int kgdb_connected;
61023 extern int kgdb_io_module_registered;
61024
61025 extern atomic_t kgdb_setting_breakpoint;
61026 -extern atomic_t kgdb_cpu_doing_single_step;
61027 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
61028
61029 extern struct task_struct *kgdb_usethread;
61030 extern struct task_struct *kgdb_contthread;
61031 @@ -251,7 +251,7 @@ struct kgdb_arch {
61032 void (*disable_hw_break)(struct pt_regs *regs);
61033 void (*remove_all_hw_break)(void);
61034 void (*correct_hw_break)(void);
61035 -};
61036 +} __do_const;
61037
61038 /**
61039 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
61040 @@ -276,7 +276,7 @@ struct kgdb_io {
61041 void (*pre_exception) (void);
61042 void (*post_exception) (void);
61043 int is_console;
61044 -};
61045 +} __do_const;
61046
61047 extern struct kgdb_arch arch_kgdb_ops;
61048
61049 diff --git a/include/linux/kmod.h b/include/linux/kmod.h
61050 index 722f477..eef2a27 100644
61051 --- a/include/linux/kmod.h
61052 +++ b/include/linux/kmod.h
61053 @@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
61054 * usually useless though. */
61055 extern __printf(2, 3)
61056 int __request_module(bool wait, const char *name, ...);
61057 +extern __printf(3, 4)
61058 +int ___request_module(bool wait, char *param_name, const char *name, ...);
61059 #define request_module(mod...) __request_module(true, mod)
61060 #define request_module_nowait(mod...) __request_module(false, mod)
61061 #define try_then_request_module(x, mod...) \
61062 diff --git a/include/linux/kref.h b/include/linux/kref.h
61063 index 9c07dce..a92fa71 100644
61064 --- a/include/linux/kref.h
61065 +++ b/include/linux/kref.h
61066 @@ -63,7 +63,7 @@ static inline void kref_get(struct kref *kref)
61067 static inline int kref_sub(struct kref *kref, unsigned int count,
61068 void (*release)(struct kref *kref))
61069 {
61070 - WARN_ON(release == NULL);
61071 + BUG_ON(release == NULL);
61072
61073 if (atomic_sub_and_test((int) count, &kref->refcount)) {
61074 release(kref);
61075 diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
61076 index 900c763..43260cf 100644
61077 --- a/include/linux/kvm_host.h
61078 +++ b/include/linux/kvm_host.h
61079 @@ -326,7 +326,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
61080 void vcpu_load(struct kvm_vcpu *vcpu);
61081 void vcpu_put(struct kvm_vcpu *vcpu);
61082
61083 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
61084 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
61085 struct module *module);
61086 void kvm_exit(void);
61087
61088 @@ -485,7 +485,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
61089 struct kvm_guest_debug *dbg);
61090 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
61091
61092 -int kvm_arch_init(void *opaque);
61093 +int kvm_arch_init(const void *opaque);
61094 void kvm_arch_exit(void);
61095
61096 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
61097 diff --git a/include/linux/libata.h b/include/linux/libata.h
61098 index cafc09a..d7e7829 100644
61099 --- a/include/linux/libata.h
61100 +++ b/include/linux/libata.h
61101 @@ -909,7 +909,7 @@ struct ata_port_operations {
61102 * fields must be pointers.
61103 */
61104 const struct ata_port_operations *inherits;
61105 -};
61106 +} __do_const;
61107
61108 struct ata_port_info {
61109 unsigned long flags;
61110 diff --git a/include/linux/mca.h b/include/linux/mca.h
61111 index 3797270..7765ede 100644
61112 --- a/include/linux/mca.h
61113 +++ b/include/linux/mca.h
61114 @@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
61115 int region);
61116 void * (*mca_transform_memory)(struct mca_device *,
61117 void *memory);
61118 -};
61119 +} __no_const;
61120
61121 struct mca_bus {
61122 u64 default_dma_mask;
61123 diff --git a/include/linux/memory.h b/include/linux/memory.h
61124 index 1ac7f6e..a5794d0 100644
61125 --- a/include/linux/memory.h
61126 +++ b/include/linux/memory.h
61127 @@ -143,7 +143,7 @@ struct memory_accessor {
61128 size_t count);
61129 ssize_t (*write)(struct memory_accessor *, const char *buf,
61130 off_t offset, size_t count);
61131 -};
61132 +} __no_const;
61133
61134 /*
61135 * Kernel text modification mutex, used for code patching. Users of this lock
61136 diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
61137 index 9970337..9444122 100644
61138 --- a/include/linux/mfd/abx500.h
61139 +++ b/include/linux/mfd/abx500.h
61140 @@ -188,6 +188,7 @@ struct abx500_ops {
61141 int (*event_registers_startup_state_get) (struct device *, u8 *);
61142 int (*startup_irq_enabled) (struct device *, unsigned int);
61143 };
61144 +typedef struct abx500_ops __no_const abx500_ops_no_const;
61145
61146 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
61147 void abx500_remove_ops(struct device *dev);
61148 diff --git a/include/linux/mm.h b/include/linux/mm.h
61149 index 17b27cd..467ba2f 100644
61150 --- a/include/linux/mm.h
61151 +++ b/include/linux/mm.h
61152 @@ -115,7 +115,14 @@ extern unsigned int kobjsize(const void *objp);
61153
61154 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
61155 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
61156 +
61157 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
61158 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
61159 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
61160 +#else
61161 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
61162 +#endif
61163 +
61164 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
61165 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
61166
61167 @@ -1012,34 +1019,6 @@ int set_page_dirty(struct page *page);
61168 int set_page_dirty_lock(struct page *page);
61169 int clear_page_dirty_for_io(struct page *page);
61170
61171 -/* Is the vma a continuation of the stack vma above it? */
61172 -static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
61173 -{
61174 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
61175 -}
61176 -
61177 -static inline int stack_guard_page_start(struct vm_area_struct *vma,
61178 - unsigned long addr)
61179 -{
61180 - return (vma->vm_flags & VM_GROWSDOWN) &&
61181 - (vma->vm_start == addr) &&
61182 - !vma_growsdown(vma->vm_prev, addr);
61183 -}
61184 -
61185 -/* Is the vma a continuation of the stack vma below it? */
61186 -static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
61187 -{
61188 - return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
61189 -}
61190 -
61191 -static inline int stack_guard_page_end(struct vm_area_struct *vma,
61192 - unsigned long addr)
61193 -{
61194 - return (vma->vm_flags & VM_GROWSUP) &&
61195 - (vma->vm_end == addr) &&
61196 - !vma_growsup(vma->vm_next, addr);
61197 -}
61198 -
61199 extern unsigned long move_page_tables(struct vm_area_struct *vma,
61200 unsigned long old_addr, struct vm_area_struct *new_vma,
61201 unsigned long new_addr, unsigned long len);
61202 @@ -1134,6 +1113,15 @@ static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
61203 }
61204 #endif
61205
61206 +#ifdef CONFIG_MMU
61207 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
61208 +#else
61209 +static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
61210 +{
61211 + return __pgprot(0);
61212 +}
61213 +#endif
61214 +
61215 int vma_wants_writenotify(struct vm_area_struct *vma);
61216
61217 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
61218 @@ -1409,6 +1397,7 @@ out:
61219 }
61220
61221 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
61222 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
61223
61224 extern unsigned long do_brk(unsigned long, unsigned long);
61225
61226 @@ -1466,6 +1455,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
61227 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
61228 struct vm_area_struct **pprev);
61229
61230 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
61231 +extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
61232 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
61233 +
61234 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
61235 NULL if none. Assume start_addr < end_addr. */
61236 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
61237 @@ -1494,15 +1487,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
61238 return vma;
61239 }
61240
61241 -#ifdef CONFIG_MMU
61242 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
61243 -#else
61244 -static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
61245 -{
61246 - return __pgprot(0);
61247 -}
61248 -#endif
61249 -
61250 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
61251 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
61252 unsigned long pfn, unsigned long size, pgprot_t);
61253 @@ -1606,7 +1590,7 @@ extern int unpoison_memory(unsigned long pfn);
61254 extern int sysctl_memory_failure_early_kill;
61255 extern int sysctl_memory_failure_recovery;
61256 extern void shake_page(struct page *p, int access);
61257 -extern atomic_long_t mce_bad_pages;
61258 +extern atomic_long_unchecked_t mce_bad_pages;
61259 extern int soft_offline_page(struct page *page, int flags);
61260
61261 extern void dump_page(struct page *page);
61262 @@ -1637,5 +1621,11 @@ static inline unsigned int debug_guardpage_minorder(void) { return 0; }
61263 static inline bool page_is_guard(struct page *page) { return false; }
61264 #endif /* CONFIG_DEBUG_PAGEALLOC */
61265
61266 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
61267 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
61268 +#else
61269 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
61270 +#endif
61271 +
61272 #endif /* __KERNEL__ */
61273 #endif /* _LINUX_MM_H */
61274 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
61275 index 3cc3062..8947a82 100644
61276 --- a/include/linux/mm_types.h
61277 +++ b/include/linux/mm_types.h
61278 @@ -252,6 +252,8 @@ struct vm_area_struct {
61279 #ifdef CONFIG_NUMA
61280 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
61281 #endif
61282 +
61283 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
61284 };
61285
61286 struct core_thread {
61287 @@ -388,6 +390,24 @@ struct mm_struct {
61288 #ifdef CONFIG_CPUMASK_OFFSTACK
61289 struct cpumask cpumask_allocation;
61290 #endif
61291 +
61292 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
61293 + unsigned long pax_flags;
61294 +#endif
61295 +
61296 +#ifdef CONFIG_PAX_DLRESOLVE
61297 + unsigned long call_dl_resolve;
61298 +#endif
61299 +
61300 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
61301 + unsigned long call_syscall;
61302 +#endif
61303 +
61304 +#ifdef CONFIG_PAX_ASLR
61305 + unsigned long delta_mmap; /* randomized offset */
61306 + unsigned long delta_stack; /* randomized offset */
61307 +#endif
61308 +
61309 };
61310
61311 static inline void mm_init_cpumask(struct mm_struct *mm)
61312 diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
61313 index 1d1b1e1..2a13c78 100644
61314 --- a/include/linux/mmu_notifier.h
61315 +++ b/include/linux/mmu_notifier.h
61316 @@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
61317 */
61318 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
61319 ({ \
61320 - pte_t __pte; \
61321 + pte_t ___pte; \
61322 struct vm_area_struct *___vma = __vma; \
61323 unsigned long ___address = __address; \
61324 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
61325 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
61326 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
61327 - __pte; \
61328 + ___pte; \
61329 })
61330
61331 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
61332 diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
61333 index 650ba2f..af0a58c 100644
61334 --- a/include/linux/mmzone.h
61335 +++ b/include/linux/mmzone.h
61336 @@ -379,7 +379,7 @@ struct zone {
61337 unsigned long flags; /* zone flags, see below */
61338
61339 /* Zone statistics */
61340 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61341 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61342
61343 /*
61344 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
61345 diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
61346 index 83ac071..2656e0e 100644
61347 --- a/include/linux/mod_devicetable.h
61348 +++ b/include/linux/mod_devicetable.h
61349 @@ -12,7 +12,7 @@
61350 typedef unsigned long kernel_ulong_t;
61351 #endif
61352
61353 -#define PCI_ANY_ID (~0)
61354 +#define PCI_ANY_ID ((__u16)~0)
61355
61356 struct pci_device_id {
61357 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
61358 @@ -131,7 +131,7 @@ struct usb_device_id {
61359 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
61360 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
61361
61362 -#define HID_ANY_ID (~0)
61363 +#define HID_ANY_ID (~0U)
61364
61365 struct hid_device_id {
61366 __u16 bus;
61367 diff --git a/include/linux/module.h b/include/linux/module.h
61368 index 4598bf0..e069d7f 100644
61369 --- a/include/linux/module.h
61370 +++ b/include/linux/module.h
61371 @@ -17,6 +17,7 @@
61372 #include <linux/moduleparam.h>
61373 #include <linux/tracepoint.h>
61374 #include <linux/export.h>
61375 +#include <linux/fs.h>
61376
61377 #include <linux/percpu.h>
61378 #include <asm/module.h>
61379 @@ -275,19 +276,16 @@ struct module
61380 int (*init)(void);
61381
61382 /* If this is non-NULL, vfree after init() returns */
61383 - void *module_init;
61384 + void *module_init_rx, *module_init_rw;
61385
61386 /* Here is the actual code + data, vfree'd on unload. */
61387 - void *module_core;
61388 + void *module_core_rx, *module_core_rw;
61389
61390 /* Here are the sizes of the init and core sections */
61391 - unsigned int init_size, core_size;
61392 + unsigned int init_size_rw, core_size_rw;
61393
61394 /* The size of the executable code in each section. */
61395 - unsigned int init_text_size, core_text_size;
61396 -
61397 - /* Size of RO sections of the module (text+rodata) */
61398 - unsigned int init_ro_size, core_ro_size;
61399 + unsigned int init_size_rx, core_size_rx;
61400
61401 /* Arch-specific module values */
61402 struct mod_arch_specific arch;
61403 @@ -343,6 +341,10 @@ struct module
61404 #ifdef CONFIG_EVENT_TRACING
61405 struct ftrace_event_call **trace_events;
61406 unsigned int num_trace_events;
61407 + struct file_operations trace_id;
61408 + struct file_operations trace_enable;
61409 + struct file_operations trace_format;
61410 + struct file_operations trace_filter;
61411 #endif
61412 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
61413 unsigned int num_ftrace_callsites;
61414 @@ -390,16 +392,46 @@ bool is_module_address(unsigned long addr);
61415 bool is_module_percpu_address(unsigned long addr);
61416 bool is_module_text_address(unsigned long addr);
61417
61418 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
61419 +{
61420 +
61421 +#ifdef CONFIG_PAX_KERNEXEC
61422 + if (ktla_ktva(addr) >= (unsigned long)start &&
61423 + ktla_ktva(addr) < (unsigned long)start + size)
61424 + return 1;
61425 +#endif
61426 +
61427 + return ((void *)addr >= start && (void *)addr < start + size);
61428 +}
61429 +
61430 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
61431 +{
61432 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
61433 +}
61434 +
61435 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
61436 +{
61437 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
61438 +}
61439 +
61440 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
61441 +{
61442 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
61443 +}
61444 +
61445 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
61446 +{
61447 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
61448 +}
61449 +
61450 static inline int within_module_core(unsigned long addr, struct module *mod)
61451 {
61452 - return (unsigned long)mod->module_core <= addr &&
61453 - addr < (unsigned long)mod->module_core + mod->core_size;
61454 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
61455 }
61456
61457 static inline int within_module_init(unsigned long addr, struct module *mod)
61458 {
61459 - return (unsigned long)mod->module_init <= addr &&
61460 - addr < (unsigned long)mod->module_init + mod->init_size;
61461 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
61462 }
61463
61464 /* Search for module by name: must hold module_mutex. */
61465 diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
61466 index b2be02e..6a9fdb1 100644
61467 --- a/include/linux/moduleloader.h
61468 +++ b/include/linux/moduleloader.h
61469 @@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
61470 sections. Returns NULL on failure. */
61471 void *module_alloc(unsigned long size);
61472
61473 +#ifdef CONFIG_PAX_KERNEXEC
61474 +void *module_alloc_exec(unsigned long size);
61475 +#else
61476 +#define module_alloc_exec(x) module_alloc(x)
61477 +#endif
61478 +
61479 /* Free memory returned from module_alloc. */
61480 void module_free(struct module *mod, void *module_region);
61481
61482 +#ifdef CONFIG_PAX_KERNEXEC
61483 +void module_free_exec(struct module *mod, void *module_region);
61484 +#else
61485 +#define module_free_exec(x, y) module_free((x), (y))
61486 +#endif
61487 +
61488 /* Apply the given relocation to the (simplified) ELF. Return -error
61489 or 0. */
61490 int apply_relocate(Elf_Shdr *sechdrs,
61491 diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
61492 index c47f4d6..23f9bdb 100644
61493 --- a/include/linux/moduleparam.h
61494 +++ b/include/linux/moduleparam.h
61495 @@ -260,7 +260,7 @@ static inline void __kernel_param_unlock(void)
61496 * @len is usually just sizeof(string).
61497 */
61498 #define module_param_string(name, string, len, perm) \
61499 - static const struct kparam_string __param_string_##name \
61500 + static const struct kparam_string __param_string_##name __used \
61501 = { len, string }; \
61502 __module_param_call(MODULE_PARAM_PREFIX, name, \
61503 &param_ops_string, \
61504 @@ -396,7 +396,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
61505 */
61506 #define module_param_array_named(name, array, type, nump, perm) \
61507 param_check_##type(name, &(array)[0]); \
61508 - static const struct kparam_array __param_arr_##name \
61509 + static const struct kparam_array __param_arr_##name __used \
61510 = { .max = ARRAY_SIZE(array), .num = nump, \
61511 .ops = &param_ops_##type, \
61512 .elemsize = sizeof(array[0]), .elem = array }; \
61513 diff --git a/include/linux/namei.h b/include/linux/namei.h
61514 index ffc0213..2c1f2cb 100644
61515 --- a/include/linux/namei.h
61516 +++ b/include/linux/namei.h
61517 @@ -24,7 +24,7 @@ struct nameidata {
61518 unsigned seq;
61519 int last_type;
61520 unsigned depth;
61521 - char *saved_names[MAX_NESTED_LINKS + 1];
61522 + const char *saved_names[MAX_NESTED_LINKS + 1];
61523
61524 /* Intent data */
61525 union {
61526 @@ -94,12 +94,12 @@ extern int follow_up(struct path *);
61527 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
61528 extern void unlock_rename(struct dentry *, struct dentry *);
61529
61530 -static inline void nd_set_link(struct nameidata *nd, char *path)
61531 +static inline void nd_set_link(struct nameidata *nd, const char *path)
61532 {
61533 nd->saved_names[nd->depth] = path;
61534 }
61535
61536 -static inline char *nd_get_link(struct nameidata *nd)
61537 +static inline const char *nd_get_link(const struct nameidata *nd)
61538 {
61539 return nd->saved_names[nd->depth];
61540 }
61541 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
61542 index 0eac07c..a59f6a8 100644
61543 --- a/include/linux/netdevice.h
61544 +++ b/include/linux/netdevice.h
61545 @@ -1002,6 +1002,7 @@ struct net_device_ops {
61546 int (*ndo_neigh_construct)(struct neighbour *n);
61547 void (*ndo_neigh_destroy)(struct neighbour *n);
61548 };
61549 +typedef struct net_device_ops __no_const net_device_ops_no_const;
61550
61551 /*
61552 * The DEVICE structure.
61553 @@ -1063,7 +1064,7 @@ struct net_device {
61554 int iflink;
61555
61556 struct net_device_stats stats;
61557 - atomic_long_t rx_dropped; /* dropped packets by core network
61558 + atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
61559 * Do not use this in drivers.
61560 */
61561
61562 diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
61563 new file mode 100644
61564 index 0000000..33f4af8
61565 --- /dev/null
61566 +++ b/include/linux/netfilter/xt_gradm.h
61567 @@ -0,0 +1,9 @@
61568 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
61569 +#define _LINUX_NETFILTER_XT_GRADM_H 1
61570 +
61571 +struct xt_gradm_mtinfo {
61572 + __u16 flags;
61573 + __u16 invflags;
61574 +};
61575 +
61576 +#endif
61577 diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h
61578 index c65a18a..0c05f3a 100644
61579 --- a/include/linux/of_pdt.h
61580 +++ b/include/linux/of_pdt.h
61581 @@ -32,7 +32,7 @@ struct of_pdt_ops {
61582
61583 /* return 0 on success; fill in 'len' with number of bytes in path */
61584 int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
61585 -};
61586 +} __no_const;
61587
61588 extern void *prom_early_alloc(unsigned long size);
61589
61590 diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
61591 index a4c5624..79d6d88 100644
61592 --- a/include/linux/oprofile.h
61593 +++ b/include/linux/oprofile.h
61594 @@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
61595 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
61596 char const * name, ulong * val);
61597
61598 -/** Create a file for read-only access to an atomic_t. */
61599 +/** Create a file for read-only access to an atomic_unchecked_t. */
61600 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
61601 - char const * name, atomic_t * val);
61602 + char const * name, atomic_unchecked_t * val);
61603
61604 /** create a directory */
61605 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
61606 diff --git a/include/linux/padata.h b/include/linux/padata.h
61607 index 4633b2f..988bc08 100644
61608 --- a/include/linux/padata.h
61609 +++ b/include/linux/padata.h
61610 @@ -129,7 +129,7 @@ struct parallel_data {
61611 struct padata_instance *pinst;
61612 struct padata_parallel_queue __percpu *pqueue;
61613 struct padata_serial_queue __percpu *squeue;
61614 - atomic_t seq_nr;
61615 + atomic_unchecked_t seq_nr;
61616 atomic_t reorder_objects;
61617 atomic_t refcnt;
61618 unsigned int max_seq_nr;
61619 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
61620 index abb2776..d8b8e15 100644
61621 --- a/include/linux/perf_event.h
61622 +++ b/include/linux/perf_event.h
61623 @@ -750,8 +750,8 @@ struct perf_event {
61624
61625 enum perf_event_active_state state;
61626 unsigned int attach_state;
61627 - local64_t count;
61628 - atomic64_t child_count;
61629 + local64_t count; /* PaX: fix it one day */
61630 + atomic64_unchecked_t child_count;
61631
61632 /*
61633 * These are the total time in nanoseconds that the event
61634 @@ -802,8 +802,8 @@ struct perf_event {
61635 * These accumulate total time (in nanoseconds) that children
61636 * events have been enabled and running, respectively.
61637 */
61638 - atomic64_t child_total_time_enabled;
61639 - atomic64_t child_total_time_running;
61640 + atomic64_unchecked_t child_total_time_enabled;
61641 + atomic64_unchecked_t child_total_time_running;
61642
61643 /*
61644 * Protect attach/detach and child_list:
61645 diff --git a/include/linux/personality.h b/include/linux/personality.h
61646 index 8fc7dd1a..c19d89e 100644
61647 --- a/include/linux/personality.h
61648 +++ b/include/linux/personality.h
61649 @@ -44,6 +44,7 @@ enum {
61650 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
61651 ADDR_NO_RANDOMIZE | \
61652 ADDR_COMPAT_LAYOUT | \
61653 + ADDR_LIMIT_3GB | \
61654 MMAP_PAGE_ZERO)
61655
61656 /*
61657 diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
61658 index 77257c9..51d473a 100644
61659 --- a/include/linux/pipe_fs_i.h
61660 +++ b/include/linux/pipe_fs_i.h
61661 @@ -46,9 +46,9 @@ struct pipe_buffer {
61662 struct pipe_inode_info {
61663 wait_queue_head_t wait;
61664 unsigned int nrbufs, curbuf, buffers;
61665 - unsigned int readers;
61666 - unsigned int writers;
61667 - unsigned int waiting_writers;
61668 + atomic_t readers;
61669 + atomic_t writers;
61670 + atomic_t waiting_writers;
61671 unsigned int r_counter;
61672 unsigned int w_counter;
61673 struct page *tmp_page;
61674 diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
61675 index 609daae..5392427 100644
61676 --- a/include/linux/pm_runtime.h
61677 +++ b/include/linux/pm_runtime.h
61678 @@ -97,7 +97,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
61679
61680 static inline void pm_runtime_mark_last_busy(struct device *dev)
61681 {
61682 - ACCESS_ONCE(dev->power.last_busy) = jiffies;
61683 + ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
61684 }
61685
61686 #else /* !CONFIG_PM_RUNTIME */
61687 diff --git a/include/linux/poison.h b/include/linux/poison.h
61688 index 2110a81..13a11bb 100644
61689 --- a/include/linux/poison.h
61690 +++ b/include/linux/poison.h
61691 @@ -19,8 +19,8 @@
61692 * under normal circumstances, used to verify that nobody uses
61693 * non-initialized list entries.
61694 */
61695 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
61696 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
61697 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
61698 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
61699
61700 /********** include/linux/timer.h **********/
61701 /*
61702 diff --git a/include/linux/preempt.h b/include/linux/preempt.h
61703 index 58969b2..ead129b 100644
61704 --- a/include/linux/preempt.h
61705 +++ b/include/linux/preempt.h
61706 @@ -123,7 +123,7 @@ struct preempt_ops {
61707 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
61708 void (*sched_out)(struct preempt_notifier *notifier,
61709 struct task_struct *next);
61710 -};
61711 +} __no_const;
61712
61713 /**
61714 * preempt_notifier - key for installing preemption notifiers
61715 diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
61716 index 85c5073..51fac8b 100644
61717 --- a/include/linux/proc_fs.h
61718 +++ b/include/linux/proc_fs.h
61719 @@ -155,6 +155,18 @@ static inline struct proc_dir_entry *proc_create(const char *name, umode_t mode,
61720 return proc_create_data(name, mode, parent, proc_fops, NULL);
61721 }
61722
61723 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
61724 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
61725 +{
61726 +#ifdef CONFIG_GRKERNSEC_PROC_USER
61727 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
61728 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
61729 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
61730 +#else
61731 + return proc_create_data(name, mode, parent, proc_fops, NULL);
61732 +#endif
61733 +}
61734 +
61735 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
61736 umode_t mode, struct proc_dir_entry *base,
61737 read_proc_t *read_proc, void * data)
61738 @@ -258,7 +270,7 @@ union proc_op {
61739 int (*proc_show)(struct seq_file *m,
61740 struct pid_namespace *ns, struct pid *pid,
61741 struct task_struct *task);
61742 -};
61743 +} __no_const;
61744
61745 struct ctl_table_header;
61746 struct ctl_table;
61747 diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
61748 index c2f1f6a..6fdb196 100644
61749 --- a/include/linux/ptrace.h
61750 +++ b/include/linux/ptrace.h
61751 @@ -199,9 +199,10 @@ static inline void ptrace_event(int event, unsigned long message)
61752 if (unlikely(ptrace_event_enabled(current, event))) {
61753 current->ptrace_message = message;
61754 ptrace_notify((event << 8) | SIGTRAP);
61755 - } else if (event == PTRACE_EVENT_EXEC && unlikely(current->ptrace)) {
61756 + } else if (event == PTRACE_EVENT_EXEC) {
61757 /* legacy EXEC report via SIGTRAP */
61758 - send_sig(SIGTRAP, current, 0);
61759 + if ((current->ptrace & (PT_PTRACED|PT_SEIZED)) == PT_PTRACED)
61760 + send_sig(SIGTRAP, current, 0);
61761 }
61762 }
61763
61764 diff --git a/include/linux/random.h b/include/linux/random.h
61765 index 8f74538..02a1012 100644
61766 --- a/include/linux/random.h
61767 +++ b/include/linux/random.h
61768 @@ -69,12 +69,17 @@ void srandom32(u32 seed);
61769
61770 u32 prandom32(struct rnd_state *);
61771
61772 +static inline unsigned long pax_get_random_long(void)
61773 +{
61774 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
61775 +}
61776 +
61777 /*
61778 * Handle minimum values for seeds
61779 */
61780 static inline u32 __seed(u32 x, u32 m)
61781 {
61782 - return (x < m) ? x + m : x;
61783 + return (x <= m) ? x + m + 1 : x;
61784 }
61785
61786 /**
61787 diff --git a/include/linux/reboot.h b/include/linux/reboot.h
61788 index e0879a7..a12f962 100644
61789 --- a/include/linux/reboot.h
61790 +++ b/include/linux/reboot.h
61791 @@ -52,9 +52,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
61792 * Architecture-specific implementations of sys_reboot commands.
61793 */
61794
61795 -extern void machine_restart(char *cmd);
61796 -extern void machine_halt(void);
61797 -extern void machine_power_off(void);
61798 +extern void machine_restart(char *cmd) __noreturn;
61799 +extern void machine_halt(void) __noreturn;
61800 +extern void machine_power_off(void) __noreturn;
61801
61802 extern void machine_shutdown(void);
61803 struct pt_regs;
61804 @@ -65,9 +65,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
61805 */
61806
61807 extern void kernel_restart_prepare(char *cmd);
61808 -extern void kernel_restart(char *cmd);
61809 -extern void kernel_halt(void);
61810 -extern void kernel_power_off(void);
61811 +extern void kernel_restart(char *cmd) __noreturn;
61812 +extern void kernel_halt(void) __noreturn;
61813 +extern void kernel_power_off(void) __noreturn;
61814
61815 extern int C_A_D; /* for sysctl */
61816 void ctrl_alt_del(void);
61817 @@ -81,7 +81,7 @@ extern int orderly_poweroff(bool force);
61818 * Emergency restart, callable from an interrupt handler.
61819 */
61820
61821 -extern void emergency_restart(void);
61822 +extern void emergency_restart(void) __noreturn;
61823 #include <asm/emergency-restart.h>
61824
61825 #endif
61826 diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
61827 index 2213ddc..650212a 100644
61828 --- a/include/linux/reiserfs_fs.h
61829 +++ b/include/linux/reiserfs_fs.h
61830 @@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
61831 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
61832
61833 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
61834 -#define get_generation(s) atomic_read (&fs_generation(s))
61835 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
61836 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
61837 #define __fs_changed(gen,s) (gen != get_generation (s))
61838 #define fs_changed(gen,s) \
61839 diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
61840 index 8c9e85c..1698e9a 100644
61841 --- a/include/linux/reiserfs_fs_sb.h
61842 +++ b/include/linux/reiserfs_fs_sb.h
61843 @@ -386,7 +386,7 @@ struct reiserfs_sb_info {
61844 /* Comment? -Hans */
61845 wait_queue_head_t s_wait;
61846 /* To be obsoleted soon by per buffer seals.. -Hans */
61847 - atomic_t s_generation_counter; // increased by one every time the
61848 + atomic_unchecked_t s_generation_counter; // increased by one every time the
61849 // tree gets re-balanced
61850 unsigned long s_properties; /* File system properties. Currently holds
61851 on-disk FS format */
61852 diff --git a/include/linux/relay.h b/include/linux/relay.h
61853 index a822fd7..62b70f6 100644
61854 --- a/include/linux/relay.h
61855 +++ b/include/linux/relay.h
61856 @@ -159,7 +159,7 @@ struct rchan_callbacks
61857 * The callback should return 0 if successful, negative if not.
61858 */
61859 int (*remove_buf_file)(struct dentry *dentry);
61860 -};
61861 +} __no_const;
61862
61863 /*
61864 * CONFIG_RELAY kernel API, kernel/relay.c
61865 diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
61866 index c6c6084..5bf1212 100644
61867 --- a/include/linux/rfkill.h
61868 +++ b/include/linux/rfkill.h
61869 @@ -147,6 +147,7 @@ struct rfkill_ops {
61870 void (*query)(struct rfkill *rfkill, void *data);
61871 int (*set_block)(void *data, bool blocked);
61872 };
61873 +typedef struct rfkill_ops __no_const rfkill_ops_no_const;
61874
61875 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
61876 /**
61877 diff --git a/include/linux/rio.h b/include/linux/rio.h
61878 index 4d50611..c6858a2 100644
61879 --- a/include/linux/rio.h
61880 +++ b/include/linux/rio.h
61881 @@ -315,7 +315,7 @@ struct rio_ops {
61882 int mbox, void *buffer, size_t len);
61883 int (*add_inb_buffer)(struct rio_mport *mport, int mbox, void *buf);
61884 void *(*get_inb_message)(struct rio_mport *mport, int mbox);
61885 -};
61886 +} __no_const;
61887
61888 #define RIO_RESOURCE_MEM 0x00000100
61889 #define RIO_RESOURCE_DOORBELL 0x00000200
61890 diff --git a/include/linux/rmap.h b/include/linux/rmap.h
61891 index 1cdd62a..e399f0d 100644
61892 --- a/include/linux/rmap.h
61893 +++ b/include/linux/rmap.h
61894 @@ -119,9 +119,9 @@ static inline void anon_vma_unlock(struct anon_vma *anon_vma)
61895 void anon_vma_init(void); /* create anon_vma_cachep */
61896 int anon_vma_prepare(struct vm_area_struct *);
61897 void unlink_anon_vmas(struct vm_area_struct *);
61898 -int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
61899 +int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
61900 void anon_vma_moveto_tail(struct vm_area_struct *);
61901 -int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
61902 +int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
61903 void __anon_vma_link(struct vm_area_struct *);
61904
61905 static inline void anon_vma_merge(struct vm_area_struct *vma,
61906 diff --git a/include/linux/sched.h b/include/linux/sched.h
61907 index 0657368..765f70f 100644
61908 --- a/include/linux/sched.h
61909 +++ b/include/linux/sched.h
61910 @@ -101,6 +101,7 @@ struct bio_list;
61911 struct fs_struct;
61912 struct perf_event_context;
61913 struct blk_plug;
61914 +struct linux_binprm;
61915
61916 /*
61917 * List of flags we want to share for kernel threads,
61918 @@ -382,10 +383,13 @@ struct user_namespace;
61919 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
61920
61921 extern int sysctl_max_map_count;
61922 +extern unsigned long sysctl_heap_stack_gap;
61923
61924 #include <linux/aio.h>
61925
61926 #ifdef CONFIG_MMU
61927 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
61928 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
61929 extern void arch_pick_mmap_layout(struct mm_struct *mm);
61930 extern unsigned long
61931 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
61932 @@ -631,6 +635,17 @@ struct signal_struct {
61933 #ifdef CONFIG_TASKSTATS
61934 struct taskstats *stats;
61935 #endif
61936 +
61937 +#ifdef CONFIG_GRKERNSEC
61938 + u32 curr_ip;
61939 + u32 saved_ip;
61940 + u32 gr_saddr;
61941 + u32 gr_daddr;
61942 + u16 gr_sport;
61943 + u16 gr_dport;
61944 + u8 used_accept:1;
61945 +#endif
61946 +
61947 #ifdef CONFIG_AUDIT
61948 unsigned audit_tty;
61949 struct tty_audit_buf *tty_audit_buf;
61950 @@ -714,6 +729,11 @@ struct user_struct {
61951 struct key *session_keyring; /* UID's default session keyring */
61952 #endif
61953
61954 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
61955 + unsigned int banned;
61956 + unsigned long ban_expires;
61957 +#endif
61958 +
61959 /* Hash table maintenance information */
61960 struct hlist_node uidhash_node;
61961 uid_t uid;
61962 @@ -1354,8 +1374,8 @@ struct task_struct {
61963 struct list_head thread_group;
61964
61965 struct completion *vfork_done; /* for vfork() */
61966 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
61967 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
61968 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
61969 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
61970
61971 cputime_t utime, stime, utimescaled, stimescaled;
61972 cputime_t gtime;
61973 @@ -1371,13 +1391,6 @@ struct task_struct {
61974 struct task_cputime cputime_expires;
61975 struct list_head cpu_timers[3];
61976
61977 -/* process credentials */
61978 - const struct cred __rcu *real_cred; /* objective and real subjective task
61979 - * credentials (COW) */
61980 - const struct cred __rcu *cred; /* effective (overridable) subjective task
61981 - * credentials (COW) */
61982 - struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
61983 -
61984 char comm[TASK_COMM_LEN]; /* executable name excluding path
61985 - access with [gs]et_task_comm (which lock
61986 it with task_lock())
61987 @@ -1394,8 +1407,16 @@ struct task_struct {
61988 #endif
61989 /* CPU-specific state of this task */
61990 struct thread_struct thread;
61991 +/* thread_info moved to task_struct */
61992 +#ifdef CONFIG_X86
61993 + struct thread_info tinfo;
61994 +#endif
61995 /* filesystem information */
61996 struct fs_struct *fs;
61997 +
61998 + const struct cred __rcu *cred; /* effective (overridable) subjective task
61999 + * credentials (COW) */
62000 +
62001 /* open file information */
62002 struct files_struct *files;
62003 /* namespaces */
62004 @@ -1442,6 +1463,11 @@ struct task_struct {
62005 struct rt_mutex_waiter *pi_blocked_on;
62006 #endif
62007
62008 +/* process credentials */
62009 + const struct cred __rcu *real_cred; /* objective and real subjective task
62010 + * credentials (COW) */
62011 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
62012 +
62013 #ifdef CONFIG_DEBUG_MUTEXES
62014 /* mutex deadlock detection */
62015 struct mutex_waiter *blocked_on;
62016 @@ -1558,6 +1584,27 @@ struct task_struct {
62017 unsigned long default_timer_slack_ns;
62018
62019 struct list_head *scm_work_list;
62020 +
62021 +#ifdef CONFIG_GRKERNSEC
62022 + /* grsecurity */
62023 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62024 + u64 exec_id;
62025 +#endif
62026 +#ifdef CONFIG_GRKERNSEC_SETXID
62027 + const struct cred *delayed_cred;
62028 +#endif
62029 + struct dentry *gr_chroot_dentry;
62030 + struct acl_subject_label *acl;
62031 + struct acl_role_label *role;
62032 + struct file *exec_file;
62033 + u16 acl_role_id;
62034 + /* is this the task that authenticated to the special role */
62035 + u8 acl_sp_role;
62036 + u8 is_writable;
62037 + u8 brute;
62038 + u8 gr_is_chrooted;
62039 +#endif
62040 +
62041 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
62042 /* Index of current stored address in ret_stack */
62043 int curr_ret_stack;
62044 @@ -1592,6 +1639,51 @@ struct task_struct {
62045 #endif
62046 };
62047
62048 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
62049 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
62050 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
62051 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
62052 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
62053 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
62054 +
62055 +#ifdef CONFIG_PAX_SOFTMODE
62056 +extern int pax_softmode;
62057 +#endif
62058 +
62059 +extern int pax_check_flags(unsigned long *);
62060 +
62061 +/* if tsk != current then task_lock must be held on it */
62062 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
62063 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
62064 +{
62065 + if (likely(tsk->mm))
62066 + return tsk->mm->pax_flags;
62067 + else
62068 + return 0UL;
62069 +}
62070 +
62071 +/* if tsk != current then task_lock must be held on it */
62072 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
62073 +{
62074 + if (likely(tsk->mm)) {
62075 + tsk->mm->pax_flags = flags;
62076 + return 0;
62077 + }
62078 + return -EINVAL;
62079 +}
62080 +#endif
62081 +
62082 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
62083 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
62084 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
62085 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
62086 +#endif
62087 +
62088 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
62089 +extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
62090 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
62091 +extern __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type);
62092 +
62093 /* Future-safe accessor for struct task_struct's cpus_allowed. */
62094 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
62095
62096 @@ -2104,7 +2196,9 @@ void yield(void);
62097 extern struct exec_domain default_exec_domain;
62098
62099 union thread_union {
62100 +#ifndef CONFIG_X86
62101 struct thread_info thread_info;
62102 +#endif
62103 unsigned long stack[THREAD_SIZE/sizeof(long)];
62104 };
62105
62106 @@ -2137,6 +2231,7 @@ extern struct pid_namespace init_pid_ns;
62107 */
62108
62109 extern struct task_struct *find_task_by_vpid(pid_t nr);
62110 +extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
62111 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
62112 struct pid_namespace *ns);
62113
62114 @@ -2280,7 +2375,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
62115 extern void exit_itimers(struct signal_struct *);
62116 extern void flush_itimer_signals(void);
62117
62118 -extern void do_group_exit(int);
62119 +extern __noreturn void do_group_exit(int);
62120
62121 extern void daemonize(const char *, ...);
62122 extern int allow_signal(int);
62123 @@ -2478,13 +2573,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
62124
62125 #endif
62126
62127 -static inline int object_is_on_stack(void *obj)
62128 +static inline int object_starts_on_stack(void *obj)
62129 {
62130 - void *stack = task_stack_page(current);
62131 + const void *stack = task_stack_page(current);
62132
62133 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
62134 }
62135
62136 +#ifdef CONFIG_PAX_USERCOPY
62137 +extern int object_is_on_stack(const void *obj, unsigned long len);
62138 +#endif
62139 +
62140 extern void thread_info_cache_init(void);
62141
62142 #ifdef CONFIG_DEBUG_STACK_USAGE
62143 diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
62144 index 899fbb4..1cb4138 100644
62145 --- a/include/linux/screen_info.h
62146 +++ b/include/linux/screen_info.h
62147 @@ -43,7 +43,8 @@ struct screen_info {
62148 __u16 pages; /* 0x32 */
62149 __u16 vesa_attributes; /* 0x34 */
62150 __u32 capabilities; /* 0x36 */
62151 - __u8 _reserved[6]; /* 0x3a */
62152 + __u16 vesapm_size; /* 0x3a */
62153 + __u8 _reserved[4]; /* 0x3c */
62154 } __attribute__((packed));
62155
62156 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
62157 diff --git a/include/linux/security.h b/include/linux/security.h
62158 index 83c18e8..2d98860 100644
62159 --- a/include/linux/security.h
62160 +++ b/include/linux/security.h
62161 @@ -37,6 +37,7 @@
62162 #include <linux/xfrm.h>
62163 #include <linux/slab.h>
62164 #include <linux/xattr.h>
62165 +#include <linux/grsecurity.h>
62166 #include <net/flow.h>
62167
62168 /* Maximum number of letters for an LSM name string */
62169 diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
62170 index 44f1514..2bbf6c1 100644
62171 --- a/include/linux/seq_file.h
62172 +++ b/include/linux/seq_file.h
62173 @@ -24,6 +24,9 @@ struct seq_file {
62174 struct mutex lock;
62175 const struct seq_operations *op;
62176 int poll_event;
62177 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62178 + u64 exec_id;
62179 +#endif
62180 void *private;
62181 };
62182
62183 @@ -33,6 +36,7 @@ struct seq_operations {
62184 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
62185 int (*show) (struct seq_file *m, void *v);
62186 };
62187 +typedef struct seq_operations __no_const seq_operations_no_const;
62188
62189 #define SEQ_SKIP 1
62190
62191 diff --git a/include/linux/shm.h b/include/linux/shm.h
62192 index 92808b8..c28cac4 100644
62193 --- a/include/linux/shm.h
62194 +++ b/include/linux/shm.h
62195 @@ -98,6 +98,10 @@ struct shmid_kernel /* private to the kernel */
62196
62197 /* The task created the shm object. NULL if the task is dead. */
62198 struct task_struct *shm_creator;
62199 +#ifdef CONFIG_GRKERNSEC
62200 + time_t shm_createtime;
62201 + pid_t shm_lapid;
62202 +#endif
62203 };
62204
62205 /* shm_mode upper byte flags */
62206 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
62207 index ae86ade..2b51468 100644
62208 --- a/include/linux/skbuff.h
62209 +++ b/include/linux/skbuff.h
62210 @@ -654,7 +654,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
62211 */
62212 static inline int skb_queue_empty(const struct sk_buff_head *list)
62213 {
62214 - return list->next == (struct sk_buff *)list;
62215 + return list->next == (const struct sk_buff *)list;
62216 }
62217
62218 /**
62219 @@ -667,7 +667,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
62220 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
62221 const struct sk_buff *skb)
62222 {
62223 - return skb->next == (struct sk_buff *)list;
62224 + return skb->next == (const struct sk_buff *)list;
62225 }
62226
62227 /**
62228 @@ -680,7 +680,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
62229 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
62230 const struct sk_buff *skb)
62231 {
62232 - return skb->prev == (struct sk_buff *)list;
62233 + return skb->prev == (const struct sk_buff *)list;
62234 }
62235
62236 /**
62237 @@ -1545,7 +1545,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
62238 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
62239 */
62240 #ifndef NET_SKB_PAD
62241 -#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
62242 +#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
62243 #endif
62244
62245 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
62246 diff --git a/include/linux/slab.h b/include/linux/slab.h
62247 index 573c809..e84c132 100644
62248 --- a/include/linux/slab.h
62249 +++ b/include/linux/slab.h
62250 @@ -11,12 +11,20 @@
62251
62252 #include <linux/gfp.h>
62253 #include <linux/types.h>
62254 +#include <linux/err.h>
62255
62256 /*
62257 * Flags to pass to kmem_cache_create().
62258 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
62259 */
62260 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
62261 +
62262 +#ifdef CONFIG_PAX_USERCOPY
62263 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
62264 +#else
62265 +#define SLAB_USERCOPY 0x00000000UL
62266 +#endif
62267 +
62268 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
62269 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
62270 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
62271 @@ -87,10 +95,13 @@
62272 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
62273 * Both make kfree a no-op.
62274 */
62275 -#define ZERO_SIZE_PTR ((void *)16)
62276 +#define ZERO_SIZE_PTR \
62277 +({ \
62278 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
62279 + (void *)(-MAX_ERRNO-1L); \
62280 +})
62281
62282 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
62283 - (unsigned long)ZERO_SIZE_PTR)
62284 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
62285
62286 /*
62287 * struct kmem_cache related prototypes
62288 @@ -161,6 +172,7 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
62289 void kfree(const void *);
62290 void kzfree(const void *);
62291 size_t ksize(const void *);
62292 +void check_object_size(const void *ptr, unsigned long n, bool to);
62293
62294 /*
62295 * Allocator specific definitions. These are mainly used to establish optimized
62296 @@ -353,4 +365,59 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
62297
62298 void __init kmem_cache_init_late(void);
62299
62300 +#define kmalloc(x, y) \
62301 +({ \
62302 + void *___retval; \
62303 + intoverflow_t ___x = (intoverflow_t)x; \
62304 + if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n")) \
62305 + ___retval = NULL; \
62306 + else \
62307 + ___retval = kmalloc((size_t)___x, (y)); \
62308 + ___retval; \
62309 +})
62310 +
62311 +#define kmalloc_node(x, y, z) \
62312 +({ \
62313 + void *___retval; \
62314 + intoverflow_t ___x = (intoverflow_t)x; \
62315 + if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
62316 + ___retval = NULL; \
62317 + else \
62318 + ___retval = kmalloc_node((size_t)___x, (y), (z));\
62319 + ___retval; \
62320 +})
62321 +
62322 +#define kzalloc(x, y) \
62323 +({ \
62324 + void *___retval; \
62325 + intoverflow_t ___x = (intoverflow_t)x; \
62326 + if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n")) \
62327 + ___retval = NULL; \
62328 + else \
62329 + ___retval = kzalloc((size_t)___x, (y)); \
62330 + ___retval; \
62331 +})
62332 +
62333 +#define __krealloc(x, y, z) \
62334 +({ \
62335 + void *___retval; \
62336 + intoverflow_t ___y = (intoverflow_t)y; \
62337 + if (WARN(___y > ULONG_MAX, "__krealloc size overflow\n"))\
62338 + ___retval = NULL; \
62339 + else \
62340 + ___retval = __krealloc((x), (size_t)___y, (z)); \
62341 + ___retval; \
62342 +})
62343 +
62344 +#define krealloc(x, y, z) \
62345 +({ \
62346 + void *___retval; \
62347 + intoverflow_t ___y = (intoverflow_t)y; \
62348 + if (WARN(___y > ULONG_MAX, "krealloc size overflow\n")) \
62349 + ___retval = NULL; \
62350 + else \
62351 + ___retval = krealloc((x), (size_t)___y, (z)); \
62352 + ___retval; \
62353 +})
62354 +
62355 #endif /* _LINUX_SLAB_H */
62356 diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
62357 index fbd1117..1e5e46c 100644
62358 --- a/include/linux/slab_def.h
62359 +++ b/include/linux/slab_def.h
62360 @@ -66,10 +66,10 @@ struct kmem_cache {
62361 unsigned long node_allocs;
62362 unsigned long node_frees;
62363 unsigned long node_overflow;
62364 - atomic_t allochit;
62365 - atomic_t allocmiss;
62366 - atomic_t freehit;
62367 - atomic_t freemiss;
62368 + atomic_unchecked_t allochit;
62369 + atomic_unchecked_t allocmiss;
62370 + atomic_unchecked_t freehit;
62371 + atomic_unchecked_t freemiss;
62372
62373 /*
62374 * If debugging is enabled, then the allocator can add additional
62375 diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
62376 index a32bcfd..53b71f4 100644
62377 --- a/include/linux/slub_def.h
62378 +++ b/include/linux/slub_def.h
62379 @@ -89,7 +89,7 @@ struct kmem_cache {
62380 struct kmem_cache_order_objects max;
62381 struct kmem_cache_order_objects min;
62382 gfp_t allocflags; /* gfp flags to use on each alloc */
62383 - int refcount; /* Refcount for slab cache destroy */
62384 + atomic_t refcount; /* Refcount for slab cache destroy */
62385 void (*ctor)(void *);
62386 int inuse; /* Offset to metadata */
62387 int align; /* Alignment */
62388 @@ -215,7 +215,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
62389 }
62390
62391 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
62392 -void *__kmalloc(size_t size, gfp_t flags);
62393 +void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
62394
62395 static __always_inline void *
62396 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
62397 diff --git a/include/linux/sonet.h b/include/linux/sonet.h
62398 index de8832d..0147b46 100644
62399 --- a/include/linux/sonet.h
62400 +++ b/include/linux/sonet.h
62401 @@ -61,7 +61,7 @@ struct sonet_stats {
62402 #include <linux/atomic.h>
62403
62404 struct k_sonet_stats {
62405 -#define __HANDLE_ITEM(i) atomic_t i
62406 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
62407 __SONET_ITEMS
62408 #undef __HANDLE_ITEM
62409 };
62410 diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
62411 index 2c5993a..b0e79f0 100644
62412 --- a/include/linux/sunrpc/clnt.h
62413 +++ b/include/linux/sunrpc/clnt.h
62414 @@ -172,9 +172,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
62415 {
62416 switch (sap->sa_family) {
62417 case AF_INET:
62418 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
62419 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
62420 case AF_INET6:
62421 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
62422 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
62423 }
62424 return 0;
62425 }
62426 @@ -207,7 +207,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
62427 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
62428 const struct sockaddr *src)
62429 {
62430 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
62431 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
62432 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
62433
62434 dsin->sin_family = ssin->sin_family;
62435 @@ -310,7 +310,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
62436 if (sa->sa_family != AF_INET6)
62437 return 0;
62438
62439 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
62440 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
62441 }
62442
62443 #endif /* __KERNEL__ */
62444 diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
62445 index e775689..9e206d9 100644
62446 --- a/include/linux/sunrpc/sched.h
62447 +++ b/include/linux/sunrpc/sched.h
62448 @@ -105,6 +105,7 @@ struct rpc_call_ops {
62449 void (*rpc_call_done)(struct rpc_task *, void *);
62450 void (*rpc_release)(void *);
62451 };
62452 +typedef struct rpc_call_ops __no_const rpc_call_ops_no_const;
62453
62454 struct rpc_task_setup {
62455 struct rpc_task *task;
62456 diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
62457 index c14fe86..393245e 100644
62458 --- a/include/linux/sunrpc/svc_rdma.h
62459 +++ b/include/linux/sunrpc/svc_rdma.h
62460 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
62461 extern unsigned int svcrdma_max_requests;
62462 extern unsigned int svcrdma_max_req_size;
62463
62464 -extern atomic_t rdma_stat_recv;
62465 -extern atomic_t rdma_stat_read;
62466 -extern atomic_t rdma_stat_write;
62467 -extern atomic_t rdma_stat_sq_starve;
62468 -extern atomic_t rdma_stat_rq_starve;
62469 -extern atomic_t rdma_stat_rq_poll;
62470 -extern atomic_t rdma_stat_rq_prod;
62471 -extern atomic_t rdma_stat_sq_poll;
62472 -extern atomic_t rdma_stat_sq_prod;
62473 +extern atomic_unchecked_t rdma_stat_recv;
62474 +extern atomic_unchecked_t rdma_stat_read;
62475 +extern atomic_unchecked_t rdma_stat_write;
62476 +extern atomic_unchecked_t rdma_stat_sq_starve;
62477 +extern atomic_unchecked_t rdma_stat_rq_starve;
62478 +extern atomic_unchecked_t rdma_stat_rq_poll;
62479 +extern atomic_unchecked_t rdma_stat_rq_prod;
62480 +extern atomic_unchecked_t rdma_stat_sq_poll;
62481 +extern atomic_unchecked_t rdma_stat_sq_prod;
62482
62483 #define RPCRDMA_VERSION 1
62484
62485 diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
62486 index bb9127d..34ab358 100644
62487 --- a/include/linux/sysctl.h
62488 +++ b/include/linux/sysctl.h
62489 @@ -155,7 +155,11 @@ enum
62490 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
62491 };
62492
62493 -
62494 +#ifdef CONFIG_PAX_SOFTMODE
62495 +enum {
62496 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
62497 +};
62498 +#endif
62499
62500 /* CTL_VM names: */
62501 enum
62502 @@ -968,6 +972,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
62503
62504 extern int proc_dostring(struct ctl_table *, int,
62505 void __user *, size_t *, loff_t *);
62506 +extern int proc_dostring_modpriv(struct ctl_table *, int,
62507 + void __user *, size_t *, loff_t *);
62508 extern int proc_dointvec(struct ctl_table *, int,
62509 void __user *, size_t *, loff_t *);
62510 extern int proc_dointvec_minmax(struct ctl_table *, int,
62511 diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
62512 index a71a292..51bd91d 100644
62513 --- a/include/linux/tracehook.h
62514 +++ b/include/linux/tracehook.h
62515 @@ -54,12 +54,12 @@ struct linux_binprm;
62516 /*
62517 * ptrace report for syscall entry and exit looks identical.
62518 */
62519 -static inline void ptrace_report_syscall(struct pt_regs *regs)
62520 +static inline int ptrace_report_syscall(struct pt_regs *regs)
62521 {
62522 int ptrace = current->ptrace;
62523
62524 if (!(ptrace & PT_PTRACED))
62525 - return;
62526 + return 0;
62527
62528 ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));
62529
62530 @@ -72,6 +72,8 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
62531 send_sig(current->exit_code, current, 1);
62532 current->exit_code = 0;
62533 }
62534 +
62535 + return fatal_signal_pending(current);
62536 }
62537
62538 /**
62539 @@ -96,8 +98,7 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
62540 static inline __must_check int tracehook_report_syscall_entry(
62541 struct pt_regs *regs)
62542 {
62543 - ptrace_report_syscall(regs);
62544 - return 0;
62545 + return ptrace_report_syscall(regs);
62546 }
62547
62548 /**
62549 diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
62550 index ff7dc08..893e1bd 100644
62551 --- a/include/linux/tty_ldisc.h
62552 +++ b/include/linux/tty_ldisc.h
62553 @@ -148,7 +148,7 @@ struct tty_ldisc_ops {
62554
62555 struct module *owner;
62556
62557 - int refcount;
62558 + atomic_t refcount;
62559 };
62560
62561 struct tty_ldisc {
62562 diff --git a/include/linux/types.h b/include/linux/types.h
62563 index e5fa503..df6e8a4 100644
62564 --- a/include/linux/types.h
62565 +++ b/include/linux/types.h
62566 @@ -214,10 +214,26 @@ typedef struct {
62567 int counter;
62568 } atomic_t;
62569
62570 +#ifdef CONFIG_PAX_REFCOUNT
62571 +typedef struct {
62572 + int counter;
62573 +} atomic_unchecked_t;
62574 +#else
62575 +typedef atomic_t atomic_unchecked_t;
62576 +#endif
62577 +
62578 #ifdef CONFIG_64BIT
62579 typedef struct {
62580 long counter;
62581 } atomic64_t;
62582 +
62583 +#ifdef CONFIG_PAX_REFCOUNT
62584 +typedef struct {
62585 + long counter;
62586 +} atomic64_unchecked_t;
62587 +#else
62588 +typedef atomic64_t atomic64_unchecked_t;
62589 +#endif
62590 #endif
62591
62592 struct list_head {
62593 diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
62594 index 5ca0951..ab496a5 100644
62595 --- a/include/linux/uaccess.h
62596 +++ b/include/linux/uaccess.h
62597 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
62598 long ret; \
62599 mm_segment_t old_fs = get_fs(); \
62600 \
62601 - set_fs(KERNEL_DS); \
62602 pagefault_disable(); \
62603 - ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
62604 - pagefault_enable(); \
62605 + set_fs(KERNEL_DS); \
62606 + ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
62607 set_fs(old_fs); \
62608 + pagefault_enable(); \
62609 ret; \
62610 })
62611
62612 diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
62613 index 99c1b4d..bb94261 100644
62614 --- a/include/linux/unaligned/access_ok.h
62615 +++ b/include/linux/unaligned/access_ok.h
62616 @@ -6,32 +6,32 @@
62617
62618 static inline u16 get_unaligned_le16(const void *p)
62619 {
62620 - return le16_to_cpup((__le16 *)p);
62621 + return le16_to_cpup((const __le16 *)p);
62622 }
62623
62624 static inline u32 get_unaligned_le32(const void *p)
62625 {
62626 - return le32_to_cpup((__le32 *)p);
62627 + return le32_to_cpup((const __le32 *)p);
62628 }
62629
62630 static inline u64 get_unaligned_le64(const void *p)
62631 {
62632 - return le64_to_cpup((__le64 *)p);
62633 + return le64_to_cpup((const __le64 *)p);
62634 }
62635
62636 static inline u16 get_unaligned_be16(const void *p)
62637 {
62638 - return be16_to_cpup((__be16 *)p);
62639 + return be16_to_cpup((const __be16 *)p);
62640 }
62641
62642 static inline u32 get_unaligned_be32(const void *p)
62643 {
62644 - return be32_to_cpup((__be32 *)p);
62645 + return be32_to_cpup((const __be32 *)p);
62646 }
62647
62648 static inline u64 get_unaligned_be64(const void *p)
62649 {
62650 - return be64_to_cpup((__be64 *)p);
62651 + return be64_to_cpup((const __be64 *)p);
62652 }
62653
62654 static inline void put_unaligned_le16(u16 val, void *p)
62655 diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
62656 index 0d3f988..000f101 100644
62657 --- a/include/linux/usb/renesas_usbhs.h
62658 +++ b/include/linux/usb/renesas_usbhs.h
62659 @@ -39,7 +39,7 @@ enum {
62660 */
62661 struct renesas_usbhs_driver_callback {
62662 int (*notify_hotplug)(struct platform_device *pdev);
62663 -};
62664 +} __no_const;
62665
62666 /*
62667 * callback functions for platform
62668 @@ -97,7 +97,7 @@ struct renesas_usbhs_platform_callback {
62669 * VBUS control is needed for Host
62670 */
62671 int (*set_vbus)(struct platform_device *pdev, int enable);
62672 -};
62673 +} __no_const;
62674
62675 /*
62676 * parameters for renesas usbhs
62677 diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
62678 index 6f8fbcf..8259001 100644
62679 --- a/include/linux/vermagic.h
62680 +++ b/include/linux/vermagic.h
62681 @@ -25,9 +25,35 @@
62682 #define MODULE_ARCH_VERMAGIC ""
62683 #endif
62684
62685 +#ifdef CONFIG_PAX_REFCOUNT
62686 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
62687 +#else
62688 +#define MODULE_PAX_REFCOUNT ""
62689 +#endif
62690 +
62691 +#ifdef CONSTIFY_PLUGIN
62692 +#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
62693 +#else
62694 +#define MODULE_CONSTIFY_PLUGIN ""
62695 +#endif
62696 +
62697 +#ifdef STACKLEAK_PLUGIN
62698 +#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
62699 +#else
62700 +#define MODULE_STACKLEAK_PLUGIN ""
62701 +#endif
62702 +
62703 +#ifdef CONFIG_GRKERNSEC
62704 +#define MODULE_GRSEC "GRSEC "
62705 +#else
62706 +#define MODULE_GRSEC ""
62707 +#endif
62708 +
62709 #define VERMAGIC_STRING \
62710 UTS_RELEASE " " \
62711 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
62712 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
62713 - MODULE_ARCH_VERMAGIC
62714 + MODULE_ARCH_VERMAGIC \
62715 + MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
62716 + MODULE_GRSEC
62717
62718 diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
62719 index dcdfc2b..f937197 100644
62720 --- a/include/linux/vmalloc.h
62721 +++ b/include/linux/vmalloc.h
62722 @@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
62723 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
62724 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
62725 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
62726 +
62727 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
62728 +#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
62729 +#endif
62730 +
62731 /* bits [20..32] reserved for arch specific ioremap internals */
62732
62733 /*
62734 @@ -157,4 +162,103 @@ pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
62735 # endif
62736 #endif
62737
62738 +#define vmalloc(x) \
62739 +({ \
62740 + void *___retval; \
62741 + intoverflow_t ___x = (intoverflow_t)x; \
62742 + if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
62743 + ___retval = NULL; \
62744 + else \
62745 + ___retval = vmalloc((unsigned long)___x); \
62746 + ___retval; \
62747 +})
62748 +
62749 +#define vzalloc(x) \
62750 +({ \
62751 + void *___retval; \
62752 + intoverflow_t ___x = (intoverflow_t)x; \
62753 + if (WARN(___x > ULONG_MAX, "vzalloc size overflow\n")) \
62754 + ___retval = NULL; \
62755 + else \
62756 + ___retval = vzalloc((unsigned long)___x); \
62757 + ___retval; \
62758 +})
62759 +
62760 +#define __vmalloc(x, y, z) \
62761 +({ \
62762 + void *___retval; \
62763 + intoverflow_t ___x = (intoverflow_t)x; \
62764 + if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
62765 + ___retval = NULL; \
62766 + else \
62767 + ___retval = __vmalloc((unsigned long)___x, (y), (z));\
62768 + ___retval; \
62769 +})
62770 +
62771 +#define vmalloc_user(x) \
62772 +({ \
62773 + void *___retval; \
62774 + intoverflow_t ___x = (intoverflow_t)x; \
62775 + if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
62776 + ___retval = NULL; \
62777 + else \
62778 + ___retval = vmalloc_user((unsigned long)___x); \
62779 + ___retval; \
62780 +})
62781 +
62782 +#define vmalloc_exec(x) \
62783 +({ \
62784 + void *___retval; \
62785 + intoverflow_t ___x = (intoverflow_t)x; \
62786 + if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
62787 + ___retval = NULL; \
62788 + else \
62789 + ___retval = vmalloc_exec((unsigned long)___x); \
62790 + ___retval; \
62791 +})
62792 +
62793 +#define vmalloc_node(x, y) \
62794 +({ \
62795 + void *___retval; \
62796 + intoverflow_t ___x = (intoverflow_t)x; \
62797 + if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
62798 + ___retval = NULL; \
62799 + else \
62800 + ___retval = vmalloc_node((unsigned long)___x, (y));\
62801 + ___retval; \
62802 +})
62803 +
62804 +#define vzalloc_node(x, y) \
62805 +({ \
62806 + void *___retval; \
62807 + intoverflow_t ___x = (intoverflow_t)x; \
62808 + if (WARN(___x > ULONG_MAX, "vzalloc_node size overflow\n"))\
62809 + ___retval = NULL; \
62810 + else \
62811 + ___retval = vzalloc_node((unsigned long)___x, (y));\
62812 + ___retval; \
62813 +})
62814 +
62815 +#define vmalloc_32(x) \
62816 +({ \
62817 + void *___retval; \
62818 + intoverflow_t ___x = (intoverflow_t)x; \
62819 + if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
62820 + ___retval = NULL; \
62821 + else \
62822 + ___retval = vmalloc_32((unsigned long)___x); \
62823 + ___retval; \
62824 +})
62825 +
62826 +#define vmalloc_32_user(x) \
62827 +({ \
62828 +void *___retval; \
62829 + intoverflow_t ___x = (intoverflow_t)x; \
62830 + if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
62831 + ___retval = NULL; \
62832 + else \
62833 + ___retval = vmalloc_32_user((unsigned long)___x);\
62834 + ___retval; \
62835 +})
62836 +
62837 #endif /* _LINUX_VMALLOC_H */
62838 diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
62839 index 65efb92..137adbb 100644
62840 --- a/include/linux/vmstat.h
62841 +++ b/include/linux/vmstat.h
62842 @@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(int cpu)
62843 /*
62844 * Zone based page accounting with per cpu differentials.
62845 */
62846 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
62847 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
62848
62849 static inline void zone_page_state_add(long x, struct zone *zone,
62850 enum zone_stat_item item)
62851 {
62852 - atomic_long_add(x, &zone->vm_stat[item]);
62853 - atomic_long_add(x, &vm_stat[item]);
62854 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
62855 + atomic_long_add_unchecked(x, &vm_stat[item]);
62856 }
62857
62858 static inline unsigned long global_page_state(enum zone_stat_item item)
62859 {
62860 - long x = atomic_long_read(&vm_stat[item]);
62861 + long x = atomic_long_read_unchecked(&vm_stat[item]);
62862 #ifdef CONFIG_SMP
62863 if (x < 0)
62864 x = 0;
62865 @@ -109,7 +109,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
62866 static inline unsigned long zone_page_state(struct zone *zone,
62867 enum zone_stat_item item)
62868 {
62869 - long x = atomic_long_read(&zone->vm_stat[item]);
62870 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
62871 #ifdef CONFIG_SMP
62872 if (x < 0)
62873 x = 0;
62874 @@ -126,7 +126,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
62875 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
62876 enum zone_stat_item item)
62877 {
62878 - long x = atomic_long_read(&zone->vm_stat[item]);
62879 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
62880
62881 #ifdef CONFIG_SMP
62882 int cpu;
62883 @@ -221,8 +221,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
62884
62885 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
62886 {
62887 - atomic_long_inc(&zone->vm_stat[item]);
62888 - atomic_long_inc(&vm_stat[item]);
62889 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
62890 + atomic_long_inc_unchecked(&vm_stat[item]);
62891 }
62892
62893 static inline void __inc_zone_page_state(struct page *page,
62894 @@ -233,8 +233,8 @@ static inline void __inc_zone_page_state(struct page *page,
62895
62896 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
62897 {
62898 - atomic_long_dec(&zone->vm_stat[item]);
62899 - atomic_long_dec(&vm_stat[item]);
62900 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
62901 + atomic_long_dec_unchecked(&vm_stat[item]);
62902 }
62903
62904 static inline void __dec_zone_page_state(struct page *page,
62905 diff --git a/include/linux/xattr.h b/include/linux/xattr.h
62906 index e5d1220..ef6e406 100644
62907 --- a/include/linux/xattr.h
62908 +++ b/include/linux/xattr.h
62909 @@ -57,6 +57,11 @@
62910 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
62911 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
62912
62913 +/* User namespace */
62914 +#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
62915 +#define XATTR_PAX_FLAGS_SUFFIX "flags"
62916 +#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
62917 +
62918 #ifdef __KERNEL__
62919
62920 #include <linux/types.h>
62921 diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
62922 index 4aeff96..b378cdc 100644
62923 --- a/include/media/saa7146_vv.h
62924 +++ b/include/media/saa7146_vv.h
62925 @@ -163,7 +163,7 @@ struct saa7146_ext_vv
62926 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
62927
62928 /* the extension can override this */
62929 - struct v4l2_ioctl_ops ops;
62930 + v4l2_ioctl_ops_no_const ops;
62931 /* pointer to the saa7146 core ops */
62932 const struct v4l2_ioctl_ops *core_ops;
62933
62934 diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
62935 index c7c40f1..4f01585 100644
62936 --- a/include/media/v4l2-dev.h
62937 +++ b/include/media/v4l2-dev.h
62938 @@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority local);
62939
62940
62941 struct v4l2_file_operations {
62942 - struct module *owner;
62943 + struct module * const owner;
62944 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
62945 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
62946 unsigned int (*poll) (struct file *, struct poll_table_struct *);
62947 @@ -68,6 +68,7 @@ struct v4l2_file_operations {
62948 int (*open) (struct file *);
62949 int (*release) (struct file *);
62950 };
62951 +typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
62952
62953 /*
62954 * Newer version of video_device, handled by videodev2.c
62955 diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
62956 index 3f5d60f..44210ed 100644
62957 --- a/include/media/v4l2-ioctl.h
62958 +++ b/include/media/v4l2-ioctl.h
62959 @@ -278,7 +278,7 @@ struct v4l2_ioctl_ops {
62960 long (*vidioc_default) (struct file *file, void *fh,
62961 bool valid_prio, int cmd, void *arg);
62962 };
62963 -
62964 +typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
62965
62966 /* v4l debugging and diagnostics */
62967
62968 diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
62969 index 8d55251..dfe5b0a 100644
62970 --- a/include/net/caif/caif_hsi.h
62971 +++ b/include/net/caif/caif_hsi.h
62972 @@ -98,7 +98,7 @@ struct cfhsi_drv {
62973 void (*rx_done_cb) (struct cfhsi_drv *drv);
62974 void (*wake_up_cb) (struct cfhsi_drv *drv);
62975 void (*wake_down_cb) (struct cfhsi_drv *drv);
62976 -};
62977 +} __no_const;
62978
62979 /* Structure implemented by HSI device. */
62980 struct cfhsi_dev {
62981 diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
62982 index 9e5425b..8136ffc 100644
62983 --- a/include/net/caif/cfctrl.h
62984 +++ b/include/net/caif/cfctrl.h
62985 @@ -52,7 +52,7 @@ struct cfctrl_rsp {
62986 void (*radioset_rsp)(void);
62987 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
62988 struct cflayer *client_layer);
62989 -};
62990 +} __no_const;
62991
62992 /* Link Setup Parameters for CAIF-Links. */
62993 struct cfctrl_link_param {
62994 @@ -101,8 +101,8 @@ struct cfctrl_request_info {
62995 struct cfctrl {
62996 struct cfsrvl serv;
62997 struct cfctrl_rsp res;
62998 - atomic_t req_seq_no;
62999 - atomic_t rsp_seq_no;
63000 + atomic_unchecked_t req_seq_no;
63001 + atomic_unchecked_t rsp_seq_no;
63002 struct list_head list;
63003 /* Protects from simultaneous access to first_req list */
63004 spinlock_t info_list_lock;
63005 diff --git a/include/net/flow.h b/include/net/flow.h
63006 index 6c469db..7743b8e 100644
63007 --- a/include/net/flow.h
63008 +++ b/include/net/flow.h
63009 @@ -221,6 +221,6 @@ extern struct flow_cache_object *flow_cache_lookup(
63010
63011 extern void flow_cache_flush(void);
63012 extern void flow_cache_flush_deferred(void);
63013 -extern atomic_t flow_cache_genid;
63014 +extern atomic_unchecked_t flow_cache_genid;
63015
63016 #endif
63017 diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
63018 index b94765e..053f68b 100644
63019 --- a/include/net/inetpeer.h
63020 +++ b/include/net/inetpeer.h
63021 @@ -48,8 +48,8 @@ struct inet_peer {
63022 */
63023 union {
63024 struct {
63025 - atomic_t rid; /* Frag reception counter */
63026 - atomic_t ip_id_count; /* IP ID for the next packet */
63027 + atomic_unchecked_t rid; /* Frag reception counter */
63028 + atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
63029 __u32 tcp_ts;
63030 __u32 tcp_ts_stamp;
63031 };
63032 @@ -115,11 +115,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
63033 more++;
63034 inet_peer_refcheck(p);
63035 do {
63036 - old = atomic_read(&p->ip_id_count);
63037 + old = atomic_read_unchecked(&p->ip_id_count);
63038 new = old + more;
63039 if (!new)
63040 new = 1;
63041 - } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
63042 + } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
63043 return new;
63044 }
63045
63046 diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
63047 index 10422ef..662570f 100644
63048 --- a/include/net/ip_fib.h
63049 +++ b/include/net/ip_fib.h
63050 @@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
63051
63052 #define FIB_RES_SADDR(net, res) \
63053 ((FIB_RES_NH(res).nh_saddr_genid == \
63054 - atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
63055 + atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
63056 FIB_RES_NH(res).nh_saddr : \
63057 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
63058 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
63059 diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
63060 index ebe517f..1bd286b 100644
63061 --- a/include/net/ip_vs.h
63062 +++ b/include/net/ip_vs.h
63063 @@ -509,7 +509,7 @@ struct ip_vs_conn {
63064 struct ip_vs_conn *control; /* Master control connection */
63065 atomic_t n_control; /* Number of controlled ones */
63066 struct ip_vs_dest *dest; /* real server */
63067 - atomic_t in_pkts; /* incoming packet counter */
63068 + atomic_unchecked_t in_pkts; /* incoming packet counter */
63069
63070 /* packet transmitter for different forwarding methods. If it
63071 mangles the packet, it must return NF_DROP or better NF_STOLEN,
63072 @@ -647,7 +647,7 @@ struct ip_vs_dest {
63073 __be16 port; /* port number of the server */
63074 union nf_inet_addr addr; /* IP address of the server */
63075 volatile unsigned flags; /* dest status flags */
63076 - atomic_t conn_flags; /* flags to copy to conn */
63077 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
63078 atomic_t weight; /* server weight */
63079
63080 atomic_t refcnt; /* reference counter */
63081 diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
63082 index 69b610a..fe3962c 100644
63083 --- a/include/net/irda/ircomm_core.h
63084 +++ b/include/net/irda/ircomm_core.h
63085 @@ -51,7 +51,7 @@ typedef struct {
63086 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
63087 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
63088 struct ircomm_info *);
63089 -} call_t;
63090 +} __no_const call_t;
63091
63092 struct ircomm_cb {
63093 irda_queue_t queue;
63094 diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
63095 index 59ba38bc..d515662 100644
63096 --- a/include/net/irda/ircomm_tty.h
63097 +++ b/include/net/irda/ircomm_tty.h
63098 @@ -35,6 +35,7 @@
63099 #include <linux/termios.h>
63100 #include <linux/timer.h>
63101 #include <linux/tty.h> /* struct tty_struct */
63102 +#include <asm/local.h>
63103
63104 #include <net/irda/irias_object.h>
63105 #include <net/irda/ircomm_core.h>
63106 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
63107 unsigned short close_delay;
63108 unsigned short closing_wait; /* time to wait before closing */
63109
63110 - int open_count;
63111 - int blocked_open; /* # of blocked opens */
63112 + local_t open_count;
63113 + local_t blocked_open; /* # of blocked opens */
63114
63115 /* Protect concurent access to :
63116 * o self->open_count
63117 diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
63118 index 0954ec9..7413562 100644
63119 --- a/include/net/iucv/af_iucv.h
63120 +++ b/include/net/iucv/af_iucv.h
63121 @@ -138,7 +138,7 @@ struct iucv_sock {
63122 struct iucv_sock_list {
63123 struct hlist_head head;
63124 rwlock_t lock;
63125 - atomic_t autobind_name;
63126 + atomic_unchecked_t autobind_name;
63127 };
63128
63129 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
63130 diff --git a/include/net/neighbour.h b/include/net/neighbour.h
63131 index 34c996f..bb3b4d4 100644
63132 --- a/include/net/neighbour.h
63133 +++ b/include/net/neighbour.h
63134 @@ -123,7 +123,7 @@ struct neigh_ops {
63135 void (*error_report)(struct neighbour *, struct sk_buff *);
63136 int (*output)(struct neighbour *, struct sk_buff *);
63137 int (*connected_output)(struct neighbour *, struct sk_buff *);
63138 -};
63139 +} __do_const;
63140
63141 struct pneigh_entry {
63142 struct pneigh_entry *next;
63143 diff --git a/include/net/netlink.h b/include/net/netlink.h
63144 index cb1f350..3279d2c 100644
63145 --- a/include/net/netlink.h
63146 +++ b/include/net/netlink.h
63147 @@ -569,7 +569,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
63148 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
63149 {
63150 if (mark)
63151 - skb_trim(skb, (unsigned char *) mark - skb->data);
63152 + skb_trim(skb, (const unsigned char *) mark - skb->data);
63153 }
63154
63155 /**
63156 diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
63157 index bbd023a..97c6d0d 100644
63158 --- a/include/net/netns/ipv4.h
63159 +++ b/include/net/netns/ipv4.h
63160 @@ -57,8 +57,8 @@ struct netns_ipv4 {
63161 unsigned int sysctl_ping_group_range[2];
63162 long sysctl_tcp_mem[3];
63163
63164 - atomic_t rt_genid;
63165 - atomic_t dev_addr_genid;
63166 + atomic_unchecked_t rt_genid;
63167 + atomic_unchecked_t dev_addr_genid;
63168
63169 #ifdef CONFIG_IP_MROUTE
63170 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
63171 diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
63172 index d368561..96aaa17 100644
63173 --- a/include/net/sctp/sctp.h
63174 +++ b/include/net/sctp/sctp.h
63175 @@ -318,9 +318,9 @@ do { \
63176
63177 #else /* SCTP_DEBUG */
63178
63179 -#define SCTP_DEBUG_PRINTK(whatever...)
63180 -#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
63181 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
63182 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
63183 +#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
63184 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
63185 #define SCTP_ENABLE_DEBUG
63186 #define SCTP_DISABLE_DEBUG
63187 #define SCTP_ASSERT(expr, str, func)
63188 diff --git a/include/net/sock.h b/include/net/sock.h
63189 index 91c1c8b..15ae923 100644
63190 --- a/include/net/sock.h
63191 +++ b/include/net/sock.h
63192 @@ -299,7 +299,7 @@ struct sock {
63193 #ifdef CONFIG_RPS
63194 __u32 sk_rxhash;
63195 #endif
63196 - atomic_t sk_drops;
63197 + atomic_unchecked_t sk_drops;
63198 int sk_rcvbuf;
63199
63200 struct sk_filter __rcu *sk_filter;
63201 @@ -1660,7 +1660,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
63202 }
63203
63204 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
63205 - char __user *from, char *to,
63206 + char __user *from, unsigned char *to,
63207 int copy, int offset)
63208 {
63209 if (skb->ip_summed == CHECKSUM_NONE) {
63210 diff --git a/include/net/tcp.h b/include/net/tcp.h
63211 index 2d80c29..aa07caf 100644
63212 --- a/include/net/tcp.h
63213 +++ b/include/net/tcp.h
63214 @@ -1426,7 +1426,7 @@ struct tcp_seq_afinfo {
63215 char *name;
63216 sa_family_t family;
63217 const struct file_operations *seq_fops;
63218 - struct seq_operations seq_ops;
63219 + seq_operations_no_const seq_ops;
63220 };
63221
63222 struct tcp_iter_state {
63223 diff --git a/include/net/udp.h b/include/net/udp.h
63224 index e39592f..fef9680 100644
63225 --- a/include/net/udp.h
63226 +++ b/include/net/udp.h
63227 @@ -243,7 +243,7 @@ struct udp_seq_afinfo {
63228 sa_family_t family;
63229 struct udp_table *udp_table;
63230 const struct file_operations *seq_fops;
63231 - struct seq_operations seq_ops;
63232 + seq_operations_no_const seq_ops;
63233 };
63234
63235 struct udp_iter_state {
63236 diff --git a/include/net/xfrm.h b/include/net/xfrm.h
63237 index 89174e2..1f82598 100644
63238 --- a/include/net/xfrm.h
63239 +++ b/include/net/xfrm.h
63240 @@ -505,7 +505,7 @@ struct xfrm_policy {
63241 struct timer_list timer;
63242
63243 struct flow_cache_object flo;
63244 - atomic_t genid;
63245 + atomic_unchecked_t genid;
63246 u32 priority;
63247 u32 index;
63248 struct xfrm_mark mark;
63249 diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
63250 index 1a046b1..ee0bef0 100644
63251 --- a/include/rdma/iw_cm.h
63252 +++ b/include/rdma/iw_cm.h
63253 @@ -122,7 +122,7 @@ struct iw_cm_verbs {
63254 int backlog);
63255
63256 int (*destroy_listen)(struct iw_cm_id *cm_id);
63257 -};
63258 +} __no_const;
63259
63260 /**
63261 * iw_create_cm_id - Create an IW CM identifier.
63262 diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
63263 index 6a3922f..0b73022 100644
63264 --- a/include/scsi/libfc.h
63265 +++ b/include/scsi/libfc.h
63266 @@ -748,6 +748,7 @@ struct libfc_function_template {
63267 */
63268 void (*disc_stop_final) (struct fc_lport *);
63269 };
63270 +typedef struct libfc_function_template __no_const libfc_function_template_no_const;
63271
63272 /**
63273 * struct fc_disc - Discovery context
63274 @@ -851,7 +852,7 @@ struct fc_lport {
63275 struct fc_vport *vport;
63276
63277 /* Operational Information */
63278 - struct libfc_function_template tt;
63279 + libfc_function_template_no_const tt;
63280 u8 link_up;
63281 u8 qfull;
63282 enum fc_lport_state state;
63283 diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
63284 index 77273f2..dd4031f 100644
63285 --- a/include/scsi/scsi_device.h
63286 +++ b/include/scsi/scsi_device.h
63287 @@ -161,9 +161,9 @@ struct scsi_device {
63288 unsigned int max_device_blocked; /* what device_blocked counts down from */
63289 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
63290
63291 - atomic_t iorequest_cnt;
63292 - atomic_t iodone_cnt;
63293 - atomic_t ioerr_cnt;
63294 + atomic_unchecked_t iorequest_cnt;
63295 + atomic_unchecked_t iodone_cnt;
63296 + atomic_unchecked_t ioerr_cnt;
63297
63298 struct device sdev_gendev,
63299 sdev_dev;
63300 diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
63301 index 2a65167..91e01f8 100644
63302 --- a/include/scsi/scsi_transport_fc.h
63303 +++ b/include/scsi/scsi_transport_fc.h
63304 @@ -711,7 +711,7 @@ struct fc_function_template {
63305 unsigned long show_host_system_hostname:1;
63306
63307 unsigned long disable_target_scan:1;
63308 -};
63309 +} __do_const;
63310
63311
63312 /**
63313 diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
63314 index 030b87c..98a6954 100644
63315 --- a/include/sound/ak4xxx-adda.h
63316 +++ b/include/sound/ak4xxx-adda.h
63317 @@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
63318 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
63319 unsigned char val);
63320 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
63321 -};
63322 +} __no_const;
63323
63324 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
63325
63326 diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
63327 index 8c05e47..2b5df97 100644
63328 --- a/include/sound/hwdep.h
63329 +++ b/include/sound/hwdep.h
63330 @@ -49,7 +49,7 @@ struct snd_hwdep_ops {
63331 struct snd_hwdep_dsp_status *status);
63332 int (*dsp_load)(struct snd_hwdep *hw,
63333 struct snd_hwdep_dsp_image *image);
63334 -};
63335 +} __no_const;
63336
63337 struct snd_hwdep {
63338 struct snd_card *card;
63339 diff --git a/include/sound/info.h b/include/sound/info.h
63340 index 9ca1a49..aba1728 100644
63341 --- a/include/sound/info.h
63342 +++ b/include/sound/info.h
63343 @@ -44,7 +44,7 @@ struct snd_info_entry_text {
63344 struct snd_info_buffer *buffer);
63345 void (*write)(struct snd_info_entry *entry,
63346 struct snd_info_buffer *buffer);
63347 -};
63348 +} __no_const;
63349
63350 struct snd_info_entry_ops {
63351 int (*open)(struct snd_info_entry *entry,
63352 diff --git a/include/sound/pcm.h b/include/sound/pcm.h
63353 index 0cf91b2..b70cae4 100644
63354 --- a/include/sound/pcm.h
63355 +++ b/include/sound/pcm.h
63356 @@ -81,6 +81,7 @@ struct snd_pcm_ops {
63357 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
63358 int (*ack)(struct snd_pcm_substream *substream);
63359 };
63360 +typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
63361
63362 /*
63363 *
63364 diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
63365 index af1b49e..a5d55a5 100644
63366 --- a/include/sound/sb16_csp.h
63367 +++ b/include/sound/sb16_csp.h
63368 @@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
63369 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
63370 int (*csp_stop) (struct snd_sb_csp * p);
63371 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
63372 -};
63373 +} __no_const;
63374
63375 /*
63376 * CSP private data
63377 diff --git a/include/sound/soc.h b/include/sound/soc.h
63378 index 0992dff..bb366fe 100644
63379 --- a/include/sound/soc.h
63380 +++ b/include/sound/soc.h
63381 @@ -682,7 +682,7 @@ struct snd_soc_platform_driver {
63382 /* platform IO - used for platform DAPM */
63383 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
63384 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
63385 -};
63386 +} __do_const;
63387
63388 struct snd_soc_platform {
63389 const char *name;
63390 @@ -852,7 +852,7 @@ struct snd_soc_pcm_runtime {
63391 struct snd_soc_dai_link *dai_link;
63392 struct mutex pcm_mutex;
63393 enum snd_soc_pcm_subclass pcm_subclass;
63394 - struct snd_pcm_ops ops;
63395 + snd_pcm_ops_no_const ops;
63396
63397 unsigned int complete:1;
63398 unsigned int dev_registered:1;
63399 diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
63400 index 444cd6b..3327cc5 100644
63401 --- a/include/sound/ymfpci.h
63402 +++ b/include/sound/ymfpci.h
63403 @@ -358,7 +358,7 @@ struct snd_ymfpci {
63404 spinlock_t reg_lock;
63405 spinlock_t voice_lock;
63406 wait_queue_head_t interrupt_sleep;
63407 - atomic_t interrupt_sleep_count;
63408 + atomic_unchecked_t interrupt_sleep_count;
63409 struct snd_info_entry *proc_entry;
63410 const struct firmware *dsp_microcode;
63411 const struct firmware *controller_microcode;
63412 diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
63413 index dc4e345..6bf6080 100644
63414 --- a/include/target/target_core_base.h
63415 +++ b/include/target/target_core_base.h
63416 @@ -443,7 +443,7 @@ struct t10_reservation_ops {
63417 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
63418 int (*t10_pr_register)(struct se_cmd *);
63419 int (*t10_pr_clear)(struct se_cmd *);
63420 -};
63421 +} __no_const;
63422
63423 struct t10_reservation {
63424 /* Reservation effects all target ports */
63425 @@ -561,8 +561,8 @@ struct se_cmd {
63426 atomic_t t_se_count;
63427 atomic_t t_task_cdbs_left;
63428 atomic_t t_task_cdbs_ex_left;
63429 - atomic_t t_task_cdbs_sent;
63430 - atomic_t t_transport_aborted;
63431 + atomic_unchecked_t t_task_cdbs_sent;
63432 + atomic_unchecked_t t_transport_aborted;
63433 atomic_t t_transport_active;
63434 atomic_t t_transport_complete;
63435 atomic_t t_transport_queue_active;
63436 @@ -799,7 +799,7 @@ struct se_device {
63437 spinlock_t stats_lock;
63438 /* Active commands on this virtual SE device */
63439 atomic_t simple_cmds;
63440 - atomic_t dev_ordered_id;
63441 + atomic_unchecked_t dev_ordered_id;
63442 atomic_t execute_tasks;
63443 atomic_t dev_ordered_sync;
63444 atomic_t dev_qf_count;
63445 diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
63446 index 1c09820..7f5ec79 100644
63447 --- a/include/trace/events/irq.h
63448 +++ b/include/trace/events/irq.h
63449 @@ -36,7 +36,7 @@ struct softirq_action;
63450 */
63451 TRACE_EVENT(irq_handler_entry,
63452
63453 - TP_PROTO(int irq, struct irqaction *action),
63454 + TP_PROTO(int irq, const struct irqaction *action),
63455
63456 TP_ARGS(irq, action),
63457
63458 @@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
63459 */
63460 TRACE_EVENT(irq_handler_exit,
63461
63462 - TP_PROTO(int irq, struct irqaction *action, int ret),
63463 + TP_PROTO(int irq, const struct irqaction *action, int ret),
63464
63465 TP_ARGS(irq, action, ret),
63466
63467 diff --git a/include/video/udlfb.h b/include/video/udlfb.h
63468 index c41f308..6918de3 100644
63469 --- a/include/video/udlfb.h
63470 +++ b/include/video/udlfb.h
63471 @@ -52,10 +52,10 @@ struct dlfb_data {
63472 u32 pseudo_palette[256];
63473 int blank_mode; /*one of FB_BLANK_ */
63474 /* blit-only rendering path metrics, exposed through sysfs */
63475 - atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
63476 - atomic_t bytes_identical; /* saved effort with backbuffer comparison */
63477 - atomic_t bytes_sent; /* to usb, after compression including overhead */
63478 - atomic_t cpu_kcycles_used; /* transpired during pixel processing */
63479 + atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
63480 + atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
63481 + atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
63482 + atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
63483 };
63484
63485 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
63486 diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
63487 index 0993a22..32ba2fe 100644
63488 --- a/include/video/uvesafb.h
63489 +++ b/include/video/uvesafb.h
63490 @@ -177,6 +177,7 @@ struct uvesafb_par {
63491 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
63492 u8 pmi_setpal; /* PMI for palette changes */
63493 u16 *pmi_base; /* protected mode interface location */
63494 + u8 *pmi_code; /* protected mode code location */
63495 void *pmi_start;
63496 void *pmi_pal;
63497 u8 *vbe_state_orig; /*
63498 diff --git a/init/Kconfig b/init/Kconfig
63499 index 3f42cd6..613f41d 100644
63500 --- a/init/Kconfig
63501 +++ b/init/Kconfig
63502 @@ -799,6 +799,7 @@ endif # CGROUPS
63503
63504 config CHECKPOINT_RESTORE
63505 bool "Checkpoint/restore support" if EXPERT
63506 + depends on !GRKERNSEC
63507 default n
63508 help
63509 Enables additional kernel features in a sake of checkpoint/restore.
63510 @@ -1249,7 +1250,7 @@ config SLUB_DEBUG
63511
63512 config COMPAT_BRK
63513 bool "Disable heap randomization"
63514 - default y
63515 + default n
63516 help
63517 Randomizing heap placement makes heap exploits harder, but it
63518 also breaks ancient binaries (including anything libc5 based).
63519 diff --git a/init/do_mounts.c b/init/do_mounts.c
63520 index 2974c8b..0b863ae 100644
63521 --- a/init/do_mounts.c
63522 +++ b/init/do_mounts.c
63523 @@ -326,11 +326,11 @@ static void __init get_fs_names(char *page)
63524 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
63525 {
63526 struct super_block *s;
63527 - int err = sys_mount(name, "/root", fs, flags, data);
63528 + int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
63529 if (err)
63530 return err;
63531
63532 - sys_chdir((const char __user __force *)"/root");
63533 + sys_chdir((const char __force_user *)"/root");
63534 s = current->fs->pwd.dentry->d_sb;
63535 ROOT_DEV = s->s_dev;
63536 printk(KERN_INFO
63537 @@ -450,18 +450,18 @@ void __init change_floppy(char *fmt, ...)
63538 va_start(args, fmt);
63539 vsprintf(buf, fmt, args);
63540 va_end(args);
63541 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
63542 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
63543 if (fd >= 0) {
63544 sys_ioctl(fd, FDEJECT, 0);
63545 sys_close(fd);
63546 }
63547 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
63548 - fd = sys_open("/dev/console", O_RDWR, 0);
63549 + fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
63550 if (fd >= 0) {
63551 sys_ioctl(fd, TCGETS, (long)&termios);
63552 termios.c_lflag &= ~ICANON;
63553 sys_ioctl(fd, TCSETSF, (long)&termios);
63554 - sys_read(fd, &c, 1);
63555 + sys_read(fd, (char __user *)&c, 1);
63556 termios.c_lflag |= ICANON;
63557 sys_ioctl(fd, TCSETSF, (long)&termios);
63558 sys_close(fd);
63559 @@ -555,6 +555,6 @@ void __init prepare_namespace(void)
63560 mount_root();
63561 out:
63562 devtmpfs_mount("dev");
63563 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
63564 - sys_chroot((const char __user __force *)".");
63565 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
63566 + sys_chroot((const char __force_user *)".");
63567 }
63568 diff --git a/init/do_mounts.h b/init/do_mounts.h
63569 index f5b978a..69dbfe8 100644
63570 --- a/init/do_mounts.h
63571 +++ b/init/do_mounts.h
63572 @@ -15,15 +15,15 @@ extern int root_mountflags;
63573
63574 static inline int create_dev(char *name, dev_t dev)
63575 {
63576 - sys_unlink(name);
63577 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
63578 + sys_unlink((char __force_user *)name);
63579 + return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
63580 }
63581
63582 #if BITS_PER_LONG == 32
63583 static inline u32 bstat(char *name)
63584 {
63585 struct stat64 stat;
63586 - if (sys_stat64(name, &stat) != 0)
63587 + if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
63588 return 0;
63589 if (!S_ISBLK(stat.st_mode))
63590 return 0;
63591 @@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
63592 static inline u32 bstat(char *name)
63593 {
63594 struct stat stat;
63595 - if (sys_newstat(name, &stat) != 0)
63596 + if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
63597 return 0;
63598 if (!S_ISBLK(stat.st_mode))
63599 return 0;
63600 diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
63601 index 3098a38..253064e 100644
63602 --- a/init/do_mounts_initrd.c
63603 +++ b/init/do_mounts_initrd.c
63604 @@ -44,13 +44,13 @@ static void __init handle_initrd(void)
63605 create_dev("/dev/root.old", Root_RAM0);
63606 /* mount initrd on rootfs' /root */
63607 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
63608 - sys_mkdir("/old", 0700);
63609 - root_fd = sys_open("/", 0, 0);
63610 - old_fd = sys_open("/old", 0, 0);
63611 + sys_mkdir((const char __force_user *)"/old", 0700);
63612 + root_fd = sys_open((const char __force_user *)"/", 0, 0);
63613 + old_fd = sys_open((const char __force_user *)"/old", 0, 0);
63614 /* move initrd over / and chdir/chroot in initrd root */
63615 - sys_chdir("/root");
63616 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
63617 - sys_chroot(".");
63618 + sys_chdir((const char __force_user *)"/root");
63619 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
63620 + sys_chroot((const char __force_user *)".");
63621
63622 /*
63623 * In case that a resume from disk is carried out by linuxrc or one of
63624 @@ -67,15 +67,15 @@ static void __init handle_initrd(void)
63625
63626 /* move initrd to rootfs' /old */
63627 sys_fchdir(old_fd);
63628 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
63629 + sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
63630 /* switch root and cwd back to / of rootfs */
63631 sys_fchdir(root_fd);
63632 - sys_chroot(".");
63633 + sys_chroot((const char __force_user *)".");
63634 sys_close(old_fd);
63635 sys_close(root_fd);
63636
63637 if (new_decode_dev(real_root_dev) == Root_RAM0) {
63638 - sys_chdir("/old");
63639 + sys_chdir((const char __force_user *)"/old");
63640 return;
63641 }
63642
63643 @@ -83,17 +83,17 @@ static void __init handle_initrd(void)
63644 mount_root();
63645
63646 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
63647 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
63648 + error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
63649 if (!error)
63650 printk("okay\n");
63651 else {
63652 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
63653 + int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
63654 if (error == -ENOENT)
63655 printk("/initrd does not exist. Ignored.\n");
63656 else
63657 printk("failed\n");
63658 printk(KERN_NOTICE "Unmounting old root\n");
63659 - sys_umount("/old", MNT_DETACH);
63660 + sys_umount((char __force_user *)"/old", MNT_DETACH);
63661 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
63662 if (fd < 0) {
63663 error = fd;
63664 @@ -116,11 +116,11 @@ int __init initrd_load(void)
63665 * mounted in the normal path.
63666 */
63667 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
63668 - sys_unlink("/initrd.image");
63669 + sys_unlink((const char __force_user *)"/initrd.image");
63670 handle_initrd();
63671 return 1;
63672 }
63673 }
63674 - sys_unlink("/initrd.image");
63675 + sys_unlink((const char __force_user *)"/initrd.image");
63676 return 0;
63677 }
63678 diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
63679 index 32c4799..c27ee74 100644
63680 --- a/init/do_mounts_md.c
63681 +++ b/init/do_mounts_md.c
63682 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
63683 partitioned ? "_d" : "", minor,
63684 md_setup_args[ent].device_names);
63685
63686 - fd = sys_open(name, 0, 0);
63687 + fd = sys_open((char __force_user *)name, 0, 0);
63688 if (fd < 0) {
63689 printk(KERN_ERR "md: open failed - cannot start "
63690 "array %s\n", name);
63691 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
63692 * array without it
63693 */
63694 sys_close(fd);
63695 - fd = sys_open(name, 0, 0);
63696 + fd = sys_open((char __force_user *)name, 0, 0);
63697 sys_ioctl(fd, BLKRRPART, 0);
63698 }
63699 sys_close(fd);
63700 @@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
63701
63702 wait_for_device_probe();
63703
63704 - fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
63705 + fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
63706 if (fd >= 0) {
63707 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
63708 sys_close(fd);
63709 diff --git a/init/initramfs.c b/init/initramfs.c
63710 index 8216c30..25e8e32 100644
63711 --- a/init/initramfs.c
63712 +++ b/init/initramfs.c
63713 @@ -74,7 +74,7 @@ static void __init free_hash(void)
63714 }
63715 }
63716
63717 -static long __init do_utime(char __user *filename, time_t mtime)
63718 +static long __init do_utime(__force char __user *filename, time_t mtime)
63719 {
63720 struct timespec t[2];
63721
63722 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
63723 struct dir_entry *de, *tmp;
63724 list_for_each_entry_safe(de, tmp, &dir_list, list) {
63725 list_del(&de->list);
63726 - do_utime(de->name, de->mtime);
63727 + do_utime((char __force_user *)de->name, de->mtime);
63728 kfree(de->name);
63729 kfree(de);
63730 }
63731 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
63732 if (nlink >= 2) {
63733 char *old = find_link(major, minor, ino, mode, collected);
63734 if (old)
63735 - return (sys_link(old, collected) < 0) ? -1 : 1;
63736 + return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
63737 }
63738 return 0;
63739 }
63740 @@ -280,11 +280,11 @@ static void __init clean_path(char *path, umode_t mode)
63741 {
63742 struct stat st;
63743
63744 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
63745 + if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
63746 if (S_ISDIR(st.st_mode))
63747 - sys_rmdir(path);
63748 + sys_rmdir((char __force_user *)path);
63749 else
63750 - sys_unlink(path);
63751 + sys_unlink((char __force_user *)path);
63752 }
63753 }
63754
63755 @@ -305,7 +305,7 @@ static int __init do_name(void)
63756 int openflags = O_WRONLY|O_CREAT;
63757 if (ml != 1)
63758 openflags |= O_TRUNC;
63759 - wfd = sys_open(collected, openflags, mode);
63760 + wfd = sys_open((char __force_user *)collected, openflags, mode);
63761
63762 if (wfd >= 0) {
63763 sys_fchown(wfd, uid, gid);
63764 @@ -317,17 +317,17 @@ static int __init do_name(void)
63765 }
63766 }
63767 } else if (S_ISDIR(mode)) {
63768 - sys_mkdir(collected, mode);
63769 - sys_chown(collected, uid, gid);
63770 - sys_chmod(collected, mode);
63771 + sys_mkdir((char __force_user *)collected, mode);
63772 + sys_chown((char __force_user *)collected, uid, gid);
63773 + sys_chmod((char __force_user *)collected, mode);
63774 dir_add(collected, mtime);
63775 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
63776 S_ISFIFO(mode) || S_ISSOCK(mode)) {
63777 if (maybe_link() == 0) {
63778 - sys_mknod(collected, mode, rdev);
63779 - sys_chown(collected, uid, gid);
63780 - sys_chmod(collected, mode);
63781 - do_utime(collected, mtime);
63782 + sys_mknod((char __force_user *)collected, mode, rdev);
63783 + sys_chown((char __force_user *)collected, uid, gid);
63784 + sys_chmod((char __force_user *)collected, mode);
63785 + do_utime((char __force_user *)collected, mtime);
63786 }
63787 }
63788 return 0;
63789 @@ -336,15 +336,15 @@ static int __init do_name(void)
63790 static int __init do_copy(void)
63791 {
63792 if (count >= body_len) {
63793 - sys_write(wfd, victim, body_len);
63794 + sys_write(wfd, (char __force_user *)victim, body_len);
63795 sys_close(wfd);
63796 - do_utime(vcollected, mtime);
63797 + do_utime((char __force_user *)vcollected, mtime);
63798 kfree(vcollected);
63799 eat(body_len);
63800 state = SkipIt;
63801 return 0;
63802 } else {
63803 - sys_write(wfd, victim, count);
63804 + sys_write(wfd, (char __force_user *)victim, count);
63805 body_len -= count;
63806 eat(count);
63807 return 1;
63808 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
63809 {
63810 collected[N_ALIGN(name_len) + body_len] = '\0';
63811 clean_path(collected, 0);
63812 - sys_symlink(collected + N_ALIGN(name_len), collected);
63813 - sys_lchown(collected, uid, gid);
63814 - do_utime(collected, mtime);
63815 + sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
63816 + sys_lchown((char __force_user *)collected, uid, gid);
63817 + do_utime((char __force_user *)collected, mtime);
63818 state = SkipIt;
63819 next_state = Reset;
63820 return 0;
63821 diff --git a/init/main.c b/init/main.c
63822 index ff49a6d..5fa0429 100644
63823 --- a/init/main.c
63824 +++ b/init/main.c
63825 @@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void) { }
63826 extern void tc_init(void);
63827 #endif
63828
63829 +extern void grsecurity_init(void);
63830 +
63831 /*
63832 * Debug helper: via this flag we know that we are in 'early bootup code'
63833 * where only the boot processor is running with IRQ disabled. This means
63834 @@ -149,6 +151,49 @@ static int __init set_reset_devices(char *str)
63835
63836 __setup("reset_devices", set_reset_devices);
63837
63838 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
63839 +extern char pax_enter_kernel_user[];
63840 +extern char pax_exit_kernel_user[];
63841 +extern pgdval_t clone_pgd_mask;
63842 +#endif
63843 +
63844 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
63845 +static int __init setup_pax_nouderef(char *str)
63846 +{
63847 +#ifdef CONFIG_X86_32
63848 + unsigned int cpu;
63849 + struct desc_struct *gdt;
63850 +
63851 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
63852 + gdt = get_cpu_gdt_table(cpu);
63853 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
63854 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
63855 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
63856 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
63857 + }
63858 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
63859 +#else
63860 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
63861 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
63862 + clone_pgd_mask = ~(pgdval_t)0UL;
63863 +#endif
63864 +
63865 + return 0;
63866 +}
63867 +early_param("pax_nouderef", setup_pax_nouderef);
63868 +#endif
63869 +
63870 +#ifdef CONFIG_PAX_SOFTMODE
63871 +int pax_softmode;
63872 +
63873 +static int __init setup_pax_softmode(char *str)
63874 +{
63875 + get_option(&str, &pax_softmode);
63876 + return 1;
63877 +}
63878 +__setup("pax_softmode=", setup_pax_softmode);
63879 +#endif
63880 +
63881 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
63882 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
63883 static const char *panic_later, *panic_param;
63884 @@ -675,6 +720,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
63885 {
63886 int count = preempt_count();
63887 int ret;
63888 + const char *msg1 = "", *msg2 = "";
63889
63890 if (initcall_debug)
63891 ret = do_one_initcall_debug(fn);
63892 @@ -687,15 +733,15 @@ int __init_or_module do_one_initcall(initcall_t fn)
63893 sprintf(msgbuf, "error code %d ", ret);
63894
63895 if (preempt_count() != count) {
63896 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
63897 + msg1 = " preemption imbalance";
63898 preempt_count() = count;
63899 }
63900 if (irqs_disabled()) {
63901 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
63902 + msg2 = " disabled interrupts";
63903 local_irq_enable();
63904 }
63905 - if (msgbuf[0]) {
63906 - printk("initcall %pF returned with %s\n", fn, msgbuf);
63907 + if (msgbuf[0] || *msg1 || *msg2) {
63908 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
63909 }
63910
63911 return ret;
63912 @@ -814,7 +860,7 @@ static int __init kernel_init(void * unused)
63913 do_basic_setup();
63914
63915 /* Open the /dev/console on the rootfs, this should never fail */
63916 - if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
63917 + if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
63918 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
63919
63920 (void) sys_dup(0);
63921 @@ -827,11 +873,13 @@ static int __init kernel_init(void * unused)
63922 if (!ramdisk_execute_command)
63923 ramdisk_execute_command = "/init";
63924
63925 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
63926 + if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
63927 ramdisk_execute_command = NULL;
63928 prepare_namespace();
63929 }
63930
63931 + grsecurity_init();
63932 +
63933 /*
63934 * Ok, we have completed the initial bootup, and
63935 * we're essentially up and running. Get rid of the
63936 diff --git a/ipc/mqueue.c b/ipc/mqueue.c
63937 index 86ee272..773d937 100644
63938 --- a/ipc/mqueue.c
63939 +++ b/ipc/mqueue.c
63940 @@ -156,6 +156,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
63941 mq_bytes = (mq_msg_tblsz +
63942 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
63943
63944 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
63945 spin_lock(&mq_lock);
63946 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
63947 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
63948 diff --git a/ipc/msg.c b/ipc/msg.c
63949 index 7385de2..a8180e08 100644
63950 --- a/ipc/msg.c
63951 +++ b/ipc/msg.c
63952 @@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
63953 return security_msg_queue_associate(msq, msgflg);
63954 }
63955
63956 +static struct ipc_ops msg_ops = {
63957 + .getnew = newque,
63958 + .associate = msg_security,
63959 + .more_checks = NULL
63960 +};
63961 +
63962 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
63963 {
63964 struct ipc_namespace *ns;
63965 - struct ipc_ops msg_ops;
63966 struct ipc_params msg_params;
63967
63968 ns = current->nsproxy->ipc_ns;
63969
63970 - msg_ops.getnew = newque;
63971 - msg_ops.associate = msg_security;
63972 - msg_ops.more_checks = NULL;
63973 -
63974 msg_params.key = key;
63975 msg_params.flg = msgflg;
63976
63977 diff --git a/ipc/sem.c b/ipc/sem.c
63978 index 5215a81..cfc0cac 100644
63979 --- a/ipc/sem.c
63980 +++ b/ipc/sem.c
63981 @@ -364,10 +364,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
63982 return 0;
63983 }
63984
63985 +static struct ipc_ops sem_ops = {
63986 + .getnew = newary,
63987 + .associate = sem_security,
63988 + .more_checks = sem_more_checks
63989 +};
63990 +
63991 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
63992 {
63993 struct ipc_namespace *ns;
63994 - struct ipc_ops sem_ops;
63995 struct ipc_params sem_params;
63996
63997 ns = current->nsproxy->ipc_ns;
63998 @@ -375,10 +380,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
63999 if (nsems < 0 || nsems > ns->sc_semmsl)
64000 return -EINVAL;
64001
64002 - sem_ops.getnew = newary;
64003 - sem_ops.associate = sem_security;
64004 - sem_ops.more_checks = sem_more_checks;
64005 -
64006 sem_params.key = key;
64007 sem_params.flg = semflg;
64008 sem_params.u.nsems = nsems;
64009 diff --git a/ipc/shm.c b/ipc/shm.c
64010 index b76be5b..859e750 100644
64011 --- a/ipc/shm.c
64012 +++ b/ipc/shm.c
64013 @@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
64014 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
64015 #endif
64016
64017 +#ifdef CONFIG_GRKERNSEC
64018 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
64019 + const time_t shm_createtime, const uid_t cuid,
64020 + const int shmid);
64021 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
64022 + const time_t shm_createtime);
64023 +#endif
64024 +
64025 void shm_init_ns(struct ipc_namespace *ns)
64026 {
64027 ns->shm_ctlmax = SHMMAX;
64028 @@ -508,6 +516,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
64029 shp->shm_lprid = 0;
64030 shp->shm_atim = shp->shm_dtim = 0;
64031 shp->shm_ctim = get_seconds();
64032 +#ifdef CONFIG_GRKERNSEC
64033 + {
64034 + struct timespec timeval;
64035 + do_posix_clock_monotonic_gettime(&timeval);
64036 +
64037 + shp->shm_createtime = timeval.tv_sec;
64038 + }
64039 +#endif
64040 shp->shm_segsz = size;
64041 shp->shm_nattch = 0;
64042 shp->shm_file = file;
64043 @@ -559,18 +575,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
64044 return 0;
64045 }
64046
64047 +static struct ipc_ops shm_ops = {
64048 + .getnew = newseg,
64049 + .associate = shm_security,
64050 + .more_checks = shm_more_checks
64051 +};
64052 +
64053 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
64054 {
64055 struct ipc_namespace *ns;
64056 - struct ipc_ops shm_ops;
64057 struct ipc_params shm_params;
64058
64059 ns = current->nsproxy->ipc_ns;
64060
64061 - shm_ops.getnew = newseg;
64062 - shm_ops.associate = shm_security;
64063 - shm_ops.more_checks = shm_more_checks;
64064 -
64065 shm_params.key = key;
64066 shm_params.flg = shmflg;
64067 shm_params.u.size = size;
64068 @@ -988,6 +1005,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
64069 f_mode = FMODE_READ | FMODE_WRITE;
64070 }
64071 if (shmflg & SHM_EXEC) {
64072 +
64073 +#ifdef CONFIG_PAX_MPROTECT
64074 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
64075 + goto out;
64076 +#endif
64077 +
64078 prot |= PROT_EXEC;
64079 acc_mode |= S_IXUGO;
64080 }
64081 @@ -1011,9 +1034,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
64082 if (err)
64083 goto out_unlock;
64084
64085 +#ifdef CONFIG_GRKERNSEC
64086 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
64087 + shp->shm_perm.cuid, shmid) ||
64088 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
64089 + err = -EACCES;
64090 + goto out_unlock;
64091 + }
64092 +#endif
64093 +
64094 path = shp->shm_file->f_path;
64095 path_get(&path);
64096 shp->shm_nattch++;
64097 +#ifdef CONFIG_GRKERNSEC
64098 + shp->shm_lapid = current->pid;
64099 +#endif
64100 size = i_size_read(path.dentry->d_inode);
64101 shm_unlock(shp);
64102
64103 diff --git a/kernel/acct.c b/kernel/acct.c
64104 index 02e6167..54824f7 100644
64105 --- a/kernel/acct.c
64106 +++ b/kernel/acct.c
64107 @@ -550,7 +550,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
64108 */
64109 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
64110 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
64111 - file->f_op->write(file, (char *)&ac,
64112 + file->f_op->write(file, (char __force_user *)&ac,
64113 sizeof(acct_t), &file->f_pos);
64114 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
64115 set_fs(fs);
64116 diff --git a/kernel/audit.c b/kernel/audit.c
64117 index bb0eb5b..cf2a03a 100644
64118 --- a/kernel/audit.c
64119 +++ b/kernel/audit.c
64120 @@ -115,7 +115,7 @@ u32 audit_sig_sid = 0;
64121 3) suppressed due to audit_rate_limit
64122 4) suppressed due to audit_backlog_limit
64123 */
64124 -static atomic_t audit_lost = ATOMIC_INIT(0);
64125 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
64126
64127 /* The netlink socket. */
64128 static struct sock *audit_sock;
64129 @@ -237,7 +237,7 @@ void audit_log_lost(const char *message)
64130 unsigned long now;
64131 int print;
64132
64133 - atomic_inc(&audit_lost);
64134 + atomic_inc_unchecked(&audit_lost);
64135
64136 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
64137
64138 @@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
64139 printk(KERN_WARNING
64140 "audit: audit_lost=%d audit_rate_limit=%d "
64141 "audit_backlog_limit=%d\n",
64142 - atomic_read(&audit_lost),
64143 + atomic_read_unchecked(&audit_lost),
64144 audit_rate_limit,
64145 audit_backlog_limit);
64146 audit_panic(message);
64147 @@ -689,7 +689,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
64148 status_set.pid = audit_pid;
64149 status_set.rate_limit = audit_rate_limit;
64150 status_set.backlog_limit = audit_backlog_limit;
64151 - status_set.lost = atomic_read(&audit_lost);
64152 + status_set.lost = atomic_read_unchecked(&audit_lost);
64153 status_set.backlog = skb_queue_len(&audit_skb_queue);
64154 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
64155 &status_set, sizeof(status_set));
64156 diff --git a/kernel/auditsc.c b/kernel/auditsc.c
64157 index af1de0f..06dfe57 100644
64158 --- a/kernel/auditsc.c
64159 +++ b/kernel/auditsc.c
64160 @@ -2288,7 +2288,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
64161 }
64162
64163 /* global counter which is incremented every time something logs in */
64164 -static atomic_t session_id = ATOMIC_INIT(0);
64165 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
64166
64167 /**
64168 * audit_set_loginuid - set current task's audit_context loginuid
64169 @@ -2312,7 +2312,7 @@ int audit_set_loginuid(uid_t loginuid)
64170 return -EPERM;
64171 #endif /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
64172
64173 - sessionid = atomic_inc_return(&session_id);
64174 + sessionid = atomic_inc_return_unchecked(&session_id);
64175 if (context && context->in_syscall) {
64176 struct audit_buffer *ab;
64177
64178 diff --git a/kernel/capability.c b/kernel/capability.c
64179 index 3f1adb6..c564db0 100644
64180 --- a/kernel/capability.c
64181 +++ b/kernel/capability.c
64182 @@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
64183 * before modification is attempted and the application
64184 * fails.
64185 */
64186 + if (tocopy > ARRAY_SIZE(kdata))
64187 + return -EFAULT;
64188 +
64189 if (copy_to_user(dataptr, kdata, tocopy
64190 * sizeof(struct __user_cap_data_struct))) {
64191 return -EFAULT;
64192 @@ -303,10 +306,11 @@ bool has_ns_capability(struct task_struct *t,
64193 int ret;
64194
64195 rcu_read_lock();
64196 - ret = security_capable(__task_cred(t), ns, cap);
64197 + ret = security_capable(__task_cred(t), ns, cap) == 0 &&
64198 + gr_task_is_capable(t, __task_cred(t), cap);
64199 rcu_read_unlock();
64200
64201 - return (ret == 0);
64202 + return ret;
64203 }
64204
64205 /**
64206 @@ -343,10 +347,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
64207 int ret;
64208
64209 rcu_read_lock();
64210 - ret = security_capable_noaudit(__task_cred(t), ns, cap);
64211 + ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
64212 rcu_read_unlock();
64213
64214 - return (ret == 0);
64215 + return ret;
64216 }
64217
64218 /**
64219 @@ -384,7 +388,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
64220 BUG();
64221 }
64222
64223 - if (security_capable(current_cred(), ns, cap) == 0) {
64224 + if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
64225 current->flags |= PF_SUPERPRIV;
64226 return true;
64227 }
64228 @@ -392,6 +396,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
64229 }
64230 EXPORT_SYMBOL(ns_capable);
64231
64232 +bool ns_capable_nolog(struct user_namespace *ns, int cap)
64233 +{
64234 + if (unlikely(!cap_valid(cap))) {
64235 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
64236 + BUG();
64237 + }
64238 +
64239 + if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
64240 + current->flags |= PF_SUPERPRIV;
64241 + return true;
64242 + }
64243 + return false;
64244 +}
64245 +EXPORT_SYMBOL(ns_capable_nolog);
64246 +
64247 /**
64248 * capable - Determine if the current task has a superior capability in effect
64249 * @cap: The capability to be tested for
64250 @@ -408,6 +427,12 @@ bool capable(int cap)
64251 }
64252 EXPORT_SYMBOL(capable);
64253
64254 +bool capable_nolog(int cap)
64255 +{
64256 + return ns_capable_nolog(&init_user_ns, cap);
64257 +}
64258 +EXPORT_SYMBOL(capable_nolog);
64259 +
64260 /**
64261 * nsown_capable - Check superior capability to one's own user_ns
64262 * @cap: The capability in question
64263 diff --git a/kernel/compat.c b/kernel/compat.c
64264 index f346ced..aa2b1f4 100644
64265 --- a/kernel/compat.c
64266 +++ b/kernel/compat.c
64267 @@ -13,6 +13,7 @@
64268
64269 #include <linux/linkage.h>
64270 #include <linux/compat.h>
64271 +#include <linux/module.h>
64272 #include <linux/errno.h>
64273 #include <linux/time.h>
64274 #include <linux/signal.h>
64275 @@ -168,7 +169,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
64276 mm_segment_t oldfs;
64277 long ret;
64278
64279 - restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
64280 + restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
64281 oldfs = get_fs();
64282 set_fs(KERNEL_DS);
64283 ret = hrtimer_nanosleep_restart(restart);
64284 @@ -200,7 +201,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
64285 oldfs = get_fs();
64286 set_fs(KERNEL_DS);
64287 ret = hrtimer_nanosleep(&tu,
64288 - rmtp ? (struct timespec __user *)&rmt : NULL,
64289 + rmtp ? (struct timespec __force_user *)&rmt : NULL,
64290 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
64291 set_fs(oldfs);
64292
64293 @@ -309,7 +310,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
64294 mm_segment_t old_fs = get_fs();
64295
64296 set_fs(KERNEL_DS);
64297 - ret = sys_sigpending((old_sigset_t __user *) &s);
64298 + ret = sys_sigpending((old_sigset_t __force_user *) &s);
64299 set_fs(old_fs);
64300 if (ret == 0)
64301 ret = put_user(s, set);
64302 @@ -332,8 +333,8 @@ asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
64303 old_fs = get_fs();
64304 set_fs(KERNEL_DS);
64305 ret = sys_sigprocmask(how,
64306 - set ? (old_sigset_t __user *) &s : NULL,
64307 - oset ? (old_sigset_t __user *) &s : NULL);
64308 + set ? (old_sigset_t __force_user *) &s : NULL,
64309 + oset ? (old_sigset_t __force_user *) &s : NULL);
64310 set_fs(old_fs);
64311 if (ret == 0)
64312 if (oset)
64313 @@ -370,7 +371,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
64314 mm_segment_t old_fs = get_fs();
64315
64316 set_fs(KERNEL_DS);
64317 - ret = sys_old_getrlimit(resource, &r);
64318 + ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
64319 set_fs(old_fs);
64320
64321 if (!ret) {
64322 @@ -442,7 +443,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
64323 mm_segment_t old_fs = get_fs();
64324
64325 set_fs(KERNEL_DS);
64326 - ret = sys_getrusage(who, (struct rusage __user *) &r);
64327 + ret = sys_getrusage(who, (struct rusage __force_user *) &r);
64328 set_fs(old_fs);
64329
64330 if (ret)
64331 @@ -469,8 +470,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
64332 set_fs (KERNEL_DS);
64333 ret = sys_wait4(pid,
64334 (stat_addr ?
64335 - (unsigned int __user *) &status : NULL),
64336 - options, (struct rusage __user *) &r);
64337 + (unsigned int __force_user *) &status : NULL),
64338 + options, (struct rusage __force_user *) &r);
64339 set_fs (old_fs);
64340
64341 if (ret > 0) {
64342 @@ -495,8 +496,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
64343 memset(&info, 0, sizeof(info));
64344
64345 set_fs(KERNEL_DS);
64346 - ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
64347 - uru ? (struct rusage __user *)&ru : NULL);
64348 + ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
64349 + uru ? (struct rusage __force_user *)&ru : NULL);
64350 set_fs(old_fs);
64351
64352 if ((ret < 0) || (info.si_signo == 0))
64353 @@ -626,8 +627,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
64354 oldfs = get_fs();
64355 set_fs(KERNEL_DS);
64356 err = sys_timer_settime(timer_id, flags,
64357 - (struct itimerspec __user *) &newts,
64358 - (struct itimerspec __user *) &oldts);
64359 + (struct itimerspec __force_user *) &newts,
64360 + (struct itimerspec __force_user *) &oldts);
64361 set_fs(oldfs);
64362 if (!err && old && put_compat_itimerspec(old, &oldts))
64363 return -EFAULT;
64364 @@ -644,7 +645,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
64365 oldfs = get_fs();
64366 set_fs(KERNEL_DS);
64367 err = sys_timer_gettime(timer_id,
64368 - (struct itimerspec __user *) &ts);
64369 + (struct itimerspec __force_user *) &ts);
64370 set_fs(oldfs);
64371 if (!err && put_compat_itimerspec(setting, &ts))
64372 return -EFAULT;
64373 @@ -663,7 +664,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
64374 oldfs = get_fs();
64375 set_fs(KERNEL_DS);
64376 err = sys_clock_settime(which_clock,
64377 - (struct timespec __user *) &ts);
64378 + (struct timespec __force_user *) &ts);
64379 set_fs(oldfs);
64380 return err;
64381 }
64382 @@ -678,7 +679,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
64383 oldfs = get_fs();
64384 set_fs(KERNEL_DS);
64385 err = sys_clock_gettime(which_clock,
64386 - (struct timespec __user *) &ts);
64387 + (struct timespec __force_user *) &ts);
64388 set_fs(oldfs);
64389 if (!err && put_compat_timespec(&ts, tp))
64390 return -EFAULT;
64391 @@ -698,7 +699,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
64392
64393 oldfs = get_fs();
64394 set_fs(KERNEL_DS);
64395 - ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
64396 + ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
64397 set_fs(oldfs);
64398
64399 err = compat_put_timex(utp, &txc);
64400 @@ -718,7 +719,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
64401 oldfs = get_fs();
64402 set_fs(KERNEL_DS);
64403 err = sys_clock_getres(which_clock,
64404 - (struct timespec __user *) &ts);
64405 + (struct timespec __force_user *) &ts);
64406 set_fs(oldfs);
64407 if (!err && tp && put_compat_timespec(&ts, tp))
64408 return -EFAULT;
64409 @@ -730,9 +731,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
64410 long err;
64411 mm_segment_t oldfs;
64412 struct timespec tu;
64413 - struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
64414 + struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
64415
64416 - restart->nanosleep.rmtp = (struct timespec __user *) &tu;
64417 + restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
64418 oldfs = get_fs();
64419 set_fs(KERNEL_DS);
64420 err = clock_nanosleep_restart(restart);
64421 @@ -764,8 +765,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
64422 oldfs = get_fs();
64423 set_fs(KERNEL_DS);
64424 err = sys_clock_nanosleep(which_clock, flags,
64425 - (struct timespec __user *) &in,
64426 - (struct timespec __user *) &out);
64427 + (struct timespec __force_user *) &in,
64428 + (struct timespec __force_user *) &out);
64429 set_fs(oldfs);
64430
64431 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
64432 diff --git a/kernel/configs.c b/kernel/configs.c
64433 index 42e8fa0..9e7406b 100644
64434 --- a/kernel/configs.c
64435 +++ b/kernel/configs.c
64436 @@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
64437 struct proc_dir_entry *entry;
64438
64439 /* create the current config file */
64440 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
64441 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
64442 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
64443 + &ikconfig_file_ops);
64444 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
64445 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
64446 + &ikconfig_file_ops);
64447 +#endif
64448 +#else
64449 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
64450 &ikconfig_file_ops);
64451 +#endif
64452 +
64453 if (!entry)
64454 return -ENOMEM;
64455
64456 diff --git a/kernel/cred.c b/kernel/cred.c
64457 index 5791612..a3c04dc 100644
64458 --- a/kernel/cred.c
64459 +++ b/kernel/cred.c
64460 @@ -204,6 +204,15 @@ void exit_creds(struct task_struct *tsk)
64461 validate_creds(cred);
64462 put_cred(cred);
64463 }
64464 +
64465 +#ifdef CONFIG_GRKERNSEC_SETXID
64466 + cred = (struct cred *) tsk->delayed_cred;
64467 + if (cred) {
64468 + tsk->delayed_cred = NULL;
64469 + validate_creds(cred);
64470 + put_cred(cred);
64471 + }
64472 +#endif
64473 }
64474
64475 /**
64476 @@ -470,7 +479,7 @@ error_put:
64477 * Always returns 0 thus allowing this function to be tail-called at the end
64478 * of, say, sys_setgid().
64479 */
64480 -int commit_creds(struct cred *new)
64481 +static int __commit_creds(struct cred *new)
64482 {
64483 struct task_struct *task = current;
64484 const struct cred *old = task->real_cred;
64485 @@ -489,6 +498,8 @@ int commit_creds(struct cred *new)
64486
64487 get_cred(new); /* we will require a ref for the subj creds too */
64488
64489 + gr_set_role_label(task, new->uid, new->gid);
64490 +
64491 /* dumpability changes */
64492 if (old->euid != new->euid ||
64493 old->egid != new->egid ||
64494 @@ -538,6 +549,92 @@ int commit_creds(struct cred *new)
64495 put_cred(old);
64496 return 0;
64497 }
64498 +#ifdef CONFIG_GRKERNSEC_SETXID
64499 +extern int set_user(struct cred *new);
64500 +
64501 +void gr_delayed_cred_worker(void)
64502 +{
64503 + const struct cred *new = current->delayed_cred;
64504 + struct cred *ncred;
64505 +
64506 + current->delayed_cred = NULL;
64507 +
64508 + if (current_uid() && new != NULL) {
64509 + // from doing get_cred on it when queueing this
64510 + put_cred(new);
64511 + return;
64512 + } else if (new == NULL)
64513 + return;
64514 +
64515 + ncred = prepare_creds();
64516 + if (!ncred)
64517 + goto die;
64518 + // uids
64519 + ncred->uid = new->uid;
64520 + ncred->euid = new->euid;
64521 + ncred->suid = new->suid;
64522 + ncred->fsuid = new->fsuid;
64523 + // gids
64524 + ncred->gid = new->gid;
64525 + ncred->egid = new->egid;
64526 + ncred->sgid = new->sgid;
64527 + ncred->fsgid = new->fsgid;
64528 + // groups
64529 + if (set_groups(ncred, new->group_info) < 0) {
64530 + abort_creds(ncred);
64531 + goto die;
64532 + }
64533 + // caps
64534 + ncred->securebits = new->securebits;
64535 + ncred->cap_inheritable = new->cap_inheritable;
64536 + ncred->cap_permitted = new->cap_permitted;
64537 + ncred->cap_effective = new->cap_effective;
64538 + ncred->cap_bset = new->cap_bset;
64539 +
64540 + if (set_user(ncred)) {
64541 + abort_creds(ncred);
64542 + goto die;
64543 + }
64544 +
64545 + // from doing get_cred on it when queueing this
64546 + put_cred(new);
64547 +
64548 + __commit_creds(ncred);
64549 + return;
64550 +die:
64551 + // from doing get_cred on it when queueing this
64552 + put_cred(new);
64553 + do_group_exit(SIGKILL);
64554 +}
64555 +#endif
64556 +
64557 +int commit_creds(struct cred *new)
64558 +{
64559 +#ifdef CONFIG_GRKERNSEC_SETXID
64560 + struct task_struct *t;
64561 +
64562 + /* we won't get called with tasklist_lock held for writing
64563 + and interrupts disabled as the cred struct in that case is
64564 + init_cred
64565 + */
64566 + if (grsec_enable_setxid && !current_is_single_threaded() &&
64567 + !current_uid() && new->uid) {
64568 + rcu_read_lock();
64569 + read_lock(&tasklist_lock);
64570 + for (t = next_thread(current); t != current;
64571 + t = next_thread(t)) {
64572 + if (t->delayed_cred == NULL) {
64573 + t->delayed_cred = get_cred(new);
64574 + set_tsk_need_resched(t);
64575 + }
64576 + }
64577 + read_unlock(&tasklist_lock);
64578 + rcu_read_unlock();
64579 + }
64580 +#endif
64581 + return __commit_creds(new);
64582 +}
64583 +
64584 EXPORT_SYMBOL(commit_creds);
64585
64586 /**
64587 diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
64588 index 0d7c087..01b8cef 100644
64589 --- a/kernel/debug/debug_core.c
64590 +++ b/kernel/debug/debug_core.c
64591 @@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
64592 */
64593 static atomic_t masters_in_kgdb;
64594 static atomic_t slaves_in_kgdb;
64595 -static atomic_t kgdb_break_tasklet_var;
64596 +static atomic_unchecked_t kgdb_break_tasklet_var;
64597 atomic_t kgdb_setting_breakpoint;
64598
64599 struct task_struct *kgdb_usethread;
64600 @@ -129,7 +129,7 @@ int kgdb_single_step;
64601 static pid_t kgdb_sstep_pid;
64602
64603 /* to keep track of the CPU which is doing the single stepping*/
64604 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
64605 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
64606
64607 /*
64608 * If you are debugging a problem where roundup (the collection of
64609 @@ -542,7 +542,7 @@ return_normal:
64610 * kernel will only try for the value of sstep_tries before
64611 * giving up and continuing on.
64612 */
64613 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
64614 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
64615 (kgdb_info[cpu].task &&
64616 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
64617 atomic_set(&kgdb_active, -1);
64618 @@ -636,8 +636,8 @@ cpu_master_loop:
64619 }
64620
64621 kgdb_restore:
64622 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
64623 - int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
64624 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
64625 + int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
64626 if (kgdb_info[sstep_cpu].task)
64627 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
64628 else
64629 @@ -834,18 +834,18 @@ static void kgdb_unregister_callbacks(void)
64630 static void kgdb_tasklet_bpt(unsigned long ing)
64631 {
64632 kgdb_breakpoint();
64633 - atomic_set(&kgdb_break_tasklet_var, 0);
64634 + atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
64635 }
64636
64637 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
64638
64639 void kgdb_schedule_breakpoint(void)
64640 {
64641 - if (atomic_read(&kgdb_break_tasklet_var) ||
64642 + if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
64643 atomic_read(&kgdb_active) != -1 ||
64644 atomic_read(&kgdb_setting_breakpoint))
64645 return;
64646 - atomic_inc(&kgdb_break_tasklet_var);
64647 + atomic_inc_unchecked(&kgdb_break_tasklet_var);
64648 tasklet_schedule(&kgdb_tasklet_breakpoint);
64649 }
64650 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
64651 diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
64652 index e2ae734..08a4c5c 100644
64653 --- a/kernel/debug/kdb/kdb_main.c
64654 +++ b/kernel/debug/kdb/kdb_main.c
64655 @@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const char **argv)
64656 list_for_each_entry(mod, kdb_modules, list) {
64657
64658 kdb_printf("%-20s%8u 0x%p ", mod->name,
64659 - mod->core_size, (void *)mod);
64660 + mod->core_size_rx + mod->core_size_rw, (void *)mod);
64661 #ifdef CONFIG_MODULE_UNLOAD
64662 kdb_printf("%4ld ", module_refcount(mod));
64663 #endif
64664 @@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const char **argv)
64665 kdb_printf(" (Loading)");
64666 else
64667 kdb_printf(" (Live)");
64668 - kdb_printf(" 0x%p", mod->module_core);
64669 + kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
64670
64671 #ifdef CONFIG_MODULE_UNLOAD
64672 {
64673 diff --git a/kernel/events/core.c b/kernel/events/core.c
64674 index 1b5c081..c375f83 100644
64675 --- a/kernel/events/core.c
64676 +++ b/kernel/events/core.c
64677 @@ -173,7 +173,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
64678 return 0;
64679 }
64680
64681 -static atomic64_t perf_event_id;
64682 +static atomic64_unchecked_t perf_event_id;
64683
64684 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
64685 enum event_type_t event_type);
64686 @@ -2581,7 +2581,7 @@ static void __perf_event_read(void *info)
64687
64688 static inline u64 perf_event_count(struct perf_event *event)
64689 {
64690 - return local64_read(&event->count) + atomic64_read(&event->child_count);
64691 + return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
64692 }
64693
64694 static u64 perf_event_read(struct perf_event *event)
64695 @@ -2897,9 +2897,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
64696 mutex_lock(&event->child_mutex);
64697 total += perf_event_read(event);
64698 *enabled += event->total_time_enabled +
64699 - atomic64_read(&event->child_total_time_enabled);
64700 + atomic64_read_unchecked(&event->child_total_time_enabled);
64701 *running += event->total_time_running +
64702 - atomic64_read(&event->child_total_time_running);
64703 + atomic64_read_unchecked(&event->child_total_time_running);
64704
64705 list_for_each_entry(child, &event->child_list, child_list) {
64706 total += perf_event_read(child);
64707 @@ -3306,10 +3306,10 @@ void perf_event_update_userpage(struct perf_event *event)
64708 userpg->offset -= local64_read(&event->hw.prev_count);
64709
64710 userpg->time_enabled = enabled +
64711 - atomic64_read(&event->child_total_time_enabled);
64712 + atomic64_read_unchecked(&event->child_total_time_enabled);
64713
64714 userpg->time_running = running +
64715 - atomic64_read(&event->child_total_time_running);
64716 + atomic64_read_unchecked(&event->child_total_time_running);
64717
64718 barrier();
64719 ++userpg->lock;
64720 @@ -3738,11 +3738,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
64721 values[n++] = perf_event_count(event);
64722 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
64723 values[n++] = enabled +
64724 - atomic64_read(&event->child_total_time_enabled);
64725 + atomic64_read_unchecked(&event->child_total_time_enabled);
64726 }
64727 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
64728 values[n++] = running +
64729 - atomic64_read(&event->child_total_time_running);
64730 + atomic64_read_unchecked(&event->child_total_time_running);
64731 }
64732 if (read_format & PERF_FORMAT_ID)
64733 values[n++] = primary_event_id(event);
64734 @@ -4393,12 +4393,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
64735 * need to add enough zero bytes after the string to handle
64736 * the 64bit alignment we do later.
64737 */
64738 - buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
64739 + buf = kzalloc(PATH_MAX, GFP_KERNEL);
64740 if (!buf) {
64741 name = strncpy(tmp, "//enomem", sizeof(tmp));
64742 goto got_name;
64743 }
64744 - name = d_path(&file->f_path, buf, PATH_MAX);
64745 + name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
64746 if (IS_ERR(name)) {
64747 name = strncpy(tmp, "//toolong", sizeof(tmp));
64748 goto got_name;
64749 @@ -5765,7 +5765,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
64750 event->parent = parent_event;
64751
64752 event->ns = get_pid_ns(current->nsproxy->pid_ns);
64753 - event->id = atomic64_inc_return(&perf_event_id);
64754 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
64755
64756 event->state = PERF_EVENT_STATE_INACTIVE;
64757
64758 @@ -6287,10 +6287,10 @@ static void sync_child_event(struct perf_event *child_event,
64759 /*
64760 * Add back the child's count to the parent's count:
64761 */
64762 - atomic64_add(child_val, &parent_event->child_count);
64763 - atomic64_add(child_event->total_time_enabled,
64764 + atomic64_add_unchecked(child_val, &parent_event->child_count);
64765 + atomic64_add_unchecked(child_event->total_time_enabled,
64766 &parent_event->child_total_time_enabled);
64767 - atomic64_add(child_event->total_time_running,
64768 + atomic64_add_unchecked(child_event->total_time_running,
64769 &parent_event->child_total_time_running);
64770
64771 /*
64772 diff --git a/kernel/exit.c b/kernel/exit.c
64773 index 4b4042f..5bdd8d5 100644
64774 --- a/kernel/exit.c
64775 +++ b/kernel/exit.c
64776 @@ -58,6 +58,10 @@
64777 #include <asm/pgtable.h>
64778 #include <asm/mmu_context.h>
64779
64780 +#ifdef CONFIG_GRKERNSEC
64781 +extern rwlock_t grsec_exec_file_lock;
64782 +#endif
64783 +
64784 static void exit_mm(struct task_struct * tsk);
64785
64786 static void __unhash_process(struct task_struct *p, bool group_dead)
64787 @@ -169,6 +173,10 @@ void release_task(struct task_struct * p)
64788 struct task_struct *leader;
64789 int zap_leader;
64790 repeat:
64791 +#ifdef CONFIG_NET
64792 + gr_del_task_from_ip_table(p);
64793 +#endif
64794 +
64795 /* don't need to get the RCU readlock here - the process is dead and
64796 * can't be modifying its own credentials. But shut RCU-lockdep up */
64797 rcu_read_lock();
64798 @@ -381,7 +389,7 @@ int allow_signal(int sig)
64799 * know it'll be handled, so that they don't get converted to
64800 * SIGKILL or just silently dropped.
64801 */
64802 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
64803 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
64804 recalc_sigpending();
64805 spin_unlock_irq(&current->sighand->siglock);
64806 return 0;
64807 @@ -417,6 +425,17 @@ void daemonize(const char *name, ...)
64808 vsnprintf(current->comm, sizeof(current->comm), name, args);
64809 va_end(args);
64810
64811 +#ifdef CONFIG_GRKERNSEC
64812 + write_lock(&grsec_exec_file_lock);
64813 + if (current->exec_file) {
64814 + fput(current->exec_file);
64815 + current->exec_file = NULL;
64816 + }
64817 + write_unlock(&grsec_exec_file_lock);
64818 +#endif
64819 +
64820 + gr_set_kernel_label(current);
64821 +
64822 /*
64823 * If we were started as result of loading a module, close all of the
64824 * user space pages. We don't need them, and if we didn't close them
64825 @@ -892,6 +911,8 @@ void do_exit(long code)
64826 struct task_struct *tsk = current;
64827 int group_dead;
64828
64829 + set_fs(USER_DS);
64830 +
64831 profile_task_exit(tsk);
64832
64833 WARN_ON(blk_needs_flush_plug(tsk));
64834 @@ -908,7 +929,6 @@ void do_exit(long code)
64835 * mm_release()->clear_child_tid() from writing to a user-controlled
64836 * kernel address.
64837 */
64838 - set_fs(USER_DS);
64839
64840 ptrace_event(PTRACE_EVENT_EXIT, code);
64841
64842 @@ -969,6 +989,9 @@ void do_exit(long code)
64843 tsk->exit_code = code;
64844 taskstats_exit(tsk, group_dead);
64845
64846 + gr_acl_handle_psacct(tsk, code);
64847 + gr_acl_handle_exit();
64848 +
64849 exit_mm(tsk);
64850
64851 if (group_dead)
64852 @@ -1085,7 +1108,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
64853 * Take down every thread in the group. This is called by fatal signals
64854 * as well as by sys_exit_group (below).
64855 */
64856 -void
64857 +__noreturn void
64858 do_group_exit(int exit_code)
64859 {
64860 struct signal_struct *sig = current->signal;
64861 diff --git a/kernel/fork.c b/kernel/fork.c
64862 index 26a7a67..a1053f9 100644
64863 --- a/kernel/fork.c
64864 +++ b/kernel/fork.c
64865 @@ -284,7 +284,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
64866 *stackend = STACK_END_MAGIC; /* for overflow detection */
64867
64868 #ifdef CONFIG_CC_STACKPROTECTOR
64869 - tsk->stack_canary = get_random_int();
64870 + tsk->stack_canary = pax_get_random_long();
64871 #endif
64872
64873 /*
64874 @@ -308,13 +308,77 @@ out:
64875 }
64876
64877 #ifdef CONFIG_MMU
64878 +static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
64879 +{
64880 + struct vm_area_struct *tmp;
64881 + unsigned long charge;
64882 + struct mempolicy *pol;
64883 + struct file *file;
64884 +
64885 + charge = 0;
64886 + if (mpnt->vm_flags & VM_ACCOUNT) {
64887 + unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
64888 + if (security_vm_enough_memory(len))
64889 + goto fail_nomem;
64890 + charge = len;
64891 + }
64892 + tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
64893 + if (!tmp)
64894 + goto fail_nomem;
64895 + *tmp = *mpnt;
64896 + tmp->vm_mm = mm;
64897 + INIT_LIST_HEAD(&tmp->anon_vma_chain);
64898 + pol = mpol_dup(vma_policy(mpnt));
64899 + if (IS_ERR(pol))
64900 + goto fail_nomem_policy;
64901 + vma_set_policy(tmp, pol);
64902 + if (anon_vma_fork(tmp, mpnt))
64903 + goto fail_nomem_anon_vma_fork;
64904 + tmp->vm_flags &= ~VM_LOCKED;
64905 + tmp->vm_next = tmp->vm_prev = NULL;
64906 + tmp->vm_mirror = NULL;
64907 + file = tmp->vm_file;
64908 + if (file) {
64909 + struct inode *inode = file->f_path.dentry->d_inode;
64910 + struct address_space *mapping = file->f_mapping;
64911 +
64912 + get_file(file);
64913 + if (tmp->vm_flags & VM_DENYWRITE)
64914 + atomic_dec(&inode->i_writecount);
64915 + mutex_lock(&mapping->i_mmap_mutex);
64916 + if (tmp->vm_flags & VM_SHARED)
64917 + mapping->i_mmap_writable++;
64918 + flush_dcache_mmap_lock(mapping);
64919 + /* insert tmp into the share list, just after mpnt */
64920 + vma_prio_tree_add(tmp, mpnt);
64921 + flush_dcache_mmap_unlock(mapping);
64922 + mutex_unlock(&mapping->i_mmap_mutex);
64923 + }
64924 +
64925 + /*
64926 + * Clear hugetlb-related page reserves for children. This only
64927 + * affects MAP_PRIVATE mappings. Faults generated by the child
64928 + * are not guaranteed to succeed, even if read-only
64929 + */
64930 + if (is_vm_hugetlb_page(tmp))
64931 + reset_vma_resv_huge_pages(tmp);
64932 +
64933 + return tmp;
64934 +
64935 +fail_nomem_anon_vma_fork:
64936 + mpol_put(pol);
64937 +fail_nomem_policy:
64938 + kmem_cache_free(vm_area_cachep, tmp);
64939 +fail_nomem:
64940 + vm_unacct_memory(charge);
64941 + return NULL;
64942 +}
64943 +
64944 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
64945 {
64946 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
64947 struct rb_node **rb_link, *rb_parent;
64948 int retval;
64949 - unsigned long charge;
64950 - struct mempolicy *pol;
64951
64952 down_write(&oldmm->mmap_sem);
64953 flush_cache_dup_mm(oldmm);
64954 @@ -326,8 +390,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
64955 mm->locked_vm = 0;
64956 mm->mmap = NULL;
64957 mm->mmap_cache = NULL;
64958 - mm->free_area_cache = oldmm->mmap_base;
64959 - mm->cached_hole_size = ~0UL;
64960 + mm->free_area_cache = oldmm->free_area_cache;
64961 + mm->cached_hole_size = oldmm->cached_hole_size;
64962 mm->map_count = 0;
64963 cpumask_clear(mm_cpumask(mm));
64964 mm->mm_rb = RB_ROOT;
64965 @@ -343,8 +407,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
64966
64967 prev = NULL;
64968 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
64969 - struct file *file;
64970 -
64971 if (mpnt->vm_flags & VM_DONTCOPY) {
64972 long pages = vma_pages(mpnt);
64973 mm->total_vm -= pages;
64974 @@ -352,53 +414,11 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
64975 -pages);
64976 continue;
64977 }
64978 - charge = 0;
64979 - if (mpnt->vm_flags & VM_ACCOUNT) {
64980 - unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
64981 - if (security_vm_enough_memory(len))
64982 - goto fail_nomem;
64983 - charge = len;
64984 + tmp = dup_vma(mm, mpnt);
64985 + if (!tmp) {
64986 + retval = -ENOMEM;
64987 + goto out;
64988 }
64989 - tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
64990 - if (!tmp)
64991 - goto fail_nomem;
64992 - *tmp = *mpnt;
64993 - INIT_LIST_HEAD(&tmp->anon_vma_chain);
64994 - pol = mpol_dup(vma_policy(mpnt));
64995 - retval = PTR_ERR(pol);
64996 - if (IS_ERR(pol))
64997 - goto fail_nomem_policy;
64998 - vma_set_policy(tmp, pol);
64999 - tmp->vm_mm = mm;
65000 - if (anon_vma_fork(tmp, mpnt))
65001 - goto fail_nomem_anon_vma_fork;
65002 - tmp->vm_flags &= ~VM_LOCKED;
65003 - tmp->vm_next = tmp->vm_prev = NULL;
65004 - file = tmp->vm_file;
65005 - if (file) {
65006 - struct inode *inode = file->f_path.dentry->d_inode;
65007 - struct address_space *mapping = file->f_mapping;
65008 -
65009 - get_file(file);
65010 - if (tmp->vm_flags & VM_DENYWRITE)
65011 - atomic_dec(&inode->i_writecount);
65012 - mutex_lock(&mapping->i_mmap_mutex);
65013 - if (tmp->vm_flags & VM_SHARED)
65014 - mapping->i_mmap_writable++;
65015 - flush_dcache_mmap_lock(mapping);
65016 - /* insert tmp into the share list, just after mpnt */
65017 - vma_prio_tree_add(tmp, mpnt);
65018 - flush_dcache_mmap_unlock(mapping);
65019 - mutex_unlock(&mapping->i_mmap_mutex);
65020 - }
65021 -
65022 - /*
65023 - * Clear hugetlb-related page reserves for children. This only
65024 - * affects MAP_PRIVATE mappings. Faults generated by the child
65025 - * are not guaranteed to succeed, even if read-only
65026 - */
65027 - if (is_vm_hugetlb_page(tmp))
65028 - reset_vma_resv_huge_pages(tmp);
65029
65030 /*
65031 * Link in the new vma and copy the page table entries.
65032 @@ -421,6 +441,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65033 if (retval)
65034 goto out;
65035 }
65036 +
65037 +#ifdef CONFIG_PAX_SEGMEXEC
65038 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
65039 + struct vm_area_struct *mpnt_m;
65040 +
65041 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
65042 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
65043 +
65044 + if (!mpnt->vm_mirror)
65045 + continue;
65046 +
65047 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
65048 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
65049 + mpnt->vm_mirror = mpnt_m;
65050 + } else {
65051 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
65052 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
65053 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
65054 + mpnt->vm_mirror->vm_mirror = mpnt;
65055 + }
65056 + }
65057 + BUG_ON(mpnt_m);
65058 + }
65059 +#endif
65060 +
65061 /* a new mm has just been created */
65062 arch_dup_mmap(oldmm, mm);
65063 retval = 0;
65064 @@ -429,14 +474,6 @@ out:
65065 flush_tlb_mm(oldmm);
65066 up_write(&oldmm->mmap_sem);
65067 return retval;
65068 -fail_nomem_anon_vma_fork:
65069 - mpol_put(pol);
65070 -fail_nomem_policy:
65071 - kmem_cache_free(vm_area_cachep, tmp);
65072 -fail_nomem:
65073 - retval = -ENOMEM;
65074 - vm_unacct_memory(charge);
65075 - goto out;
65076 }
65077
65078 static inline int mm_alloc_pgd(struct mm_struct *mm)
65079 @@ -658,8 +695,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
65080 return ERR_PTR(err);
65081
65082 mm = get_task_mm(task);
65083 - if (mm && mm != current->mm &&
65084 - !ptrace_may_access(task, mode)) {
65085 + if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
65086 + (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
65087 mmput(mm);
65088 mm = ERR_PTR(-EACCES);
65089 }
65090 @@ -881,13 +918,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
65091 spin_unlock(&fs->lock);
65092 return -EAGAIN;
65093 }
65094 - fs->users++;
65095 + atomic_inc(&fs->users);
65096 spin_unlock(&fs->lock);
65097 return 0;
65098 }
65099 tsk->fs = copy_fs_struct(fs);
65100 if (!tsk->fs)
65101 return -ENOMEM;
65102 + gr_set_chroot_entries(tsk, &tsk->fs->root);
65103 return 0;
65104 }
65105
65106 @@ -1151,6 +1189,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
65107 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
65108 #endif
65109 retval = -EAGAIN;
65110 +
65111 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
65112 +
65113 if (atomic_read(&p->real_cred->user->processes) >=
65114 task_rlimit(p, RLIMIT_NPROC)) {
65115 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
65116 @@ -1306,6 +1347,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
65117 if (clone_flags & CLONE_THREAD)
65118 p->tgid = current->tgid;
65119
65120 + gr_copy_label(p);
65121 +
65122 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
65123 /*
65124 * Clear TID on mm_release()?
65125 @@ -1472,6 +1515,8 @@ bad_fork_cleanup_count:
65126 bad_fork_free:
65127 free_task(p);
65128 fork_out:
65129 + gr_log_forkfail(retval);
65130 +
65131 return ERR_PTR(retval);
65132 }
65133
65134 @@ -1572,6 +1617,8 @@ long do_fork(unsigned long clone_flags,
65135 if (clone_flags & CLONE_PARENT_SETTID)
65136 put_user(nr, parent_tidptr);
65137
65138 + gr_handle_brute_check();
65139 +
65140 if (clone_flags & CLONE_VFORK) {
65141 p->vfork_done = &vfork;
65142 init_completion(&vfork);
65143 @@ -1670,7 +1717,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
65144 return 0;
65145
65146 /* don't need lock here; in the worst case we'll do useless copy */
65147 - if (fs->users == 1)
65148 + if (atomic_read(&fs->users) == 1)
65149 return 0;
65150
65151 *new_fsp = copy_fs_struct(fs);
65152 @@ -1759,7 +1806,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
65153 fs = current->fs;
65154 spin_lock(&fs->lock);
65155 current->fs = new_fs;
65156 - if (--fs->users)
65157 + gr_set_chroot_entries(current, &current->fs->root);
65158 + if (atomic_dec_return(&fs->users))
65159 new_fs = NULL;
65160 else
65161 new_fs = fs;
65162 diff --git a/kernel/futex.c b/kernel/futex.c
65163 index 1614be2..37abc7e 100644
65164 --- a/kernel/futex.c
65165 +++ b/kernel/futex.c
65166 @@ -54,6 +54,7 @@
65167 #include <linux/mount.h>
65168 #include <linux/pagemap.h>
65169 #include <linux/syscalls.h>
65170 +#include <linux/ptrace.h>
65171 #include <linux/signal.h>
65172 #include <linux/export.h>
65173 #include <linux/magic.h>
65174 @@ -238,6 +239,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
65175 struct page *page, *page_head;
65176 int err, ro = 0;
65177
65178 +#ifdef CONFIG_PAX_SEGMEXEC
65179 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
65180 + return -EFAULT;
65181 +#endif
65182 +
65183 /*
65184 * The futex address must be "naturally" aligned.
65185 */
65186 @@ -2459,6 +2465,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
65187 if (!p)
65188 goto err_unlock;
65189 ret = -EPERM;
65190 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65191 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
65192 + goto err_unlock;
65193 +#endif
65194 pcred = __task_cred(p);
65195 /* If victim is in different user_ns, then uids are not
65196 comparable, so we must have CAP_SYS_PTRACE */
65197 @@ -2724,6 +2734,7 @@ static int __init futex_init(void)
65198 {
65199 u32 curval;
65200 int i;
65201 + mm_segment_t oldfs;
65202
65203 /*
65204 * This will fail and we want it. Some arch implementations do
65205 @@ -2735,8 +2746,11 @@ static int __init futex_init(void)
65206 * implementation, the non-functional ones will return
65207 * -ENOSYS.
65208 */
65209 + oldfs = get_fs();
65210 + set_fs(USER_DS);
65211 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
65212 futex_cmpxchg_enabled = 1;
65213 + set_fs(oldfs);
65214
65215 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
65216 plist_head_init(&futex_queues[i].chain);
65217 diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
65218 index 5f9e689..582d46d 100644
65219 --- a/kernel/futex_compat.c
65220 +++ b/kernel/futex_compat.c
65221 @@ -10,6 +10,7 @@
65222 #include <linux/compat.h>
65223 #include <linux/nsproxy.h>
65224 #include <linux/futex.h>
65225 +#include <linux/ptrace.h>
65226
65227 #include <asm/uaccess.h>
65228
65229 @@ -136,7 +137,8 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
65230 {
65231 struct compat_robust_list_head __user *head;
65232 unsigned long ret;
65233 - const struct cred *cred = current_cred(), *pcred;
65234 + const struct cred *cred = current_cred();
65235 + const struct cred *pcred;
65236
65237 if (!futex_cmpxchg_enabled)
65238 return -ENOSYS;
65239 @@ -152,6 +154,10 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
65240 if (!p)
65241 goto err_unlock;
65242 ret = -EPERM;
65243 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65244 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
65245 + goto err_unlock;
65246 +#endif
65247 pcred = __task_cred(p);
65248 /* If victim is in different user_ns, then uids are not
65249 comparable, so we must have CAP_SYS_PTRACE */
65250 diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
65251 index 9b22d03..6295b62 100644
65252 --- a/kernel/gcov/base.c
65253 +++ b/kernel/gcov/base.c
65254 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
65255 }
65256
65257 #ifdef CONFIG_MODULES
65258 -static inline int within(void *addr, void *start, unsigned long size)
65259 -{
65260 - return ((addr >= start) && (addr < start + size));
65261 -}
65262 -
65263 /* Update list and generate events when modules are unloaded. */
65264 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
65265 void *data)
65266 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
65267 prev = NULL;
65268 /* Remove entries located in module from linked list. */
65269 for (info = gcov_info_head; info; info = info->next) {
65270 - if (within(info, mod->module_core, mod->core_size)) {
65271 + if (within_module_core_rw((unsigned long)info, mod)) {
65272 if (prev)
65273 prev->next = info->next;
65274 else
65275 diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
65276 index ae34bf5..4e2f3d0 100644
65277 --- a/kernel/hrtimer.c
65278 +++ b/kernel/hrtimer.c
65279 @@ -1393,7 +1393,7 @@ void hrtimer_peek_ahead_timers(void)
65280 local_irq_restore(flags);
65281 }
65282
65283 -static void run_hrtimer_softirq(struct softirq_action *h)
65284 +static void run_hrtimer_softirq(void)
65285 {
65286 hrtimer_peek_ahead_timers();
65287 }
65288 diff --git a/kernel/jump_label.c b/kernel/jump_label.c
65289 index 01d3b70..9e4d098 100644
65290 --- a/kernel/jump_label.c
65291 +++ b/kernel/jump_label.c
65292 @@ -55,7 +55,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
65293
65294 size = (((unsigned long)stop - (unsigned long)start)
65295 / sizeof(struct jump_entry));
65296 + pax_open_kernel();
65297 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
65298 + pax_close_kernel();
65299 }
65300
65301 static void jump_label_update(struct jump_label_key *key, int enable);
65302 @@ -340,10 +342,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
65303 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
65304 struct jump_entry *iter;
65305
65306 + pax_open_kernel();
65307 for (iter = iter_start; iter < iter_stop; iter++) {
65308 if (within_module_init(iter->code, mod))
65309 iter->code = 0;
65310 }
65311 + pax_close_kernel();
65312 }
65313
65314 static int
65315 diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
65316 index 079f1d3..a407562 100644
65317 --- a/kernel/kallsyms.c
65318 +++ b/kernel/kallsyms.c
65319 @@ -11,6 +11,9 @@
65320 * Changed the compression method from stem compression to "table lookup"
65321 * compression (see scripts/kallsyms.c for a more complete description)
65322 */
65323 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65324 +#define __INCLUDED_BY_HIDESYM 1
65325 +#endif
65326 #include <linux/kallsyms.h>
65327 #include <linux/module.h>
65328 #include <linux/init.h>
65329 @@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
65330
65331 static inline int is_kernel_inittext(unsigned long addr)
65332 {
65333 + if (system_state != SYSTEM_BOOTING)
65334 + return 0;
65335 +
65336 if (addr >= (unsigned long)_sinittext
65337 && addr <= (unsigned long)_einittext)
65338 return 1;
65339 return 0;
65340 }
65341
65342 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
65343 +#ifdef CONFIG_MODULES
65344 +static inline int is_module_text(unsigned long addr)
65345 +{
65346 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
65347 + return 1;
65348 +
65349 + addr = ktla_ktva(addr);
65350 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
65351 +}
65352 +#else
65353 +static inline int is_module_text(unsigned long addr)
65354 +{
65355 + return 0;
65356 +}
65357 +#endif
65358 +#endif
65359 +
65360 static inline int is_kernel_text(unsigned long addr)
65361 {
65362 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
65363 @@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
65364
65365 static inline int is_kernel(unsigned long addr)
65366 {
65367 +
65368 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
65369 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
65370 + return 1;
65371 +
65372 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
65373 +#else
65374 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
65375 +#endif
65376 +
65377 return 1;
65378 return in_gate_area_no_mm(addr);
65379 }
65380
65381 static int is_ksym_addr(unsigned long addr)
65382 {
65383 +
65384 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
65385 + if (is_module_text(addr))
65386 + return 0;
65387 +#endif
65388 +
65389 if (all_var)
65390 return is_kernel(addr);
65391
65392 @@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
65393
65394 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
65395 {
65396 - iter->name[0] = '\0';
65397 iter->nameoff = get_symbol_offset(new_pos);
65398 iter->pos = new_pos;
65399 }
65400 @@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, void *p)
65401 {
65402 struct kallsym_iter *iter = m->private;
65403
65404 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65405 + if (current_uid())
65406 + return 0;
65407 +#endif
65408 +
65409 /* Some debugging symbols have no name. Ignore them. */
65410 if (!iter->name[0])
65411 return 0;
65412 @@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
65413 struct kallsym_iter *iter;
65414 int ret;
65415
65416 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
65417 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
65418 if (!iter)
65419 return -ENOMEM;
65420 reset_iter(iter, 0);
65421 diff --git a/kernel/kexec.c b/kernel/kexec.c
65422 index 7b08867..3bac516 100644
65423 --- a/kernel/kexec.c
65424 +++ b/kernel/kexec.c
65425 @@ -1047,7 +1047,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
65426 unsigned long flags)
65427 {
65428 struct compat_kexec_segment in;
65429 - struct kexec_segment out, __user *ksegments;
65430 + struct kexec_segment out;
65431 + struct kexec_segment __user *ksegments;
65432 unsigned long i, result;
65433
65434 /* Don't allow clients that don't understand the native
65435 diff --git a/kernel/kmod.c b/kernel/kmod.c
65436 index a0a8854..642b106 100644
65437 --- a/kernel/kmod.c
65438 +++ b/kernel/kmod.c
65439 @@ -75,13 +75,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
65440 * If module auto-loading support is disabled then this function
65441 * becomes a no-operation.
65442 */
65443 -int __request_module(bool wait, const char *fmt, ...)
65444 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
65445 {
65446 - va_list args;
65447 char module_name[MODULE_NAME_LEN];
65448 unsigned int max_modprobes;
65449 int ret;
65450 - char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
65451 + char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
65452 static char *envp[] = { "HOME=/",
65453 "TERM=linux",
65454 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
65455 @@ -90,9 +89,7 @@ int __request_module(bool wait, const char *fmt, ...)
65456 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
65457 static int kmod_loop_msg;
65458
65459 - va_start(args, fmt);
65460 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
65461 - va_end(args);
65462 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
65463 if (ret >= MODULE_NAME_LEN)
65464 return -ENAMETOOLONG;
65465
65466 @@ -100,6 +97,20 @@ int __request_module(bool wait, const char *fmt, ...)
65467 if (ret)
65468 return ret;
65469
65470 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
65471 + if (!current_uid()) {
65472 + /* hack to workaround consolekit/udisks stupidity */
65473 + read_lock(&tasklist_lock);
65474 + if (!strcmp(current->comm, "mount") &&
65475 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
65476 + read_unlock(&tasklist_lock);
65477 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
65478 + return -EPERM;
65479 + }
65480 + read_unlock(&tasklist_lock);
65481 + }
65482 +#endif
65483 +
65484 /* If modprobe needs a service that is in a module, we get a recursive
65485 * loop. Limit the number of running kmod threads to max_threads/2 or
65486 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
65487 @@ -135,6 +146,47 @@ int __request_module(bool wait, const char *fmt, ...)
65488 atomic_dec(&kmod_concurrent);
65489 return ret;
65490 }
65491 +
65492 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
65493 +{
65494 + va_list args;
65495 + int ret;
65496 +
65497 + va_start(args, fmt);
65498 + ret = ____request_module(wait, module_param, fmt, args);
65499 + va_end(args);
65500 +
65501 + return ret;
65502 +}
65503 +
65504 +int __request_module(bool wait, const char *fmt, ...)
65505 +{
65506 + va_list args;
65507 + int ret;
65508 +
65509 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
65510 + if (current_uid()) {
65511 + char module_param[MODULE_NAME_LEN];
65512 +
65513 + memset(module_param, 0, sizeof(module_param));
65514 +
65515 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
65516 +
65517 + va_start(args, fmt);
65518 + ret = ____request_module(wait, module_param, fmt, args);
65519 + va_end(args);
65520 +
65521 + return ret;
65522 + }
65523 +#endif
65524 +
65525 + va_start(args, fmt);
65526 + ret = ____request_module(wait, NULL, fmt, args);
65527 + va_end(args);
65528 +
65529 + return ret;
65530 +}
65531 +
65532 EXPORT_SYMBOL(__request_module);
65533 #endif /* CONFIG_MODULES */
65534
65535 @@ -224,7 +276,7 @@ static int wait_for_helper(void *data)
65536 *
65537 * Thus the __user pointer cast is valid here.
65538 */
65539 - sys_wait4(pid, (int __user *)&ret, 0, NULL);
65540 + sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
65541
65542 /*
65543 * If ret is 0, either ____call_usermodehelper failed and the
65544 diff --git a/kernel/kprobes.c b/kernel/kprobes.c
65545 index c62b854..cb67968 100644
65546 --- a/kernel/kprobes.c
65547 +++ b/kernel/kprobes.c
65548 @@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
65549 * kernel image and loaded module images reside. This is required
65550 * so x86_64 can correctly handle the %rip-relative fixups.
65551 */
65552 - kip->insns = module_alloc(PAGE_SIZE);
65553 + kip->insns = module_alloc_exec(PAGE_SIZE);
65554 if (!kip->insns) {
65555 kfree(kip);
65556 return NULL;
65557 @@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
65558 */
65559 if (!list_is_singular(&kip->list)) {
65560 list_del(&kip->list);
65561 - module_free(NULL, kip->insns);
65562 + module_free_exec(NULL, kip->insns);
65563 kfree(kip);
65564 }
65565 return 1;
65566 @@ -1955,7 +1955,7 @@ static int __init init_kprobes(void)
65567 {
65568 int i, err = 0;
65569 unsigned long offset = 0, size = 0;
65570 - char *modname, namebuf[128];
65571 + char *modname, namebuf[KSYM_NAME_LEN];
65572 const char *symbol_name;
65573 void *addr;
65574 struct kprobe_blackpoint *kb;
65575 @@ -2081,7 +2081,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
65576 const char *sym = NULL;
65577 unsigned int i = *(loff_t *) v;
65578 unsigned long offset = 0;
65579 - char *modname, namebuf[128];
65580 + char *modname, namebuf[KSYM_NAME_LEN];
65581
65582 head = &kprobe_table[i];
65583 preempt_disable();
65584 diff --git a/kernel/lockdep.c b/kernel/lockdep.c
65585 index 8889f7d..95319b7 100644
65586 --- a/kernel/lockdep.c
65587 +++ b/kernel/lockdep.c
65588 @@ -590,6 +590,10 @@ static int static_obj(void *obj)
65589 end = (unsigned long) &_end,
65590 addr = (unsigned long) obj;
65591
65592 +#ifdef CONFIG_PAX_KERNEXEC
65593 + start = ktla_ktva(start);
65594 +#endif
65595 +
65596 /*
65597 * static variable?
65598 */
65599 @@ -730,6 +734,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
65600 if (!static_obj(lock->key)) {
65601 debug_locks_off();
65602 printk("INFO: trying to register non-static key.\n");
65603 + printk("lock:%pS key:%pS.\n", lock, lock->key);
65604 printk("the code is fine but needs lockdep annotation.\n");
65605 printk("turning off the locking correctness validator.\n");
65606 dump_stack();
65607 @@ -3042,7 +3047,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
65608 if (!class)
65609 return 0;
65610 }
65611 - atomic_inc((atomic_t *)&class->ops);
65612 + atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
65613 if (very_verbose(class)) {
65614 printk("\nacquire class [%p] %s", class->key, class->name);
65615 if (class->name_version > 1)
65616 diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
65617 index 91c32a0..b2c71c5 100644
65618 --- a/kernel/lockdep_proc.c
65619 +++ b/kernel/lockdep_proc.c
65620 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
65621
65622 static void print_name(struct seq_file *m, struct lock_class *class)
65623 {
65624 - char str[128];
65625 + char str[KSYM_NAME_LEN];
65626 const char *name = class->name;
65627
65628 if (!name) {
65629 diff --git a/kernel/module.c b/kernel/module.c
65630 index 2c93276..476fe81 100644
65631 --- a/kernel/module.c
65632 +++ b/kernel/module.c
65633 @@ -58,6 +58,7 @@
65634 #include <linux/jump_label.h>
65635 #include <linux/pfn.h>
65636 #include <linux/bsearch.h>
65637 +#include <linux/grsecurity.h>
65638
65639 #define CREATE_TRACE_POINTS
65640 #include <trace/events/module.h>
65641 @@ -113,7 +114,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
65642
65643 /* Bounds of module allocation, for speeding __module_address.
65644 * Protected by module_mutex. */
65645 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
65646 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
65647 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
65648
65649 int register_module_notifier(struct notifier_block * nb)
65650 {
65651 @@ -277,7 +279,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
65652 return true;
65653
65654 list_for_each_entry_rcu(mod, &modules, list) {
65655 - struct symsearch arr[] = {
65656 + struct symsearch modarr[] = {
65657 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
65658 NOT_GPL_ONLY, false },
65659 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
65660 @@ -299,7 +301,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
65661 #endif
65662 };
65663
65664 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
65665 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
65666 return true;
65667 }
65668 return false;
65669 @@ -431,7 +433,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
65670 static int percpu_modalloc(struct module *mod,
65671 unsigned long size, unsigned long align)
65672 {
65673 - if (align > PAGE_SIZE) {
65674 + if (align-1 >= PAGE_SIZE) {
65675 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
65676 mod->name, align, PAGE_SIZE);
65677 align = PAGE_SIZE;
65678 @@ -1001,7 +1003,7 @@ struct module_attribute module_uevent =
65679 static ssize_t show_coresize(struct module_attribute *mattr,
65680 struct module_kobject *mk, char *buffer)
65681 {
65682 - return sprintf(buffer, "%u\n", mk->mod->core_size);
65683 + return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
65684 }
65685
65686 static struct module_attribute modinfo_coresize =
65687 @@ -1010,7 +1012,7 @@ static struct module_attribute modinfo_coresize =
65688 static ssize_t show_initsize(struct module_attribute *mattr,
65689 struct module_kobject *mk, char *buffer)
65690 {
65691 - return sprintf(buffer, "%u\n", mk->mod->init_size);
65692 + return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
65693 }
65694
65695 static struct module_attribute modinfo_initsize =
65696 @@ -1224,7 +1226,7 @@ resolve_symbol_wait(struct module *mod,
65697 */
65698 #ifdef CONFIG_SYSFS
65699
65700 -#ifdef CONFIG_KALLSYMS
65701 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
65702 static inline bool sect_empty(const Elf_Shdr *sect)
65703 {
65704 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
65705 @@ -1690,21 +1692,21 @@ static void set_section_ro_nx(void *base,
65706
65707 static void unset_module_core_ro_nx(struct module *mod)
65708 {
65709 - set_page_attributes(mod->module_core + mod->core_text_size,
65710 - mod->module_core + mod->core_size,
65711 + set_page_attributes(mod->module_core_rw,
65712 + mod->module_core_rw + mod->core_size_rw,
65713 set_memory_x);
65714 - set_page_attributes(mod->module_core,
65715 - mod->module_core + mod->core_ro_size,
65716 + set_page_attributes(mod->module_core_rx,
65717 + mod->module_core_rx + mod->core_size_rx,
65718 set_memory_rw);
65719 }
65720
65721 static void unset_module_init_ro_nx(struct module *mod)
65722 {
65723 - set_page_attributes(mod->module_init + mod->init_text_size,
65724 - mod->module_init + mod->init_size,
65725 + set_page_attributes(mod->module_init_rw,
65726 + mod->module_init_rw + mod->init_size_rw,
65727 set_memory_x);
65728 - set_page_attributes(mod->module_init,
65729 - mod->module_init + mod->init_ro_size,
65730 + set_page_attributes(mod->module_init_rx,
65731 + mod->module_init_rx + mod->init_size_rx,
65732 set_memory_rw);
65733 }
65734
65735 @@ -1715,14 +1717,14 @@ void set_all_modules_text_rw(void)
65736
65737 mutex_lock(&module_mutex);
65738 list_for_each_entry_rcu(mod, &modules, list) {
65739 - if ((mod->module_core) && (mod->core_text_size)) {
65740 - set_page_attributes(mod->module_core,
65741 - mod->module_core + mod->core_text_size,
65742 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
65743 + set_page_attributes(mod->module_core_rx,
65744 + mod->module_core_rx + mod->core_size_rx,
65745 set_memory_rw);
65746 }
65747 - if ((mod->module_init) && (mod->init_text_size)) {
65748 - set_page_attributes(mod->module_init,
65749 - mod->module_init + mod->init_text_size,
65750 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
65751 + set_page_attributes(mod->module_init_rx,
65752 + mod->module_init_rx + mod->init_size_rx,
65753 set_memory_rw);
65754 }
65755 }
65756 @@ -1736,14 +1738,14 @@ void set_all_modules_text_ro(void)
65757
65758 mutex_lock(&module_mutex);
65759 list_for_each_entry_rcu(mod, &modules, list) {
65760 - if ((mod->module_core) && (mod->core_text_size)) {
65761 - set_page_attributes(mod->module_core,
65762 - mod->module_core + mod->core_text_size,
65763 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
65764 + set_page_attributes(mod->module_core_rx,
65765 + mod->module_core_rx + mod->core_size_rx,
65766 set_memory_ro);
65767 }
65768 - if ((mod->module_init) && (mod->init_text_size)) {
65769 - set_page_attributes(mod->module_init,
65770 - mod->module_init + mod->init_text_size,
65771 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
65772 + set_page_attributes(mod->module_init_rx,
65773 + mod->module_init_rx + mod->init_size_rx,
65774 set_memory_ro);
65775 }
65776 }
65777 @@ -1789,16 +1791,19 @@ static void free_module(struct module *mod)
65778
65779 /* This may be NULL, but that's OK */
65780 unset_module_init_ro_nx(mod);
65781 - module_free(mod, mod->module_init);
65782 + module_free(mod, mod->module_init_rw);
65783 + module_free_exec(mod, mod->module_init_rx);
65784 kfree(mod->args);
65785 percpu_modfree(mod);
65786
65787 /* Free lock-classes: */
65788 - lockdep_free_key_range(mod->module_core, mod->core_size);
65789 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
65790 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
65791
65792 /* Finally, free the core (containing the module structure) */
65793 unset_module_core_ro_nx(mod);
65794 - module_free(mod, mod->module_core);
65795 + module_free_exec(mod, mod->module_core_rx);
65796 + module_free(mod, mod->module_core_rw);
65797
65798 #ifdef CONFIG_MPU
65799 update_protections(current->mm);
65800 @@ -1867,10 +1872,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
65801 unsigned int i;
65802 int ret = 0;
65803 const struct kernel_symbol *ksym;
65804 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
65805 + int is_fs_load = 0;
65806 + int register_filesystem_found = 0;
65807 + char *p;
65808 +
65809 + p = strstr(mod->args, "grsec_modharden_fs");
65810 + if (p) {
65811 + char *endptr = p + strlen("grsec_modharden_fs");
65812 + /* copy \0 as well */
65813 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
65814 + is_fs_load = 1;
65815 + }
65816 +#endif
65817
65818 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
65819 const char *name = info->strtab + sym[i].st_name;
65820
65821 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
65822 + /* it's a real shame this will never get ripped and copied
65823 + upstream! ;(
65824 + */
65825 + if (is_fs_load && !strcmp(name, "register_filesystem"))
65826 + register_filesystem_found = 1;
65827 +#endif
65828 +
65829 switch (sym[i].st_shndx) {
65830 case SHN_COMMON:
65831 /* We compiled with -fno-common. These are not
65832 @@ -1891,7 +1917,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
65833 ksym = resolve_symbol_wait(mod, info, name);
65834 /* Ok if resolved. */
65835 if (ksym && !IS_ERR(ksym)) {
65836 + pax_open_kernel();
65837 sym[i].st_value = ksym->value;
65838 + pax_close_kernel();
65839 break;
65840 }
65841
65842 @@ -1910,11 +1938,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
65843 secbase = (unsigned long)mod_percpu(mod);
65844 else
65845 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
65846 + pax_open_kernel();
65847 sym[i].st_value += secbase;
65848 + pax_close_kernel();
65849 break;
65850 }
65851 }
65852
65853 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
65854 + if (is_fs_load && !register_filesystem_found) {
65855 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
65856 + ret = -EPERM;
65857 + }
65858 +#endif
65859 +
65860 return ret;
65861 }
65862
65863 @@ -2018,22 +2055,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
65864 || s->sh_entsize != ~0UL
65865 || strstarts(sname, ".init"))
65866 continue;
65867 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
65868 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
65869 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
65870 + else
65871 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
65872 pr_debug("\t%s\n", sname);
65873 }
65874 - switch (m) {
65875 - case 0: /* executable */
65876 - mod->core_size = debug_align(mod->core_size);
65877 - mod->core_text_size = mod->core_size;
65878 - break;
65879 - case 1: /* RO: text and ro-data */
65880 - mod->core_size = debug_align(mod->core_size);
65881 - mod->core_ro_size = mod->core_size;
65882 - break;
65883 - case 3: /* whole core */
65884 - mod->core_size = debug_align(mod->core_size);
65885 - break;
65886 - }
65887 }
65888
65889 pr_debug("Init section allocation order:\n");
65890 @@ -2047,23 +2074,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
65891 || s->sh_entsize != ~0UL
65892 || !strstarts(sname, ".init"))
65893 continue;
65894 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
65895 - | INIT_OFFSET_MASK);
65896 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
65897 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
65898 + else
65899 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
65900 + s->sh_entsize |= INIT_OFFSET_MASK;
65901 pr_debug("\t%s\n", sname);
65902 }
65903 - switch (m) {
65904 - case 0: /* executable */
65905 - mod->init_size = debug_align(mod->init_size);
65906 - mod->init_text_size = mod->init_size;
65907 - break;
65908 - case 1: /* RO: text and ro-data */
65909 - mod->init_size = debug_align(mod->init_size);
65910 - mod->init_ro_size = mod->init_size;
65911 - break;
65912 - case 3: /* whole init */
65913 - mod->init_size = debug_align(mod->init_size);
65914 - break;
65915 - }
65916 }
65917 }
65918
65919 @@ -2235,7 +2252,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
65920
65921 /* Put symbol section at end of init part of module. */
65922 symsect->sh_flags |= SHF_ALLOC;
65923 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
65924 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
65925 info->index.sym) | INIT_OFFSET_MASK;
65926 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
65927
65928 @@ -2250,13 +2267,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
65929 }
65930
65931 /* Append room for core symbols at end of core part. */
65932 - info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
65933 - info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
65934 - mod->core_size += strtab_size;
65935 + info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
65936 + info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
65937 + mod->core_size_rx += strtab_size;
65938
65939 /* Put string table section at end of init part of module. */
65940 strsect->sh_flags |= SHF_ALLOC;
65941 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
65942 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
65943 info->index.str) | INIT_OFFSET_MASK;
65944 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
65945 }
65946 @@ -2274,12 +2291,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
65947 /* Make sure we get permanent strtab: don't use info->strtab. */
65948 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
65949
65950 + pax_open_kernel();
65951 +
65952 /* Set types up while we still have access to sections. */
65953 for (i = 0; i < mod->num_symtab; i++)
65954 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
65955
65956 - mod->core_symtab = dst = mod->module_core + info->symoffs;
65957 - mod->core_strtab = s = mod->module_core + info->stroffs;
65958 + mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
65959 + mod->core_strtab = s = mod->module_core_rx + info->stroffs;
65960 src = mod->symtab;
65961 *dst = *src;
65962 *s++ = 0;
65963 @@ -2292,6 +2311,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
65964 s += strlcpy(s, &mod->strtab[src->st_name], KSYM_NAME_LEN) + 1;
65965 }
65966 mod->core_num_syms = ndst;
65967 +
65968 + pax_close_kernel();
65969 }
65970 #else
65971 static inline void layout_symtab(struct module *mod, struct load_info *info)
65972 @@ -2325,17 +2346,33 @@ void * __weak module_alloc(unsigned long size)
65973 return size == 0 ? NULL : vmalloc_exec(size);
65974 }
65975
65976 -static void *module_alloc_update_bounds(unsigned long size)
65977 +static void *module_alloc_update_bounds_rw(unsigned long size)
65978 {
65979 void *ret = module_alloc(size);
65980
65981 if (ret) {
65982 mutex_lock(&module_mutex);
65983 /* Update module bounds. */
65984 - if ((unsigned long)ret < module_addr_min)
65985 - module_addr_min = (unsigned long)ret;
65986 - if ((unsigned long)ret + size > module_addr_max)
65987 - module_addr_max = (unsigned long)ret + size;
65988 + if ((unsigned long)ret < module_addr_min_rw)
65989 + module_addr_min_rw = (unsigned long)ret;
65990 + if ((unsigned long)ret + size > module_addr_max_rw)
65991 + module_addr_max_rw = (unsigned long)ret + size;
65992 + mutex_unlock(&module_mutex);
65993 + }
65994 + return ret;
65995 +}
65996 +
65997 +static void *module_alloc_update_bounds_rx(unsigned long size)
65998 +{
65999 + void *ret = module_alloc_exec(size);
66000 +
66001 + if (ret) {
66002 + mutex_lock(&module_mutex);
66003 + /* Update module bounds. */
66004 + if ((unsigned long)ret < module_addr_min_rx)
66005 + module_addr_min_rx = (unsigned long)ret;
66006 + if ((unsigned long)ret + size > module_addr_max_rx)
66007 + module_addr_max_rx = (unsigned long)ret + size;
66008 mutex_unlock(&module_mutex);
66009 }
66010 return ret;
66011 @@ -2513,8 +2550,14 @@ static struct module *setup_load_info(struct load_info *info)
66012 static int check_modinfo(struct module *mod, struct load_info *info)
66013 {
66014 const char *modmagic = get_modinfo(info, "vermagic");
66015 + const char *license = get_modinfo(info, "license");
66016 int err;
66017
66018 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
66019 + if (!license || !license_is_gpl_compatible(license))
66020 + return -ENOEXEC;
66021 +#endif
66022 +
66023 /* This is allowed: modprobe --force will invalidate it. */
66024 if (!modmagic) {
66025 err = try_to_force_load(mod, "bad vermagic");
66026 @@ -2537,7 +2580,7 @@ static int check_modinfo(struct module *mod, struct load_info *info)
66027 }
66028
66029 /* Set up license info based on the info section */
66030 - set_license(mod, get_modinfo(info, "license"));
66031 + set_license(mod, license);
66032
66033 return 0;
66034 }
66035 @@ -2631,7 +2674,7 @@ static int move_module(struct module *mod, struct load_info *info)
66036 void *ptr;
66037
66038 /* Do the allocs. */
66039 - ptr = module_alloc_update_bounds(mod->core_size);
66040 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
66041 /*
66042 * The pointer to this block is stored in the module structure
66043 * which is inside the block. Just mark it as not being a
66044 @@ -2641,23 +2684,50 @@ static int move_module(struct module *mod, struct load_info *info)
66045 if (!ptr)
66046 return -ENOMEM;
66047
66048 - memset(ptr, 0, mod->core_size);
66049 - mod->module_core = ptr;
66050 + memset(ptr, 0, mod->core_size_rw);
66051 + mod->module_core_rw = ptr;
66052
66053 - ptr = module_alloc_update_bounds(mod->init_size);
66054 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
66055 /*
66056 * The pointer to this block is stored in the module structure
66057 * which is inside the block. This block doesn't need to be
66058 * scanned as it contains data and code that will be freed
66059 * after the module is initialized.
66060 */
66061 - kmemleak_ignore(ptr);
66062 - if (!ptr && mod->init_size) {
66063 - module_free(mod, mod->module_core);
66064 + kmemleak_not_leak(ptr);
66065 + if (!ptr && mod->init_size_rw) {
66066 + module_free(mod, mod->module_core_rw);
66067 return -ENOMEM;
66068 }
66069 - memset(ptr, 0, mod->init_size);
66070 - mod->module_init = ptr;
66071 + memset(ptr, 0, mod->init_size_rw);
66072 + mod->module_init_rw = ptr;
66073 +
66074 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
66075 + kmemleak_not_leak(ptr);
66076 + if (!ptr) {
66077 + module_free(mod, mod->module_init_rw);
66078 + module_free(mod, mod->module_core_rw);
66079 + return -ENOMEM;
66080 + }
66081 +
66082 + pax_open_kernel();
66083 + memset(ptr, 0, mod->core_size_rx);
66084 + pax_close_kernel();
66085 + mod->module_core_rx = ptr;
66086 +
66087 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
66088 + kmemleak_not_leak(ptr);
66089 + if (!ptr && mod->init_size_rx) {
66090 + module_free_exec(mod, mod->module_core_rx);
66091 + module_free(mod, mod->module_init_rw);
66092 + module_free(mod, mod->module_core_rw);
66093 + return -ENOMEM;
66094 + }
66095 +
66096 + pax_open_kernel();
66097 + memset(ptr, 0, mod->init_size_rx);
66098 + pax_close_kernel();
66099 + mod->module_init_rx = ptr;
66100
66101 /* Transfer each section which specifies SHF_ALLOC */
66102 pr_debug("final section addresses:\n");
66103 @@ -2668,16 +2738,45 @@ static int move_module(struct module *mod, struct load_info *info)
66104 if (!(shdr->sh_flags & SHF_ALLOC))
66105 continue;
66106
66107 - if (shdr->sh_entsize & INIT_OFFSET_MASK)
66108 - dest = mod->module_init
66109 - + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
66110 - else
66111 - dest = mod->module_core + shdr->sh_entsize;
66112 + if (shdr->sh_entsize & INIT_OFFSET_MASK) {
66113 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
66114 + dest = mod->module_init_rw
66115 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
66116 + else
66117 + dest = mod->module_init_rx
66118 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
66119 + } else {
66120 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
66121 + dest = mod->module_core_rw + shdr->sh_entsize;
66122 + else
66123 + dest = mod->module_core_rx + shdr->sh_entsize;
66124 + }
66125 +
66126 + if (shdr->sh_type != SHT_NOBITS) {
66127 +
66128 +#ifdef CONFIG_PAX_KERNEXEC
66129 +#ifdef CONFIG_X86_64
66130 + if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
66131 + set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
66132 +#endif
66133 + if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
66134 + pax_open_kernel();
66135 + memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
66136 + pax_close_kernel();
66137 + } else
66138 +#endif
66139
66140 - if (shdr->sh_type != SHT_NOBITS)
66141 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
66142 + }
66143 /* Update sh_addr to point to copy in image. */
66144 - shdr->sh_addr = (unsigned long)dest;
66145 +
66146 +#ifdef CONFIG_PAX_KERNEXEC
66147 + if (shdr->sh_flags & SHF_EXECINSTR)
66148 + shdr->sh_addr = ktva_ktla((unsigned long)dest);
66149 + else
66150 +#endif
66151 +
66152 + shdr->sh_addr = (unsigned long)dest;
66153 pr_debug("\t0x%lx %s\n",
66154 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
66155 }
66156 @@ -2728,12 +2827,12 @@ static void flush_module_icache(const struct module *mod)
66157 * Do it before processing of module parameters, so the module
66158 * can provide parameter accessor functions of its own.
66159 */
66160 - if (mod->module_init)
66161 - flush_icache_range((unsigned long)mod->module_init,
66162 - (unsigned long)mod->module_init
66163 - + mod->init_size);
66164 - flush_icache_range((unsigned long)mod->module_core,
66165 - (unsigned long)mod->module_core + mod->core_size);
66166 + if (mod->module_init_rx)
66167 + flush_icache_range((unsigned long)mod->module_init_rx,
66168 + (unsigned long)mod->module_init_rx
66169 + + mod->init_size_rx);
66170 + flush_icache_range((unsigned long)mod->module_core_rx,
66171 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
66172
66173 set_fs(old_fs);
66174 }
66175 @@ -2803,8 +2902,10 @@ out:
66176 static void module_deallocate(struct module *mod, struct load_info *info)
66177 {
66178 percpu_modfree(mod);
66179 - module_free(mod, mod->module_init);
66180 - module_free(mod, mod->module_core);
66181 + module_free_exec(mod, mod->module_init_rx);
66182 + module_free_exec(mod, mod->module_core_rx);
66183 + module_free(mod, mod->module_init_rw);
66184 + module_free(mod, mod->module_core_rw);
66185 }
66186
66187 int __weak module_finalize(const Elf_Ehdr *hdr,
66188 @@ -2868,9 +2969,38 @@ static struct module *load_module(void __user *umod,
66189 if (err)
66190 goto free_unload;
66191
66192 + /* Now copy in args */
66193 + mod->args = strndup_user(uargs, ~0UL >> 1);
66194 + if (IS_ERR(mod->args)) {
66195 + err = PTR_ERR(mod->args);
66196 + goto free_unload;
66197 + }
66198 +
66199 /* Set up MODINFO_ATTR fields */
66200 setup_modinfo(mod, &info);
66201
66202 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
66203 + {
66204 + char *p, *p2;
66205 +
66206 + if (strstr(mod->args, "grsec_modharden_netdev")) {
66207 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
66208 + err = -EPERM;
66209 + goto free_modinfo;
66210 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
66211 + p += strlen("grsec_modharden_normal");
66212 + p2 = strstr(p, "_");
66213 + if (p2) {
66214 + *p2 = '\0';
66215 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
66216 + *p2 = '_';
66217 + }
66218 + err = -EPERM;
66219 + goto free_modinfo;
66220 + }
66221 + }
66222 +#endif
66223 +
66224 /* Fix up syms, so that st_value is a pointer to location. */
66225 err = simplify_symbols(mod, &info);
66226 if (err < 0)
66227 @@ -2886,13 +3016,6 @@ static struct module *load_module(void __user *umod,
66228
66229 flush_module_icache(mod);
66230
66231 - /* Now copy in args */
66232 - mod->args = strndup_user(uargs, ~0UL >> 1);
66233 - if (IS_ERR(mod->args)) {
66234 - err = PTR_ERR(mod->args);
66235 - goto free_arch_cleanup;
66236 - }
66237 -
66238 /* Mark state as coming so strong_try_module_get() ignores us. */
66239 mod->state = MODULE_STATE_COMING;
66240
66241 @@ -2949,11 +3072,10 @@ static struct module *load_module(void __user *umod,
66242 unlock:
66243 mutex_unlock(&module_mutex);
66244 synchronize_sched();
66245 - kfree(mod->args);
66246 - free_arch_cleanup:
66247 module_arch_cleanup(mod);
66248 free_modinfo:
66249 free_modinfo(mod);
66250 + kfree(mod->args);
66251 free_unload:
66252 module_unload_free(mod);
66253 free_module:
66254 @@ -2994,16 +3116,16 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
66255 MODULE_STATE_COMING, mod);
66256
66257 /* Set RO and NX regions for core */
66258 - set_section_ro_nx(mod->module_core,
66259 - mod->core_text_size,
66260 - mod->core_ro_size,
66261 - mod->core_size);
66262 + set_section_ro_nx(mod->module_core_rx,
66263 + mod->core_size_rx,
66264 + mod->core_size_rx,
66265 + mod->core_size_rx);
66266
66267 /* Set RO and NX regions for init */
66268 - set_section_ro_nx(mod->module_init,
66269 - mod->init_text_size,
66270 - mod->init_ro_size,
66271 - mod->init_size);
66272 + set_section_ro_nx(mod->module_init_rx,
66273 + mod->init_size_rx,
66274 + mod->init_size_rx,
66275 + mod->init_size_rx);
66276
66277 do_mod_ctors(mod);
66278 /* Start the module */
66279 @@ -3049,11 +3171,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
66280 mod->strtab = mod->core_strtab;
66281 #endif
66282 unset_module_init_ro_nx(mod);
66283 - module_free(mod, mod->module_init);
66284 - mod->module_init = NULL;
66285 - mod->init_size = 0;
66286 - mod->init_ro_size = 0;
66287 - mod->init_text_size = 0;
66288 + module_free(mod, mod->module_init_rw);
66289 + module_free_exec(mod, mod->module_init_rx);
66290 + mod->module_init_rw = NULL;
66291 + mod->module_init_rx = NULL;
66292 + mod->init_size_rw = 0;
66293 + mod->init_size_rx = 0;
66294 mutex_unlock(&module_mutex);
66295
66296 return 0;
66297 @@ -3084,10 +3207,16 @@ static const char *get_ksymbol(struct module *mod,
66298 unsigned long nextval;
66299
66300 /* At worse, next value is at end of module */
66301 - if (within_module_init(addr, mod))
66302 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
66303 + if (within_module_init_rx(addr, mod))
66304 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
66305 + else if (within_module_init_rw(addr, mod))
66306 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
66307 + else if (within_module_core_rx(addr, mod))
66308 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
66309 + else if (within_module_core_rw(addr, mod))
66310 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
66311 else
66312 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
66313 + return NULL;
66314
66315 /* Scan for closest preceding symbol, and next symbol. (ELF
66316 starts real symbols at 1). */
66317 @@ -3322,7 +3451,7 @@ static int m_show(struct seq_file *m, void *p)
66318 char buf[8];
66319
66320 seq_printf(m, "%s %u",
66321 - mod->name, mod->init_size + mod->core_size);
66322 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
66323 print_unload_info(m, mod);
66324
66325 /* Informative for users. */
66326 @@ -3331,7 +3460,7 @@ static int m_show(struct seq_file *m, void *p)
66327 mod->state == MODULE_STATE_COMING ? "Loading":
66328 "Live");
66329 /* Used by oprofile and other similar tools. */
66330 - seq_printf(m, " 0x%pK", mod->module_core);
66331 + seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
66332
66333 /* Taints info */
66334 if (mod->taints)
66335 @@ -3367,7 +3496,17 @@ static const struct file_operations proc_modules_operations = {
66336
66337 static int __init proc_modules_init(void)
66338 {
66339 +#ifndef CONFIG_GRKERNSEC_HIDESYM
66340 +#ifdef CONFIG_GRKERNSEC_PROC_USER
66341 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
66342 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66343 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
66344 +#else
66345 proc_create("modules", 0, NULL, &proc_modules_operations);
66346 +#endif
66347 +#else
66348 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
66349 +#endif
66350 return 0;
66351 }
66352 module_init(proc_modules_init);
66353 @@ -3426,12 +3565,12 @@ struct module *__module_address(unsigned long addr)
66354 {
66355 struct module *mod;
66356
66357 - if (addr < module_addr_min || addr > module_addr_max)
66358 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
66359 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
66360 return NULL;
66361
66362 list_for_each_entry_rcu(mod, &modules, list)
66363 - if (within_module_core(addr, mod)
66364 - || within_module_init(addr, mod))
66365 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
66366 return mod;
66367 return NULL;
66368 }
66369 @@ -3465,11 +3604,20 @@ bool is_module_text_address(unsigned long addr)
66370 */
66371 struct module *__module_text_address(unsigned long addr)
66372 {
66373 - struct module *mod = __module_address(addr);
66374 + struct module *mod;
66375 +
66376 +#ifdef CONFIG_X86_32
66377 + addr = ktla_ktva(addr);
66378 +#endif
66379 +
66380 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
66381 + return NULL;
66382 +
66383 + mod = __module_address(addr);
66384 +
66385 if (mod) {
66386 /* Make sure it's within the text section. */
66387 - if (!within(addr, mod->module_init, mod->init_text_size)
66388 - && !within(addr, mod->module_core, mod->core_text_size))
66389 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
66390 mod = NULL;
66391 }
66392 return mod;
66393 diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
66394 index 7e3443f..b2a1e6b 100644
66395 --- a/kernel/mutex-debug.c
66396 +++ b/kernel/mutex-debug.c
66397 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
66398 }
66399
66400 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
66401 - struct thread_info *ti)
66402 + struct task_struct *task)
66403 {
66404 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
66405
66406 /* Mark the current thread as blocked on the lock: */
66407 - ti->task->blocked_on = waiter;
66408 + task->blocked_on = waiter;
66409 }
66410
66411 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
66412 - struct thread_info *ti)
66413 + struct task_struct *task)
66414 {
66415 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
66416 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
66417 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
66418 - ti->task->blocked_on = NULL;
66419 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
66420 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
66421 + task->blocked_on = NULL;
66422
66423 list_del_init(&waiter->list);
66424 waiter->task = NULL;
66425 diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
66426 index 0799fd3..d06ae3b 100644
66427 --- a/kernel/mutex-debug.h
66428 +++ b/kernel/mutex-debug.h
66429 @@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
66430 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
66431 extern void debug_mutex_add_waiter(struct mutex *lock,
66432 struct mutex_waiter *waiter,
66433 - struct thread_info *ti);
66434 + struct task_struct *task);
66435 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
66436 - struct thread_info *ti);
66437 + struct task_struct *task);
66438 extern void debug_mutex_unlock(struct mutex *lock);
66439 extern void debug_mutex_init(struct mutex *lock, const char *name,
66440 struct lock_class_key *key);
66441 diff --git a/kernel/mutex.c b/kernel/mutex.c
66442 index 89096dd..f91ebc5 100644
66443 --- a/kernel/mutex.c
66444 +++ b/kernel/mutex.c
66445 @@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
66446 spin_lock_mutex(&lock->wait_lock, flags);
66447
66448 debug_mutex_lock_common(lock, &waiter);
66449 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
66450 + debug_mutex_add_waiter(lock, &waiter, task);
66451
66452 /* add waiting tasks to the end of the waitqueue (FIFO): */
66453 list_add_tail(&waiter.list, &lock->wait_list);
66454 @@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
66455 * TASK_UNINTERRUPTIBLE case.)
66456 */
66457 if (unlikely(signal_pending_state(state, task))) {
66458 - mutex_remove_waiter(lock, &waiter,
66459 - task_thread_info(task));
66460 + mutex_remove_waiter(lock, &waiter, task);
66461 mutex_release(&lock->dep_map, 1, ip);
66462 spin_unlock_mutex(&lock->wait_lock, flags);
66463
66464 @@ -249,7 +248,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
66465 done:
66466 lock_acquired(&lock->dep_map, ip);
66467 /* got the lock - rejoice! */
66468 - mutex_remove_waiter(lock, &waiter, current_thread_info());
66469 + mutex_remove_waiter(lock, &waiter, task);
66470 mutex_set_owner(lock);
66471
66472 /* set it to 0 if there are no waiters left: */
66473 diff --git a/kernel/padata.c b/kernel/padata.c
66474 index b452599..5d68f4e 100644
66475 --- a/kernel/padata.c
66476 +++ b/kernel/padata.c
66477 @@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_instance *pinst,
66478 padata->pd = pd;
66479 padata->cb_cpu = cb_cpu;
66480
66481 - if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
66482 - atomic_set(&pd->seq_nr, -1);
66483 + if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
66484 + atomic_set_unchecked(&pd->seq_nr, -1);
66485
66486 - padata->seq_nr = atomic_inc_return(&pd->seq_nr);
66487 + padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
66488
66489 target_cpu = padata_cpu_hash(padata);
66490 queue = per_cpu_ptr(pd->pqueue, target_cpu);
66491 @@ -444,7 +444,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
66492 padata_init_pqueues(pd);
66493 padata_init_squeues(pd);
66494 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
66495 - atomic_set(&pd->seq_nr, -1);
66496 + atomic_set_unchecked(&pd->seq_nr, -1);
66497 atomic_set(&pd->reorder_objects, 0);
66498 atomic_set(&pd->refcnt, 0);
66499 pd->pinst = pinst;
66500 diff --git a/kernel/panic.c b/kernel/panic.c
66501 index 80aed44..f291d37 100644
66502 --- a/kernel/panic.c
66503 +++ b/kernel/panic.c
66504 @@ -402,7 +402,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
66505 const char *board;
66506
66507 printk(KERN_WARNING "------------[ cut here ]------------\n");
66508 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
66509 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
66510 board = dmi_get_system_info(DMI_PRODUCT_NAME);
66511 if (board)
66512 printk(KERN_WARNING "Hardware name: %s\n", board);
66513 @@ -457,7 +457,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
66514 */
66515 void __stack_chk_fail(void)
66516 {
66517 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
66518 + dump_stack();
66519 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
66520 __builtin_return_address(0));
66521 }
66522 EXPORT_SYMBOL(__stack_chk_fail);
66523 diff --git a/kernel/pid.c b/kernel/pid.c
66524 index 9f08dfa..6765c40 100644
66525 --- a/kernel/pid.c
66526 +++ b/kernel/pid.c
66527 @@ -33,6 +33,7 @@
66528 #include <linux/rculist.h>
66529 #include <linux/bootmem.h>
66530 #include <linux/hash.h>
66531 +#include <linux/security.h>
66532 #include <linux/pid_namespace.h>
66533 #include <linux/init_task.h>
66534 #include <linux/syscalls.h>
66535 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
66536
66537 int pid_max = PID_MAX_DEFAULT;
66538
66539 -#define RESERVED_PIDS 300
66540 +#define RESERVED_PIDS 500
66541
66542 int pid_max_min = RESERVED_PIDS + 1;
66543 int pid_max_max = PID_MAX_LIMIT;
66544 @@ -420,10 +421,18 @@ EXPORT_SYMBOL(pid_task);
66545 */
66546 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
66547 {
66548 + struct task_struct *task;
66549 +
66550 rcu_lockdep_assert(rcu_read_lock_held(),
66551 "find_task_by_pid_ns() needs rcu_read_lock()"
66552 " protection");
66553 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
66554 +
66555 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
66556 +
66557 + if (gr_pid_is_chrooted(task))
66558 + return NULL;
66559 +
66560 + return task;
66561 }
66562
66563 struct task_struct *find_task_by_vpid(pid_t vnr)
66564 @@ -431,6 +440,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
66565 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
66566 }
66567
66568 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
66569 +{
66570 + rcu_lockdep_assert(rcu_read_lock_held(),
66571 + "find_task_by_pid_ns() needs rcu_read_lock()"
66572 + " protection");
66573 + return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
66574 +}
66575 +
66576 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
66577 {
66578 struct pid *pid;
66579 diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
66580 index 125cb67..a4d1c30 100644
66581 --- a/kernel/posix-cpu-timers.c
66582 +++ b/kernel/posix-cpu-timers.c
66583 @@ -6,6 +6,7 @@
66584 #include <linux/posix-timers.h>
66585 #include <linux/errno.h>
66586 #include <linux/math64.h>
66587 +#include <linux/security.h>
66588 #include <asm/uaccess.h>
66589 #include <linux/kernel_stat.h>
66590 #include <trace/events/timer.h>
66591 @@ -1578,14 +1579,14 @@ struct k_clock clock_posix_cpu = {
66592
66593 static __init int init_posix_cpu_timers(void)
66594 {
66595 - struct k_clock process = {
66596 + static struct k_clock process = {
66597 .clock_getres = process_cpu_clock_getres,
66598 .clock_get = process_cpu_clock_get,
66599 .timer_create = process_cpu_timer_create,
66600 .nsleep = process_cpu_nsleep,
66601 .nsleep_restart = process_cpu_nsleep_restart,
66602 };
66603 - struct k_clock thread = {
66604 + static struct k_clock thread = {
66605 .clock_getres = thread_cpu_clock_getres,
66606 .clock_get = thread_cpu_clock_get,
66607 .timer_create = thread_cpu_timer_create,
66608 diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
66609 index 69185ae..cc2847a 100644
66610 --- a/kernel/posix-timers.c
66611 +++ b/kernel/posix-timers.c
66612 @@ -43,6 +43,7 @@
66613 #include <linux/idr.h>
66614 #include <linux/posix-clock.h>
66615 #include <linux/posix-timers.h>
66616 +#include <linux/grsecurity.h>
66617 #include <linux/syscalls.h>
66618 #include <linux/wait.h>
66619 #include <linux/workqueue.h>
66620 @@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
66621 * which we beg off on and pass to do_sys_settimeofday().
66622 */
66623
66624 -static struct k_clock posix_clocks[MAX_CLOCKS];
66625 +static struct k_clock *posix_clocks[MAX_CLOCKS];
66626
66627 /*
66628 * These ones are defined below.
66629 @@ -227,7 +228,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
66630 */
66631 static __init int init_posix_timers(void)
66632 {
66633 - struct k_clock clock_realtime = {
66634 + static struct k_clock clock_realtime = {
66635 .clock_getres = hrtimer_get_res,
66636 .clock_get = posix_clock_realtime_get,
66637 .clock_set = posix_clock_realtime_set,
66638 @@ -239,7 +240,7 @@ static __init int init_posix_timers(void)
66639 .timer_get = common_timer_get,
66640 .timer_del = common_timer_del,
66641 };
66642 - struct k_clock clock_monotonic = {
66643 + static struct k_clock clock_monotonic = {
66644 .clock_getres = hrtimer_get_res,
66645 .clock_get = posix_ktime_get_ts,
66646 .nsleep = common_nsleep,
66647 @@ -249,19 +250,19 @@ static __init int init_posix_timers(void)
66648 .timer_get = common_timer_get,
66649 .timer_del = common_timer_del,
66650 };
66651 - struct k_clock clock_monotonic_raw = {
66652 + static struct k_clock clock_monotonic_raw = {
66653 .clock_getres = hrtimer_get_res,
66654 .clock_get = posix_get_monotonic_raw,
66655 };
66656 - struct k_clock clock_realtime_coarse = {
66657 + static struct k_clock clock_realtime_coarse = {
66658 .clock_getres = posix_get_coarse_res,
66659 .clock_get = posix_get_realtime_coarse,
66660 };
66661 - struct k_clock clock_monotonic_coarse = {
66662 + static struct k_clock clock_monotonic_coarse = {
66663 .clock_getres = posix_get_coarse_res,
66664 .clock_get = posix_get_monotonic_coarse,
66665 };
66666 - struct k_clock clock_boottime = {
66667 + static struct k_clock clock_boottime = {
66668 .clock_getres = hrtimer_get_res,
66669 .clock_get = posix_get_boottime,
66670 .nsleep = common_nsleep,
66671 @@ -473,7 +474,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
66672 return;
66673 }
66674
66675 - posix_clocks[clock_id] = *new_clock;
66676 + posix_clocks[clock_id] = new_clock;
66677 }
66678 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
66679
66680 @@ -519,9 +520,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
66681 return (id & CLOCKFD_MASK) == CLOCKFD ?
66682 &clock_posix_dynamic : &clock_posix_cpu;
66683
66684 - if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
66685 + if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
66686 return NULL;
66687 - return &posix_clocks[id];
66688 + return posix_clocks[id];
66689 }
66690
66691 static int common_timer_create(struct k_itimer *new_timer)
66692 @@ -959,6 +960,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
66693 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
66694 return -EFAULT;
66695
66696 + /* only the CLOCK_REALTIME clock can be set, all other clocks
66697 + have their clock_set fptr set to a nosettime dummy function
66698 + CLOCK_REALTIME has a NULL clock_set fptr which causes it to
66699 + call common_clock_set, which calls do_sys_settimeofday, which
66700 + we hook
66701 + */
66702 +
66703 return kc->clock_set(which_clock, &new_tp);
66704 }
66705
66706 diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
66707 index d523593..68197a4 100644
66708 --- a/kernel/power/poweroff.c
66709 +++ b/kernel/power/poweroff.c
66710 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
66711 .enable_mask = SYSRQ_ENABLE_BOOT,
66712 };
66713
66714 -static int pm_sysrq_init(void)
66715 +static int __init pm_sysrq_init(void)
66716 {
66717 register_sysrq_key('o', &sysrq_poweroff_op);
66718 return 0;
66719 diff --git a/kernel/power/process.c b/kernel/power/process.c
66720 index 7e42645..3d43df1 100644
66721 --- a/kernel/power/process.c
66722 +++ b/kernel/power/process.c
66723 @@ -32,6 +32,7 @@ static int try_to_freeze_tasks(bool user_only)
66724 u64 elapsed_csecs64;
66725 unsigned int elapsed_csecs;
66726 bool wakeup = false;
66727 + bool timedout = false;
66728
66729 do_gettimeofday(&start);
66730
66731 @@ -42,6 +43,8 @@ static int try_to_freeze_tasks(bool user_only)
66732
66733 while (true) {
66734 todo = 0;
66735 + if (time_after(jiffies, end_time))
66736 + timedout = true;
66737 read_lock(&tasklist_lock);
66738 do_each_thread(g, p) {
66739 if (p == current || !freeze_task(p))
66740 @@ -59,9 +62,13 @@ static int try_to_freeze_tasks(bool user_only)
66741 * try_to_stop() after schedule() in ptrace/signal
66742 * stop sees TIF_FREEZE.
66743 */
66744 - if (!task_is_stopped_or_traced(p) &&
66745 - !freezer_should_skip(p))
66746 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
66747 todo++;
66748 + if (timedout) {
66749 + printk(KERN_ERR "Task refusing to freeze:\n");
66750 + sched_show_task(p);
66751 + }
66752 + }
66753 } while_each_thread(g, p);
66754 read_unlock(&tasklist_lock);
66755
66756 @@ -70,7 +77,7 @@ static int try_to_freeze_tasks(bool user_only)
66757 todo += wq_busy;
66758 }
66759
66760 - if (!todo || time_after(jiffies, end_time))
66761 + if (!todo || timedout)
66762 break;
66763
66764 if (pm_wakeup_pending()) {
66765 diff --git a/kernel/printk.c b/kernel/printk.c
66766 index 32690a0..cd7c798 100644
66767 --- a/kernel/printk.c
66768 +++ b/kernel/printk.c
66769 @@ -313,6 +313,11 @@ static int check_syslog_permissions(int type, bool from_file)
66770 if (from_file && type != SYSLOG_ACTION_OPEN)
66771 return 0;
66772
66773 +#ifdef CONFIG_GRKERNSEC_DMESG
66774 + if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
66775 + return -EPERM;
66776 +#endif
66777 +
66778 if (syslog_action_restricted(type)) {
66779 if (capable(CAP_SYSLOG))
66780 return 0;
66781 diff --git a/kernel/profile.c b/kernel/profile.c
66782 index 76b8e77..a2930e8 100644
66783 --- a/kernel/profile.c
66784 +++ b/kernel/profile.c
66785 @@ -39,7 +39,7 @@ struct profile_hit {
66786 /* Oprofile timer tick hook */
66787 static int (*timer_hook)(struct pt_regs *) __read_mostly;
66788
66789 -static atomic_t *prof_buffer;
66790 +static atomic_unchecked_t *prof_buffer;
66791 static unsigned long prof_len, prof_shift;
66792
66793 int prof_on __read_mostly;
66794 @@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
66795 hits[i].pc = 0;
66796 continue;
66797 }
66798 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
66799 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
66800 hits[i].hits = hits[i].pc = 0;
66801 }
66802 }
66803 @@ -342,9 +342,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
66804 * Add the current hit(s) and flush the write-queue out
66805 * to the global buffer:
66806 */
66807 - atomic_add(nr_hits, &prof_buffer[pc]);
66808 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
66809 for (i = 0; i < NR_PROFILE_HIT; ++i) {
66810 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
66811 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
66812 hits[i].pc = hits[i].hits = 0;
66813 }
66814 out:
66815 @@ -419,7 +419,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
66816 {
66817 unsigned long pc;
66818 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
66819 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
66820 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
66821 }
66822 #endif /* !CONFIG_SMP */
66823
66824 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
66825 return -EFAULT;
66826 buf++; p++; count--; read++;
66827 }
66828 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
66829 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
66830 if (copy_to_user(buf, (void *)pnt, count))
66831 return -EFAULT;
66832 read += count;
66833 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
66834 }
66835 #endif
66836 profile_discard_flip_buffers();
66837 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
66838 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
66839 return count;
66840 }
66841
66842 diff --git a/kernel/ptrace.c b/kernel/ptrace.c
66843 index 00ab2ca..d237f61 100644
66844 --- a/kernel/ptrace.c
66845 +++ b/kernel/ptrace.c
66846 @@ -285,7 +285,7 @@ static int ptrace_attach(struct task_struct *task, long request,
66847 task->ptrace = PT_PTRACED;
66848 if (seize)
66849 task->ptrace |= PT_SEIZED;
66850 - if (ns_capable(task_user_ns(task), CAP_SYS_PTRACE))
66851 + if (ns_capable_nolog(task_user_ns(task), CAP_SYS_PTRACE))
66852 task->ptrace |= PT_PTRACE_CAP;
66853
66854 __ptrace_link(task, current);
66855 @@ -491,7 +491,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
66856 break;
66857 return -EIO;
66858 }
66859 - if (copy_to_user(dst, buf, retval))
66860 + if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
66861 return -EFAULT;
66862 copied += retval;
66863 src += retval;
66864 @@ -688,7 +688,7 @@ int ptrace_request(struct task_struct *child, long request,
66865 bool seized = child->ptrace & PT_SEIZED;
66866 int ret = -EIO;
66867 siginfo_t siginfo, *si;
66868 - void __user *datavp = (void __user *) data;
66869 + void __user *datavp = (__force void __user *) data;
66870 unsigned long __user *datalp = datavp;
66871 unsigned long flags;
66872
66873 @@ -890,14 +890,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
66874 goto out;
66875 }
66876
66877 + if (gr_handle_ptrace(child, request)) {
66878 + ret = -EPERM;
66879 + goto out_put_task_struct;
66880 + }
66881 +
66882 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
66883 ret = ptrace_attach(child, request, data);
66884 /*
66885 * Some architectures need to do book-keeping after
66886 * a ptrace attach.
66887 */
66888 - if (!ret)
66889 + if (!ret) {
66890 arch_ptrace_attach(child);
66891 + gr_audit_ptrace(child);
66892 + }
66893 goto out_put_task_struct;
66894 }
66895
66896 @@ -923,7 +930,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
66897 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
66898 if (copied != sizeof(tmp))
66899 return -EIO;
66900 - return put_user(tmp, (unsigned long __user *)data);
66901 + return put_user(tmp, (__force unsigned long __user *)data);
66902 }
66903
66904 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
66905 @@ -1033,14 +1040,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
66906 goto out;
66907 }
66908
66909 + if (gr_handle_ptrace(child, request)) {
66910 + ret = -EPERM;
66911 + goto out_put_task_struct;
66912 + }
66913 +
66914 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
66915 ret = ptrace_attach(child, request, data);
66916 /*
66917 * Some architectures need to do book-keeping after
66918 * a ptrace attach.
66919 */
66920 - if (!ret)
66921 + if (!ret) {
66922 arch_ptrace_attach(child);
66923 + gr_audit_ptrace(child);
66924 + }
66925 goto out_put_task_struct;
66926 }
66927
66928 diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
66929 index 977296d..c4744dc 100644
66930 --- a/kernel/rcutiny.c
66931 +++ b/kernel/rcutiny.c
66932 @@ -46,7 +46,7 @@
66933 struct rcu_ctrlblk;
66934 static void invoke_rcu_callbacks(void);
66935 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
66936 -static void rcu_process_callbacks(struct softirq_action *unused);
66937 +static void rcu_process_callbacks(void);
66938 static void __call_rcu(struct rcu_head *head,
66939 void (*func)(struct rcu_head *rcu),
66940 struct rcu_ctrlblk *rcp);
66941 @@ -297,7 +297,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
66942 rcu_is_callbacks_kthread()));
66943 }
66944
66945 -static void rcu_process_callbacks(struct softirq_action *unused)
66946 +static void rcu_process_callbacks(void)
66947 {
66948 __rcu_process_callbacks(&rcu_sched_ctrlblk);
66949 __rcu_process_callbacks(&rcu_bh_ctrlblk);
66950 diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
66951 index 9cb1ae4..aac7d3e 100644
66952 --- a/kernel/rcutiny_plugin.h
66953 +++ b/kernel/rcutiny_plugin.h
66954 @@ -920,7 +920,7 @@ static int rcu_kthread(void *arg)
66955 have_rcu_kthread_work = morework;
66956 local_irq_restore(flags);
66957 if (work)
66958 - rcu_process_callbacks(NULL);
66959 + rcu_process_callbacks();
66960 schedule_timeout_interruptible(1); /* Leave CPU for others. */
66961 }
66962
66963 diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
66964 index a58ac28..196a3d8 100644
66965 --- a/kernel/rcutorture.c
66966 +++ b/kernel/rcutorture.c
66967 @@ -148,12 +148,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
66968 { 0 };
66969 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
66970 { 0 };
66971 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
66972 -static atomic_t n_rcu_torture_alloc;
66973 -static atomic_t n_rcu_torture_alloc_fail;
66974 -static atomic_t n_rcu_torture_free;
66975 -static atomic_t n_rcu_torture_mberror;
66976 -static atomic_t n_rcu_torture_error;
66977 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
66978 +static atomic_unchecked_t n_rcu_torture_alloc;
66979 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
66980 +static atomic_unchecked_t n_rcu_torture_free;
66981 +static atomic_unchecked_t n_rcu_torture_mberror;
66982 +static atomic_unchecked_t n_rcu_torture_error;
66983 static long n_rcu_torture_boost_ktrerror;
66984 static long n_rcu_torture_boost_rterror;
66985 static long n_rcu_torture_boost_failure;
66986 @@ -243,11 +243,11 @@ rcu_torture_alloc(void)
66987
66988 spin_lock_bh(&rcu_torture_lock);
66989 if (list_empty(&rcu_torture_freelist)) {
66990 - atomic_inc(&n_rcu_torture_alloc_fail);
66991 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
66992 spin_unlock_bh(&rcu_torture_lock);
66993 return NULL;
66994 }
66995 - atomic_inc(&n_rcu_torture_alloc);
66996 + atomic_inc_unchecked(&n_rcu_torture_alloc);
66997 p = rcu_torture_freelist.next;
66998 list_del_init(p);
66999 spin_unlock_bh(&rcu_torture_lock);
67000 @@ -260,7 +260,7 @@ rcu_torture_alloc(void)
67001 static void
67002 rcu_torture_free(struct rcu_torture *p)
67003 {
67004 - atomic_inc(&n_rcu_torture_free);
67005 + atomic_inc_unchecked(&n_rcu_torture_free);
67006 spin_lock_bh(&rcu_torture_lock);
67007 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
67008 spin_unlock_bh(&rcu_torture_lock);
67009 @@ -380,7 +380,7 @@ rcu_torture_cb(struct rcu_head *p)
67010 i = rp->rtort_pipe_count;
67011 if (i > RCU_TORTURE_PIPE_LEN)
67012 i = RCU_TORTURE_PIPE_LEN;
67013 - atomic_inc(&rcu_torture_wcount[i]);
67014 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
67015 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
67016 rp->rtort_mbtest = 0;
67017 rcu_torture_free(rp);
67018 @@ -427,7 +427,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
67019 i = rp->rtort_pipe_count;
67020 if (i > RCU_TORTURE_PIPE_LEN)
67021 i = RCU_TORTURE_PIPE_LEN;
67022 - atomic_inc(&rcu_torture_wcount[i]);
67023 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
67024 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
67025 rp->rtort_mbtest = 0;
67026 list_del(&rp->rtort_free);
67027 @@ -916,7 +916,7 @@ rcu_torture_writer(void *arg)
67028 i = old_rp->rtort_pipe_count;
67029 if (i > RCU_TORTURE_PIPE_LEN)
67030 i = RCU_TORTURE_PIPE_LEN;
67031 - atomic_inc(&rcu_torture_wcount[i]);
67032 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
67033 old_rp->rtort_pipe_count++;
67034 cur_ops->deferred_free(old_rp);
67035 }
67036 @@ -997,7 +997,7 @@ static void rcu_torture_timer(unsigned long unused)
67037 return;
67038 }
67039 if (p->rtort_mbtest == 0)
67040 - atomic_inc(&n_rcu_torture_mberror);
67041 + atomic_inc_unchecked(&n_rcu_torture_mberror);
67042 spin_lock(&rand_lock);
67043 cur_ops->read_delay(&rand);
67044 n_rcu_torture_timers++;
67045 @@ -1061,7 +1061,7 @@ rcu_torture_reader(void *arg)
67046 continue;
67047 }
67048 if (p->rtort_mbtest == 0)
67049 - atomic_inc(&n_rcu_torture_mberror);
67050 + atomic_inc_unchecked(&n_rcu_torture_mberror);
67051 cur_ops->read_delay(&rand);
67052 preempt_disable();
67053 pipe_count = p->rtort_pipe_count;
67054 @@ -1123,10 +1123,10 @@ rcu_torture_printk(char *page)
67055 rcu_torture_current,
67056 rcu_torture_current_version,
67057 list_empty(&rcu_torture_freelist),
67058 - atomic_read(&n_rcu_torture_alloc),
67059 - atomic_read(&n_rcu_torture_alloc_fail),
67060 - atomic_read(&n_rcu_torture_free),
67061 - atomic_read(&n_rcu_torture_mberror),
67062 + atomic_read_unchecked(&n_rcu_torture_alloc),
67063 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
67064 + atomic_read_unchecked(&n_rcu_torture_free),
67065 + atomic_read_unchecked(&n_rcu_torture_mberror),
67066 n_rcu_torture_boost_ktrerror,
67067 n_rcu_torture_boost_rterror,
67068 n_rcu_torture_boost_failure,
67069 @@ -1136,7 +1136,7 @@ rcu_torture_printk(char *page)
67070 n_online_attempts,
67071 n_offline_successes,
67072 n_offline_attempts);
67073 - if (atomic_read(&n_rcu_torture_mberror) != 0 ||
67074 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
67075 n_rcu_torture_boost_ktrerror != 0 ||
67076 n_rcu_torture_boost_rterror != 0 ||
67077 n_rcu_torture_boost_failure != 0)
67078 @@ -1144,7 +1144,7 @@ rcu_torture_printk(char *page)
67079 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
67080 if (i > 1) {
67081 cnt += sprintf(&page[cnt], "!!! ");
67082 - atomic_inc(&n_rcu_torture_error);
67083 + atomic_inc_unchecked(&n_rcu_torture_error);
67084 WARN_ON_ONCE(1);
67085 }
67086 cnt += sprintf(&page[cnt], "Reader Pipe: ");
67087 @@ -1158,7 +1158,7 @@ rcu_torture_printk(char *page)
67088 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
67089 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
67090 cnt += sprintf(&page[cnt], " %d",
67091 - atomic_read(&rcu_torture_wcount[i]));
67092 + atomic_read_unchecked(&rcu_torture_wcount[i]));
67093 }
67094 cnt += sprintf(&page[cnt], "\n");
67095 if (cur_ops->stats)
67096 @@ -1600,7 +1600,7 @@ rcu_torture_cleanup(void)
67097
67098 if (cur_ops->cleanup)
67099 cur_ops->cleanup();
67100 - if (atomic_read(&n_rcu_torture_error))
67101 + if (atomic_read_unchecked(&n_rcu_torture_error))
67102 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
67103 else
67104 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
67105 @@ -1664,17 +1664,17 @@ rcu_torture_init(void)
67106
67107 rcu_torture_current = NULL;
67108 rcu_torture_current_version = 0;
67109 - atomic_set(&n_rcu_torture_alloc, 0);
67110 - atomic_set(&n_rcu_torture_alloc_fail, 0);
67111 - atomic_set(&n_rcu_torture_free, 0);
67112 - atomic_set(&n_rcu_torture_mberror, 0);
67113 - atomic_set(&n_rcu_torture_error, 0);
67114 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
67115 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
67116 + atomic_set_unchecked(&n_rcu_torture_free, 0);
67117 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
67118 + atomic_set_unchecked(&n_rcu_torture_error, 0);
67119 n_rcu_torture_boost_ktrerror = 0;
67120 n_rcu_torture_boost_rterror = 0;
67121 n_rcu_torture_boost_failure = 0;
67122 n_rcu_torture_boosts = 0;
67123 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
67124 - atomic_set(&rcu_torture_wcount[i], 0);
67125 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
67126 for_each_possible_cpu(cpu) {
67127 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
67128 per_cpu(rcu_torture_count, cpu)[i] = 0;
67129 diff --git a/kernel/rcutree.c b/kernel/rcutree.c
67130 index 6c4a672..70f3202 100644
67131 --- a/kernel/rcutree.c
67132 +++ b/kernel/rcutree.c
67133 @@ -363,9 +363,9 @@ static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
67134 rcu_prepare_for_idle(smp_processor_id());
67135 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
67136 smp_mb__before_atomic_inc(); /* See above. */
67137 - atomic_inc(&rdtp->dynticks);
67138 + atomic_inc_unchecked(&rdtp->dynticks);
67139 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
67140 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
67141 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
67142 }
67143
67144 /**
67145 @@ -438,10 +438,10 @@ void rcu_irq_exit(void)
67146 static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
67147 {
67148 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
67149 - atomic_inc(&rdtp->dynticks);
67150 + atomic_inc_unchecked(&rdtp->dynticks);
67151 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
67152 smp_mb__after_atomic_inc(); /* See above. */
67153 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
67154 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
67155 rcu_cleanup_after_idle(smp_processor_id());
67156 trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
67157 if (!is_idle_task(current)) {
67158 @@ -531,14 +531,14 @@ void rcu_nmi_enter(void)
67159 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
67160
67161 if (rdtp->dynticks_nmi_nesting == 0 &&
67162 - (atomic_read(&rdtp->dynticks) & 0x1))
67163 + (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
67164 return;
67165 rdtp->dynticks_nmi_nesting++;
67166 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
67167 - atomic_inc(&rdtp->dynticks);
67168 + atomic_inc_unchecked(&rdtp->dynticks);
67169 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
67170 smp_mb__after_atomic_inc(); /* See above. */
67171 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
67172 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
67173 }
67174
67175 /**
67176 @@ -557,9 +557,9 @@ void rcu_nmi_exit(void)
67177 return;
67178 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
67179 smp_mb__before_atomic_inc(); /* See above. */
67180 - atomic_inc(&rdtp->dynticks);
67181 + atomic_inc_unchecked(&rdtp->dynticks);
67182 smp_mb__after_atomic_inc(); /* Force delay to next write. */
67183 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
67184 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
67185 }
67186
67187 #ifdef CONFIG_PROVE_RCU
67188 @@ -575,7 +575,7 @@ int rcu_is_cpu_idle(void)
67189 int ret;
67190
67191 preempt_disable();
67192 - ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
67193 + ret = (atomic_read_unchecked(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
67194 preempt_enable();
67195 return ret;
67196 }
67197 @@ -604,7 +604,7 @@ int rcu_is_cpu_rrupt_from_idle(void)
67198 */
67199 static int dyntick_save_progress_counter(struct rcu_data *rdp)
67200 {
67201 - rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
67202 + rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
67203 return (rdp->dynticks_snap & 0x1) == 0;
67204 }
67205
67206 @@ -619,7 +619,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
67207 unsigned int curr;
67208 unsigned int snap;
67209
67210 - curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
67211 + curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
67212 snap = (unsigned int)rdp->dynticks_snap;
67213
67214 /*
67215 @@ -1667,7 +1667,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
67216 /*
67217 * Do RCU core processing for the current CPU.
67218 */
67219 -static void rcu_process_callbacks(struct softirq_action *unused)
67220 +static void rcu_process_callbacks(void)
67221 {
67222 trace_rcu_utilization("Start RCU core");
67223 __rcu_process_callbacks(&rcu_sched_state,
67224 @@ -2030,7 +2030,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
67225 rdp->qlen = 0;
67226 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
67227 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_NESTING);
67228 - WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
67229 + WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
67230 rdp->cpu = cpu;
67231 rdp->rsp = rsp;
67232 raw_spin_unlock_irqrestore(&rnp->lock, flags);
67233 @@ -2058,8 +2058,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
67234 rdp->n_force_qs_snap = rsp->n_force_qs;
67235 rdp->blimit = blimit;
67236 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_NESTING;
67237 - atomic_set(&rdp->dynticks->dynticks,
67238 - (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
67239 + atomic_set_unchecked(&rdp->dynticks->dynticks,
67240 + (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
67241 rcu_prepare_for_idle_init(cpu);
67242 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
67243
67244 diff --git a/kernel/rcutree.h b/kernel/rcutree.h
67245 index fddff92..2c08359 100644
67246 --- a/kernel/rcutree.h
67247 +++ b/kernel/rcutree.h
67248 @@ -87,7 +87,7 @@ struct rcu_dynticks {
67249 long long dynticks_nesting; /* Track irq/process nesting level. */
67250 /* Process level is worth LLONG_MAX/2. */
67251 int dynticks_nmi_nesting; /* Track NMI nesting level. */
67252 - atomic_t dynticks; /* Even value for idle, else odd. */
67253 + atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
67254 };
67255
67256 /* RCU's kthread states for tracing. */
67257 diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
67258 index 8bb35d7..6ea0a463 100644
67259 --- a/kernel/rcutree_plugin.h
67260 +++ b/kernel/rcutree_plugin.h
67261 @@ -850,7 +850,7 @@ void synchronize_rcu_expedited(void)
67262
67263 /* Clean up and exit. */
67264 smp_mb(); /* ensure expedited GP seen before counter increment. */
67265 - ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
67266 + ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
67267 unlock_mb_ret:
67268 mutex_unlock(&sync_rcu_preempt_exp_mutex);
67269 mb_ret:
67270 @@ -1833,8 +1833,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
67271
67272 #else /* #ifndef CONFIG_SMP */
67273
67274 -static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
67275 -static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
67276 +static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
67277 +static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
67278
67279 static int synchronize_sched_expedited_cpu_stop(void *data)
67280 {
67281 @@ -1889,7 +1889,7 @@ void synchronize_sched_expedited(void)
67282 int firstsnap, s, snap, trycount = 0;
67283
67284 /* Note that atomic_inc_return() implies full memory barrier. */
67285 - firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
67286 + firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
67287 get_online_cpus();
67288
67289 /*
67290 @@ -1910,7 +1910,7 @@ void synchronize_sched_expedited(void)
67291 }
67292
67293 /* Check to see if someone else did our work for us. */
67294 - s = atomic_read(&sync_sched_expedited_done);
67295 + s = atomic_read_unchecked(&sync_sched_expedited_done);
67296 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
67297 smp_mb(); /* ensure test happens before caller kfree */
67298 return;
67299 @@ -1925,7 +1925,7 @@ void synchronize_sched_expedited(void)
67300 * grace period works for us.
67301 */
67302 get_online_cpus();
67303 - snap = atomic_read(&sync_sched_expedited_started);
67304 + snap = atomic_read_unchecked(&sync_sched_expedited_started);
67305 smp_mb(); /* ensure read is before try_stop_cpus(). */
67306 }
67307
67308 @@ -1936,12 +1936,12 @@ void synchronize_sched_expedited(void)
67309 * than we did beat us to the punch.
67310 */
67311 do {
67312 - s = atomic_read(&sync_sched_expedited_done);
67313 + s = atomic_read_unchecked(&sync_sched_expedited_done);
67314 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
67315 smp_mb(); /* ensure test happens before caller kfree */
67316 break;
67317 }
67318 - } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
67319 + } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
67320
67321 put_online_cpus();
67322 }
67323 diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
67324 index 654cfe6..c0b28e2 100644
67325 --- a/kernel/rcutree_trace.c
67326 +++ b/kernel/rcutree_trace.c
67327 @@ -68,7 +68,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
67328 rdp->passed_quiesce, rdp->passed_quiesce_gpnum,
67329 rdp->qs_pending);
67330 seq_printf(m, " dt=%d/%llx/%d df=%lu",
67331 - atomic_read(&rdp->dynticks->dynticks),
67332 + atomic_read_unchecked(&rdp->dynticks->dynticks),
67333 rdp->dynticks->dynticks_nesting,
67334 rdp->dynticks->dynticks_nmi_nesting,
67335 rdp->dynticks_fqs);
67336 @@ -140,7 +140,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
67337 rdp->passed_quiesce, rdp->passed_quiesce_gpnum,
67338 rdp->qs_pending);
67339 seq_printf(m, ",%d,%llx,%d,%lu",
67340 - atomic_read(&rdp->dynticks->dynticks),
67341 + atomic_read_unchecked(&rdp->dynticks->dynticks),
67342 rdp->dynticks->dynticks_nesting,
67343 rdp->dynticks->dynticks_nmi_nesting,
67344 rdp->dynticks_fqs);
67345 diff --git a/kernel/resource.c b/kernel/resource.c
67346 index 7640b3a..5879283 100644
67347 --- a/kernel/resource.c
67348 +++ b/kernel/resource.c
67349 @@ -141,8 +141,18 @@ static const struct file_operations proc_iomem_operations = {
67350
67351 static int __init ioresources_init(void)
67352 {
67353 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
67354 +#ifdef CONFIG_GRKERNSEC_PROC_USER
67355 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
67356 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
67357 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67358 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
67359 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
67360 +#endif
67361 +#else
67362 proc_create("ioports", 0, NULL, &proc_ioports_operations);
67363 proc_create("iomem", 0, NULL, &proc_iomem_operations);
67364 +#endif
67365 return 0;
67366 }
67367 __initcall(ioresources_init);
67368 diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
67369 index 98ec494..4241d6d 100644
67370 --- a/kernel/rtmutex-tester.c
67371 +++ b/kernel/rtmutex-tester.c
67372 @@ -20,7 +20,7 @@
67373 #define MAX_RT_TEST_MUTEXES 8
67374
67375 static spinlock_t rttest_lock;
67376 -static atomic_t rttest_event;
67377 +static atomic_unchecked_t rttest_event;
67378
67379 struct test_thread_data {
67380 int opcode;
67381 @@ -61,7 +61,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
67382
67383 case RTTEST_LOCKCONT:
67384 td->mutexes[td->opdata] = 1;
67385 - td->event = atomic_add_return(1, &rttest_event);
67386 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67387 return 0;
67388
67389 case RTTEST_RESET:
67390 @@ -74,7 +74,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
67391 return 0;
67392
67393 case RTTEST_RESETEVENT:
67394 - atomic_set(&rttest_event, 0);
67395 + atomic_set_unchecked(&rttest_event, 0);
67396 return 0;
67397
67398 default:
67399 @@ -91,9 +91,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
67400 return ret;
67401
67402 td->mutexes[id] = 1;
67403 - td->event = atomic_add_return(1, &rttest_event);
67404 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67405 rt_mutex_lock(&mutexes[id]);
67406 - td->event = atomic_add_return(1, &rttest_event);
67407 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67408 td->mutexes[id] = 4;
67409 return 0;
67410
67411 @@ -104,9 +104,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
67412 return ret;
67413
67414 td->mutexes[id] = 1;
67415 - td->event = atomic_add_return(1, &rttest_event);
67416 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67417 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
67418 - td->event = atomic_add_return(1, &rttest_event);
67419 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67420 td->mutexes[id] = ret ? 0 : 4;
67421 return ret ? -EINTR : 0;
67422
67423 @@ -115,9 +115,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
67424 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
67425 return ret;
67426
67427 - td->event = atomic_add_return(1, &rttest_event);
67428 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67429 rt_mutex_unlock(&mutexes[id]);
67430 - td->event = atomic_add_return(1, &rttest_event);
67431 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67432 td->mutexes[id] = 0;
67433 return 0;
67434
67435 @@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
67436 break;
67437
67438 td->mutexes[dat] = 2;
67439 - td->event = atomic_add_return(1, &rttest_event);
67440 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67441 break;
67442
67443 default:
67444 @@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
67445 return;
67446
67447 td->mutexes[dat] = 3;
67448 - td->event = atomic_add_return(1, &rttest_event);
67449 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67450 break;
67451
67452 case RTTEST_LOCKNOWAIT:
67453 @@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
67454 return;
67455
67456 td->mutexes[dat] = 1;
67457 - td->event = atomic_add_return(1, &rttest_event);
67458 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67459 return;
67460
67461 default:
67462 diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
67463 index e8a1f83..363d17d 100644
67464 --- a/kernel/sched/auto_group.c
67465 +++ b/kernel/sched/auto_group.c
67466 @@ -11,7 +11,7 @@
67467
67468 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
67469 static struct autogroup autogroup_default;
67470 -static atomic_t autogroup_seq_nr;
67471 +static atomic_unchecked_t autogroup_seq_nr;
67472
67473 void __init autogroup_init(struct task_struct *init_task)
67474 {
67475 @@ -78,7 +78,7 @@ static inline struct autogroup *autogroup_create(void)
67476
67477 kref_init(&ag->kref);
67478 init_rwsem(&ag->lock);
67479 - ag->id = atomic_inc_return(&autogroup_seq_nr);
67480 + ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
67481 ag->tg = tg;
67482 #ifdef CONFIG_RT_GROUP_SCHED
67483 /*
67484 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
67485 index b342f57..00324a0 100644
67486 --- a/kernel/sched/core.c
67487 +++ b/kernel/sched/core.c
67488 @@ -3143,6 +3143,19 @@ pick_next_task(struct rq *rq)
67489 BUG(); /* the idle class will always have a runnable task */
67490 }
67491
67492 +#ifdef CONFIG_GRKERNSEC_SETXID
67493 +extern void gr_delayed_cred_worker(void);
67494 +static inline void gr_cred_schedule(void)
67495 +{
67496 + if (unlikely(current->delayed_cred))
67497 + gr_delayed_cred_worker();
67498 +}
67499 +#else
67500 +static inline void gr_cred_schedule(void)
67501 +{
67502 +}
67503 +#endif
67504 +
67505 /*
67506 * __schedule() is the main scheduler function.
67507 */
67508 @@ -3162,6 +3175,8 @@ need_resched:
67509
67510 schedule_debug(prev);
67511
67512 + gr_cred_schedule();
67513 +
67514 if (sched_feat(HRTICK))
67515 hrtick_clear(rq);
67516
67517 @@ -3852,6 +3867,8 @@ int can_nice(const struct task_struct *p, const int nice)
67518 /* convert nice value [19,-20] to rlimit style value [1,40] */
67519 int nice_rlim = 20 - nice;
67520
67521 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
67522 +
67523 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
67524 capable(CAP_SYS_NICE));
67525 }
67526 @@ -3885,7 +3902,8 @@ SYSCALL_DEFINE1(nice, int, increment)
67527 if (nice > 19)
67528 nice = 19;
67529
67530 - if (increment < 0 && !can_nice(current, nice))
67531 + if (increment < 0 && (!can_nice(current, nice) ||
67532 + gr_handle_chroot_nice()))
67533 return -EPERM;
67534
67535 retval = security_task_setnice(current, nice);
67536 @@ -4042,6 +4060,7 @@ recheck:
67537 unsigned long rlim_rtprio =
67538 task_rlimit(p, RLIMIT_RTPRIO);
67539
67540 + gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
67541 /* can't set/change the rt policy */
67542 if (policy != p->policy && !rlim_rtprio)
67543 return -EPERM;
67544 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
67545 index aca16b8..8e3acc4 100644
67546 --- a/kernel/sched/fair.c
67547 +++ b/kernel/sched/fair.c
67548 @@ -5147,7 +5147,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
67549 * run_rebalance_domains is triggered when needed from the scheduler tick.
67550 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
67551 */
67552 -static void run_rebalance_domains(struct softirq_action *h)
67553 +static void run_rebalance_domains(void)
67554 {
67555 int this_cpu = smp_processor_id();
67556 struct rq *this_rq = cpu_rq(this_cpu);
67557 diff --git a/kernel/signal.c b/kernel/signal.c
67558 index c73c428..7040057 100644
67559 --- a/kernel/signal.c
67560 +++ b/kernel/signal.c
67561 @@ -46,12 +46,12 @@ static struct kmem_cache *sigqueue_cachep;
67562
67563 int print_fatal_signals __read_mostly;
67564
67565 -static void __user *sig_handler(struct task_struct *t, int sig)
67566 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
67567 {
67568 return t->sighand->action[sig - 1].sa.sa_handler;
67569 }
67570
67571 -static int sig_handler_ignored(void __user *handler, int sig)
67572 +static int sig_handler_ignored(__sighandler_t handler, int sig)
67573 {
67574 /* Is it explicitly or implicitly ignored? */
67575 return handler == SIG_IGN ||
67576 @@ -61,7 +61,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
67577 static int sig_task_ignored(struct task_struct *t, int sig,
67578 int from_ancestor_ns)
67579 {
67580 - void __user *handler;
67581 + __sighandler_t handler;
67582
67583 handler = sig_handler(t, sig);
67584
67585 @@ -365,6 +365,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
67586 atomic_inc(&user->sigpending);
67587 rcu_read_unlock();
67588
67589 + if (!override_rlimit)
67590 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
67591 +
67592 if (override_rlimit ||
67593 atomic_read(&user->sigpending) <=
67594 task_rlimit(t, RLIMIT_SIGPENDING)) {
67595 @@ -489,7 +492,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
67596
67597 int unhandled_signal(struct task_struct *tsk, int sig)
67598 {
67599 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
67600 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
67601 if (is_global_init(tsk))
67602 return 1;
67603 if (handler != SIG_IGN && handler != SIG_DFL)
67604 @@ -816,6 +819,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
67605 }
67606 }
67607
67608 + /* allow glibc communication via tgkill to other threads in our
67609 + thread group */
67610 + if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
67611 + sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
67612 + && gr_handle_signal(t, sig))
67613 + return -EPERM;
67614 +
67615 return security_task_kill(t, info, sig, 0);
67616 }
67617
67618 @@ -1197,7 +1207,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
67619 return send_signal(sig, info, p, 1);
67620 }
67621
67622 -static int
67623 +int
67624 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
67625 {
67626 return send_signal(sig, info, t, 0);
67627 @@ -1234,6 +1244,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
67628 unsigned long int flags;
67629 int ret, blocked, ignored;
67630 struct k_sigaction *action;
67631 + int is_unhandled = 0;
67632
67633 spin_lock_irqsave(&t->sighand->siglock, flags);
67634 action = &t->sighand->action[sig-1];
67635 @@ -1248,9 +1259,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
67636 }
67637 if (action->sa.sa_handler == SIG_DFL)
67638 t->signal->flags &= ~SIGNAL_UNKILLABLE;
67639 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
67640 + is_unhandled = 1;
67641 ret = specific_send_sig_info(sig, info, t);
67642 spin_unlock_irqrestore(&t->sighand->siglock, flags);
67643
67644 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
67645 + normal operation */
67646 + if (is_unhandled) {
67647 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
67648 + gr_handle_crash(t, sig);
67649 + }
67650 +
67651 return ret;
67652 }
67653
67654 @@ -1317,8 +1337,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
67655 ret = check_kill_permission(sig, info, p);
67656 rcu_read_unlock();
67657
67658 - if (!ret && sig)
67659 + if (!ret && sig) {
67660 ret = do_send_sig_info(sig, info, p, true);
67661 + if (!ret)
67662 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
67663 + }
67664
67665 return ret;
67666 }
67667 @@ -2820,7 +2843,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
67668 int error = -ESRCH;
67669
67670 rcu_read_lock();
67671 - p = find_task_by_vpid(pid);
67672 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
67673 + /* allow glibc communication via tgkill to other threads in our
67674 + thread group */
67675 + if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
67676 + sig == (SIGRTMIN+1) && tgid == info->si_pid)
67677 + p = find_task_by_vpid_unrestricted(pid);
67678 + else
67679 +#endif
67680 + p = find_task_by_vpid(pid);
67681 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
67682 error = check_kill_permission(sig, info, p);
67683 /*
67684 diff --git a/kernel/smp.c b/kernel/smp.c
67685 index db197d6..17aef0b 100644
67686 --- a/kernel/smp.c
67687 +++ b/kernel/smp.c
67688 @@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t func, void *info, int wait)
67689 }
67690 EXPORT_SYMBOL(smp_call_function);
67691
67692 -void ipi_call_lock(void)
67693 +void ipi_call_lock(void) __acquires(call_function.lock)
67694 {
67695 raw_spin_lock(&call_function.lock);
67696 }
67697
67698 -void ipi_call_unlock(void)
67699 +void ipi_call_unlock(void) __releases(call_function.lock)
67700 {
67701 raw_spin_unlock(&call_function.lock);
67702 }
67703
67704 -void ipi_call_lock_irq(void)
67705 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
67706 {
67707 raw_spin_lock_irq(&call_function.lock);
67708 }
67709
67710 -void ipi_call_unlock_irq(void)
67711 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
67712 {
67713 raw_spin_unlock_irq(&call_function.lock);
67714 }
67715 diff --git a/kernel/softirq.c b/kernel/softirq.c
67716 index 4eb3a0f..6f1fa81 100644
67717 --- a/kernel/softirq.c
67718 +++ b/kernel/softirq.c
67719 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
67720
67721 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
67722
67723 -char *softirq_to_name[NR_SOFTIRQS] = {
67724 +const char * const softirq_to_name[NR_SOFTIRQS] = {
67725 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
67726 "TASKLET", "SCHED", "HRTIMER", "RCU"
67727 };
67728 @@ -235,7 +235,7 @@ restart:
67729 kstat_incr_softirqs_this_cpu(vec_nr);
67730
67731 trace_softirq_entry(vec_nr);
67732 - h->action(h);
67733 + h->action();
67734 trace_softirq_exit(vec_nr);
67735 if (unlikely(prev_count != preempt_count())) {
67736 printk(KERN_ERR "huh, entered softirq %u %s %p"
67737 @@ -385,9 +385,11 @@ void raise_softirq(unsigned int nr)
67738 local_irq_restore(flags);
67739 }
67740
67741 -void open_softirq(int nr, void (*action)(struct softirq_action *))
67742 +void open_softirq(int nr, void (*action)(void))
67743 {
67744 - softirq_vec[nr].action = action;
67745 + pax_open_kernel();
67746 + *(void **)&softirq_vec[nr].action = action;
67747 + pax_close_kernel();
67748 }
67749
67750 /*
67751 @@ -441,7 +443,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
67752
67753 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
67754
67755 -static void tasklet_action(struct softirq_action *a)
67756 +static void tasklet_action(void)
67757 {
67758 struct tasklet_struct *list;
67759
67760 @@ -476,7 +478,7 @@ static void tasklet_action(struct softirq_action *a)
67761 }
67762 }
67763
67764 -static void tasklet_hi_action(struct softirq_action *a)
67765 +static void tasklet_hi_action(void)
67766 {
67767 struct tasklet_struct *list;
67768
67769 diff --git a/kernel/sys.c b/kernel/sys.c
67770 index 888d227..f04b318 100644
67771 --- a/kernel/sys.c
67772 +++ b/kernel/sys.c
67773 @@ -158,6 +158,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
67774 error = -EACCES;
67775 goto out;
67776 }
67777 +
67778 + if (gr_handle_chroot_setpriority(p, niceval)) {
67779 + error = -EACCES;
67780 + goto out;
67781 + }
67782 +
67783 no_nice = security_task_setnice(p, niceval);
67784 if (no_nice) {
67785 error = no_nice;
67786 @@ -572,6 +578,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
67787 goto error;
67788 }
67789
67790 + if (gr_check_group_change(new->gid, new->egid, -1))
67791 + goto error;
67792 +
67793 if (rgid != (gid_t) -1 ||
67794 (egid != (gid_t) -1 && egid != old->gid))
67795 new->sgid = new->egid;
67796 @@ -601,6 +610,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
67797 old = current_cred();
67798
67799 retval = -EPERM;
67800 +
67801 + if (gr_check_group_change(gid, gid, gid))
67802 + goto error;
67803 +
67804 if (nsown_capable(CAP_SETGID))
67805 new->gid = new->egid = new->sgid = new->fsgid = gid;
67806 else if (gid == old->gid || gid == old->sgid)
67807 @@ -618,7 +631,7 @@ error:
67808 /*
67809 * change the user struct in a credentials set to match the new UID
67810 */
67811 -static int set_user(struct cred *new)
67812 +int set_user(struct cred *new)
67813 {
67814 struct user_struct *new_user;
67815
67816 @@ -688,6 +701,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
67817 goto error;
67818 }
67819
67820 + if (gr_check_user_change(new->uid, new->euid, -1))
67821 + goto error;
67822 +
67823 if (new->uid != old->uid) {
67824 retval = set_user(new);
67825 if (retval < 0)
67826 @@ -732,6 +748,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
67827 old = current_cred();
67828
67829 retval = -EPERM;
67830 +
67831 + if (gr_check_crash_uid(uid))
67832 + goto error;
67833 + if (gr_check_user_change(uid, uid, uid))
67834 + goto error;
67835 +
67836 if (nsown_capable(CAP_SETUID)) {
67837 new->suid = new->uid = uid;
67838 if (uid != old->uid) {
67839 @@ -786,6 +808,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
67840 goto error;
67841 }
67842
67843 + if (gr_check_user_change(ruid, euid, -1))
67844 + goto error;
67845 +
67846 if (ruid != (uid_t) -1) {
67847 new->uid = ruid;
67848 if (ruid != old->uid) {
67849 @@ -850,6 +875,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
67850 goto error;
67851 }
67852
67853 + if (gr_check_group_change(rgid, egid, -1))
67854 + goto error;
67855 +
67856 if (rgid != (gid_t) -1)
67857 new->gid = rgid;
67858 if (egid != (gid_t) -1)
67859 @@ -896,6 +924,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
67860 old = current_cred();
67861 old_fsuid = old->fsuid;
67862
67863 + if (gr_check_user_change(-1, -1, uid))
67864 + goto error;
67865 +
67866 if (uid == old->uid || uid == old->euid ||
67867 uid == old->suid || uid == old->fsuid ||
67868 nsown_capable(CAP_SETUID)) {
67869 @@ -906,6 +937,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
67870 }
67871 }
67872
67873 +error:
67874 abort_creds(new);
67875 return old_fsuid;
67876
67877 @@ -932,12 +964,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
67878 if (gid == old->gid || gid == old->egid ||
67879 gid == old->sgid || gid == old->fsgid ||
67880 nsown_capable(CAP_SETGID)) {
67881 + if (gr_check_group_change(-1, -1, gid))
67882 + goto error;
67883 +
67884 if (gid != old_fsgid) {
67885 new->fsgid = gid;
67886 goto change_okay;
67887 }
67888 }
67889
67890 +error:
67891 abort_creds(new);
67892 return old_fsgid;
67893
67894 @@ -1189,7 +1225,10 @@ static int override_release(char __user *release, int len)
67895 }
67896 v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
67897 snprintf(buf, len, "2.6.%u%s", v, rest);
67898 - ret = copy_to_user(release, buf, len);
67899 + if (len > sizeof(buf))
67900 + ret = -EFAULT;
67901 + else
67902 + ret = copy_to_user(release, buf, len);
67903 }
67904 return ret;
67905 }
67906 @@ -1243,19 +1282,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
67907 return -EFAULT;
67908
67909 down_read(&uts_sem);
67910 - error = __copy_to_user(&name->sysname, &utsname()->sysname,
67911 + error = __copy_to_user(name->sysname, &utsname()->sysname,
67912 __OLD_UTS_LEN);
67913 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
67914 - error |= __copy_to_user(&name->nodename, &utsname()->nodename,
67915 + error |= __copy_to_user(name->nodename, &utsname()->nodename,
67916 __OLD_UTS_LEN);
67917 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
67918 - error |= __copy_to_user(&name->release, &utsname()->release,
67919 + error |= __copy_to_user(name->release, &utsname()->release,
67920 __OLD_UTS_LEN);
67921 error |= __put_user(0, name->release + __OLD_UTS_LEN);
67922 - error |= __copy_to_user(&name->version, &utsname()->version,
67923 + error |= __copy_to_user(name->version, &utsname()->version,
67924 __OLD_UTS_LEN);
67925 error |= __put_user(0, name->version + __OLD_UTS_LEN);
67926 - error |= __copy_to_user(&name->machine, &utsname()->machine,
67927 + error |= __copy_to_user(name->machine, &utsname()->machine,
67928 __OLD_UTS_LEN);
67929 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
67930 up_read(&uts_sem);
67931 @@ -1838,7 +1877,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
67932 error = get_dumpable(me->mm);
67933 break;
67934 case PR_SET_DUMPABLE:
67935 - if (arg2 < 0 || arg2 > 1) {
67936 + if (arg2 > 1) {
67937 error = -EINVAL;
67938 break;
67939 }
67940 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
67941 index f487f25..9056a9e 100644
67942 --- a/kernel/sysctl.c
67943 +++ b/kernel/sysctl.c
67944 @@ -86,6 +86,13 @@
67945
67946
67947 #if defined(CONFIG_SYSCTL)
67948 +#include <linux/grsecurity.h>
67949 +#include <linux/grinternal.h>
67950 +
67951 +extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
67952 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
67953 + const int op);
67954 +extern int gr_handle_chroot_sysctl(const int op);
67955
67956 /* External variables not in a header file. */
67957 extern int sysctl_overcommit_memory;
67958 @@ -191,6 +198,7 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
67959 }
67960
67961 #endif
67962 +extern struct ctl_table grsecurity_table[];
67963
67964 static struct ctl_table root_table[];
67965 static struct ctl_table_root sysctl_table_root;
67966 @@ -220,6 +228,20 @@ extern struct ctl_table epoll_table[];
67967 int sysctl_legacy_va_layout;
67968 #endif
67969
67970 +#ifdef CONFIG_PAX_SOFTMODE
67971 +static ctl_table pax_table[] = {
67972 + {
67973 + .procname = "softmode",
67974 + .data = &pax_softmode,
67975 + .maxlen = sizeof(unsigned int),
67976 + .mode = 0600,
67977 + .proc_handler = &proc_dointvec,
67978 + },
67979 +
67980 + { }
67981 +};
67982 +#endif
67983 +
67984 /* The default sysctl tables: */
67985
67986 static struct ctl_table root_table[] = {
67987 @@ -266,6 +288,22 @@ static int max_extfrag_threshold = 1000;
67988 #endif
67989
67990 static struct ctl_table kern_table[] = {
67991 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
67992 + {
67993 + .procname = "grsecurity",
67994 + .mode = 0500,
67995 + .child = grsecurity_table,
67996 + },
67997 +#endif
67998 +
67999 +#ifdef CONFIG_PAX_SOFTMODE
68000 + {
68001 + .procname = "pax",
68002 + .mode = 0500,
68003 + .child = pax_table,
68004 + },
68005 +#endif
68006 +
68007 {
68008 .procname = "sched_child_runs_first",
68009 .data = &sysctl_sched_child_runs_first,
68010 @@ -550,7 +588,7 @@ static struct ctl_table kern_table[] = {
68011 .data = &modprobe_path,
68012 .maxlen = KMOD_PATH_LEN,
68013 .mode = 0644,
68014 - .proc_handler = proc_dostring,
68015 + .proc_handler = proc_dostring_modpriv,
68016 },
68017 {
68018 .procname = "modules_disabled",
68019 @@ -717,16 +755,20 @@ static struct ctl_table kern_table[] = {
68020 .extra1 = &zero,
68021 .extra2 = &one,
68022 },
68023 +#endif
68024 {
68025 .procname = "kptr_restrict",
68026 .data = &kptr_restrict,
68027 .maxlen = sizeof(int),
68028 .mode = 0644,
68029 .proc_handler = proc_dmesg_restrict,
68030 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68031 + .extra1 = &two,
68032 +#else
68033 .extra1 = &zero,
68034 +#endif
68035 .extra2 = &two,
68036 },
68037 -#endif
68038 {
68039 .procname = "ngroups_max",
68040 .data = &ngroups_max,
68041 @@ -1225,6 +1267,13 @@ static struct ctl_table vm_table[] = {
68042 .proc_handler = proc_dointvec_minmax,
68043 .extra1 = &zero,
68044 },
68045 + {
68046 + .procname = "heap_stack_gap",
68047 + .data = &sysctl_heap_stack_gap,
68048 + .maxlen = sizeof(sysctl_heap_stack_gap),
68049 + .mode = 0644,
68050 + .proc_handler = proc_doulongvec_minmax,
68051 + },
68052 #else
68053 {
68054 .procname = "nr_trim_pages",
68055 @@ -1729,6 +1778,17 @@ static int test_perm(int mode, int op)
68056 int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
68057 {
68058 int mode;
68059 + int error;
68060 +
68061 + if (table->parent != NULL && table->parent->procname != NULL &&
68062 + table->procname != NULL &&
68063 + gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
68064 + return -EACCES;
68065 + if (gr_handle_chroot_sysctl(op))
68066 + return -EACCES;
68067 + error = gr_handle_sysctl(table, op);
68068 + if (error)
68069 + return error;
68070
68071 if (root->permissions)
68072 mode = root->permissions(root, current->nsproxy, table);
68073 @@ -2133,6 +2193,16 @@ int proc_dostring(struct ctl_table *table, int write,
68074 buffer, lenp, ppos);
68075 }
68076
68077 +int proc_dostring_modpriv(struct ctl_table *table, int write,
68078 + void __user *buffer, size_t *lenp, loff_t *ppos)
68079 +{
68080 + if (write && !capable(CAP_SYS_MODULE))
68081 + return -EPERM;
68082 +
68083 + return _proc_do_string(table->data, table->maxlen, write,
68084 + buffer, lenp, ppos);
68085 +}
68086 +
68087 static size_t proc_skip_spaces(char **buf)
68088 {
68089 size_t ret;
68090 @@ -2238,6 +2308,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
68091 len = strlen(tmp);
68092 if (len > *size)
68093 len = *size;
68094 + if (len > sizeof(tmp))
68095 + len = sizeof(tmp);
68096 if (copy_to_user(*buf, tmp, len))
68097 return -EFAULT;
68098 *size -= len;
68099 @@ -2554,8 +2626,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
68100 *i = val;
68101 } else {
68102 val = convdiv * (*i) / convmul;
68103 - if (!first)
68104 + if (!first) {
68105 err = proc_put_char(&buffer, &left, '\t');
68106 + if (err)
68107 + break;
68108 + }
68109 err = proc_put_long(&buffer, &left, val, false);
68110 if (err)
68111 break;
68112 @@ -2950,6 +3025,12 @@ int proc_dostring(struct ctl_table *table, int write,
68113 return -ENOSYS;
68114 }
68115
68116 +int proc_dostring_modpriv(struct ctl_table *table, int write,
68117 + void __user *buffer, size_t *lenp, loff_t *ppos)
68118 +{
68119 + return -ENOSYS;
68120 +}
68121 +
68122 int proc_dointvec(struct ctl_table *table, int write,
68123 void __user *buffer, size_t *lenp, loff_t *ppos)
68124 {
68125 @@ -3006,6 +3087,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
68126 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
68127 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
68128 EXPORT_SYMBOL(proc_dostring);
68129 +EXPORT_SYMBOL(proc_dostring_modpriv);
68130 EXPORT_SYMBOL(proc_doulongvec_minmax);
68131 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
68132 EXPORT_SYMBOL(register_sysctl_table);
68133 diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
68134 index a650694..aaeeb20 100644
68135 --- a/kernel/sysctl_binary.c
68136 +++ b/kernel/sysctl_binary.c
68137 @@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *file,
68138 int i;
68139
68140 set_fs(KERNEL_DS);
68141 - result = vfs_read(file, buffer, BUFSZ - 1, &pos);
68142 + result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
68143 set_fs(old_fs);
68144 if (result < 0)
68145 goto out_kfree;
68146 @@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *file,
68147 }
68148
68149 set_fs(KERNEL_DS);
68150 - result = vfs_write(file, buffer, str - buffer, &pos);
68151 + result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
68152 set_fs(old_fs);
68153 if (result < 0)
68154 goto out_kfree;
68155 @@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file *file,
68156 int i;
68157
68158 set_fs(KERNEL_DS);
68159 - result = vfs_read(file, buffer, BUFSZ - 1, &pos);
68160 + result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
68161 set_fs(old_fs);
68162 if (result < 0)
68163 goto out_kfree;
68164 @@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file *file,
68165 }
68166
68167 set_fs(KERNEL_DS);
68168 - result = vfs_write(file, buffer, str - buffer, &pos);
68169 + result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
68170 set_fs(old_fs);
68171 if (result < 0)
68172 goto out_kfree;
68173 @@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *file,
68174 int i;
68175
68176 set_fs(KERNEL_DS);
68177 - result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
68178 + result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
68179 set_fs(old_fs);
68180 if (result < 0)
68181 goto out;
68182 @@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struct file *file,
68183 __le16 dnaddr;
68184
68185 set_fs(KERNEL_DS);
68186 - result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
68187 + result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
68188 set_fs(old_fs);
68189 if (result < 0)
68190 goto out;
68191 @@ -1233,7 +1233,7 @@ static ssize_t bin_dn_node_address(struct file *file,
68192 le16_to_cpu(dnaddr) & 0x3ff);
68193
68194 set_fs(KERNEL_DS);
68195 - result = vfs_write(file, buf, len, &pos);
68196 + result = vfs_write(file, (const char __force_user *)buf, len, &pos);
68197 set_fs(old_fs);
68198 if (result < 0)
68199 goto out;
68200 diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
68201 index 362da65..ab8ef8c 100644
68202 --- a/kernel/sysctl_check.c
68203 +++ b/kernel/sysctl_check.c
68204 @@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
68205 set_fail(&fail, table, "Directory with extra2");
68206 } else {
68207 if ((table->proc_handler == proc_dostring) ||
68208 + (table->proc_handler == proc_dostring_modpriv) ||
68209 (table->proc_handler == proc_dointvec) ||
68210 (table->proc_handler == proc_dointvec_minmax) ||
68211 (table->proc_handler == proc_dointvec_jiffies) ||
68212 diff --git a/kernel/taskstats.c b/kernel/taskstats.c
68213 index e660464..c8b9e67 100644
68214 --- a/kernel/taskstats.c
68215 +++ b/kernel/taskstats.c
68216 @@ -27,9 +27,12 @@
68217 #include <linux/cgroup.h>
68218 #include <linux/fs.h>
68219 #include <linux/file.h>
68220 +#include <linux/grsecurity.h>
68221 #include <net/genetlink.h>
68222 #include <linux/atomic.h>
68223
68224 +extern int gr_is_taskstats_denied(int pid);
68225 +
68226 /*
68227 * Maximum length of a cpumask that can be specified in
68228 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
68229 @@ -556,6 +559,9 @@ err:
68230
68231 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
68232 {
68233 + if (gr_is_taskstats_denied(current->pid))
68234 + return -EACCES;
68235 +
68236 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
68237 return cmd_attr_register_cpumask(info);
68238 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
68239 diff --git a/kernel/time.c b/kernel/time.c
68240 index 73e416d..cfc6f69 100644
68241 --- a/kernel/time.c
68242 +++ b/kernel/time.c
68243 @@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
68244 return error;
68245
68246 if (tz) {
68247 + /* we log in do_settimeofday called below, so don't log twice
68248 + */
68249 + if (!tv)
68250 + gr_log_timechange();
68251 +
68252 /* SMP safe, global irq locking makes it work. */
68253 sys_tz = *tz;
68254 update_vsyscall_tz();
68255 diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
68256 index 8a46f5d..bbe6f9c 100644
68257 --- a/kernel/time/alarmtimer.c
68258 +++ b/kernel/time/alarmtimer.c
68259 @@ -773,7 +773,7 @@ static int __init alarmtimer_init(void)
68260 struct platform_device *pdev;
68261 int error = 0;
68262 int i;
68263 - struct k_clock alarm_clock = {
68264 + static struct k_clock alarm_clock = {
68265 .clock_getres = alarm_clock_getres,
68266 .clock_get = alarm_clock_get,
68267 .timer_create = alarm_timer_create,
68268 diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
68269 index fd4a7b1..fae5c2a 100644
68270 --- a/kernel/time/tick-broadcast.c
68271 +++ b/kernel/time/tick-broadcast.c
68272 @@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
68273 * then clear the broadcast bit.
68274 */
68275 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
68276 - int cpu = smp_processor_id();
68277 + cpu = smp_processor_id();
68278
68279 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
68280 tick_broadcast_clear_oneshot(cpu);
68281 diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
68282 index 0c63581..e25dcb6 100644
68283 --- a/kernel/time/timekeeping.c
68284 +++ b/kernel/time/timekeeping.c
68285 @@ -14,6 +14,7 @@
68286 #include <linux/init.h>
68287 #include <linux/mm.h>
68288 #include <linux/sched.h>
68289 +#include <linux/grsecurity.h>
68290 #include <linux/syscore_ops.h>
68291 #include <linux/clocksource.h>
68292 #include <linux/jiffies.h>
68293 @@ -365,6 +366,8 @@ int do_settimeofday(const struct timespec *tv)
68294 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
68295 return -EINVAL;
68296
68297 + gr_log_timechange();
68298 +
68299 write_seqlock_irqsave(&xtime_lock, flags);
68300
68301 timekeeping_forward_now();
68302 diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
68303 index 3258455..f35227d 100644
68304 --- a/kernel/time/timer_list.c
68305 +++ b/kernel/time/timer_list.c
68306 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
68307
68308 static void print_name_offset(struct seq_file *m, void *sym)
68309 {
68310 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68311 + SEQ_printf(m, "<%p>", NULL);
68312 +#else
68313 char symname[KSYM_NAME_LEN];
68314
68315 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
68316 SEQ_printf(m, "<%pK>", sym);
68317 else
68318 SEQ_printf(m, "%s", symname);
68319 +#endif
68320 }
68321
68322 static void
68323 @@ -112,7 +116,11 @@ next_one:
68324 static void
68325 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
68326 {
68327 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68328 + SEQ_printf(m, " .base: %p\n", NULL);
68329 +#else
68330 SEQ_printf(m, " .base: %pK\n", base);
68331 +#endif
68332 SEQ_printf(m, " .index: %d\n",
68333 base->index);
68334 SEQ_printf(m, " .resolution: %Lu nsecs\n",
68335 @@ -293,7 +301,11 @@ static int __init init_timer_list_procfs(void)
68336 {
68337 struct proc_dir_entry *pe;
68338
68339 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
68340 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
68341 +#else
68342 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
68343 +#endif
68344 if (!pe)
68345 return -ENOMEM;
68346 return 0;
68347 diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
68348 index 0b537f2..9e71eca 100644
68349 --- a/kernel/time/timer_stats.c
68350 +++ b/kernel/time/timer_stats.c
68351 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
68352 static unsigned long nr_entries;
68353 static struct entry entries[MAX_ENTRIES];
68354
68355 -static atomic_t overflow_count;
68356 +static atomic_unchecked_t overflow_count;
68357
68358 /*
68359 * The entries are in a hash-table, for fast lookup:
68360 @@ -140,7 +140,7 @@ static void reset_entries(void)
68361 nr_entries = 0;
68362 memset(entries, 0, sizeof(entries));
68363 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
68364 - atomic_set(&overflow_count, 0);
68365 + atomic_set_unchecked(&overflow_count, 0);
68366 }
68367
68368 static struct entry *alloc_entry(void)
68369 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
68370 if (likely(entry))
68371 entry->count++;
68372 else
68373 - atomic_inc(&overflow_count);
68374 + atomic_inc_unchecked(&overflow_count);
68375
68376 out_unlock:
68377 raw_spin_unlock_irqrestore(lock, flags);
68378 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
68379
68380 static void print_name_offset(struct seq_file *m, unsigned long addr)
68381 {
68382 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68383 + seq_printf(m, "<%p>", NULL);
68384 +#else
68385 char symname[KSYM_NAME_LEN];
68386
68387 if (lookup_symbol_name(addr, symname) < 0)
68388 seq_printf(m, "<%p>", (void *)addr);
68389 else
68390 seq_printf(m, "%s", symname);
68391 +#endif
68392 }
68393
68394 static int tstats_show(struct seq_file *m, void *v)
68395 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
68396
68397 seq_puts(m, "Timer Stats Version: v0.2\n");
68398 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
68399 - if (atomic_read(&overflow_count))
68400 + if (atomic_read_unchecked(&overflow_count))
68401 seq_printf(m, "Overflow: %d entries\n",
68402 - atomic_read(&overflow_count));
68403 + atomic_read_unchecked(&overflow_count));
68404
68405 for (i = 0; i < nr_entries; i++) {
68406 entry = entries + i;
68407 @@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
68408 {
68409 struct proc_dir_entry *pe;
68410
68411 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
68412 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
68413 +#else
68414 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
68415 +#endif
68416 if (!pe)
68417 return -ENOMEM;
68418 return 0;
68419 diff --git a/kernel/timer.c b/kernel/timer.c
68420 index a297ffc..5e16b0b 100644
68421 --- a/kernel/timer.c
68422 +++ b/kernel/timer.c
68423 @@ -1354,7 +1354,7 @@ void update_process_times(int user_tick)
68424 /*
68425 * This function runs timers and the timer-tq in bottom half context.
68426 */
68427 -static void run_timer_softirq(struct softirq_action *h)
68428 +static void run_timer_softirq(void)
68429 {
68430 struct tvec_base *base = __this_cpu_read(tvec_bases);
68431
68432 diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
68433 index cdea7b5..9b820d4 100644
68434 --- a/kernel/trace/blktrace.c
68435 +++ b/kernel/trace/blktrace.c
68436 @@ -324,7 +324,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
68437 struct blk_trace *bt = filp->private_data;
68438 char buf[16];
68439
68440 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
68441 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
68442
68443 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
68444 }
68445 @@ -389,7 +389,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
68446 return 1;
68447
68448 bt = buf->chan->private_data;
68449 - atomic_inc(&bt->dropped);
68450 + atomic_inc_unchecked(&bt->dropped);
68451 return 0;
68452 }
68453
68454 @@ -490,7 +490,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
68455
68456 bt->dir = dir;
68457 bt->dev = dev;
68458 - atomic_set(&bt->dropped, 0);
68459 + atomic_set_unchecked(&bt->dropped, 0);
68460
68461 ret = -EIO;
68462 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
68463 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
68464 index 683d559..d70d914 100644
68465 --- a/kernel/trace/ftrace.c
68466 +++ b/kernel/trace/ftrace.c
68467 @@ -1726,12 +1726,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
68468 if (unlikely(ftrace_disabled))
68469 return 0;
68470
68471 + ret = ftrace_arch_code_modify_prepare();
68472 + FTRACE_WARN_ON(ret);
68473 + if (ret)
68474 + return 0;
68475 +
68476 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
68477 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
68478 if (ret) {
68479 ftrace_bug(ret, ip);
68480 - return 0;
68481 }
68482 - return 1;
68483 + return ret ? 0 : 1;
68484 }
68485
68486 /*
68487 @@ -2843,7 +2848,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
68488
68489 int
68490 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
68491 - void *data)
68492 + void *data)
68493 {
68494 struct ftrace_func_probe *entry;
68495 struct ftrace_page *pg;
68496 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
68497 index a3f1bc5..5e651718 100644
68498 --- a/kernel/trace/trace.c
68499 +++ b/kernel/trace/trace.c
68500 @@ -4254,10 +4254,9 @@ static const struct file_operations tracing_dyn_info_fops = {
68501 };
68502 #endif
68503
68504 -static struct dentry *d_tracer;
68505 -
68506 struct dentry *tracing_init_dentry(void)
68507 {
68508 + static struct dentry *d_tracer;
68509 static int once;
68510
68511 if (d_tracer)
68512 @@ -4277,10 +4276,9 @@ struct dentry *tracing_init_dentry(void)
68513 return d_tracer;
68514 }
68515
68516 -static struct dentry *d_percpu;
68517 -
68518 struct dentry *tracing_dentry_percpu(void)
68519 {
68520 + static struct dentry *d_percpu;
68521 static int once;
68522 struct dentry *d_tracer;
68523
68524 diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
68525 index c212a7f..7b02394 100644
68526 --- a/kernel/trace/trace_events.c
68527 +++ b/kernel/trace/trace_events.c
68528 @@ -1299,10 +1299,6 @@ static LIST_HEAD(ftrace_module_file_list);
68529 struct ftrace_module_file_ops {
68530 struct list_head list;
68531 struct module *mod;
68532 - struct file_operations id;
68533 - struct file_operations enable;
68534 - struct file_operations format;
68535 - struct file_operations filter;
68536 };
68537
68538 static struct ftrace_module_file_ops *
68539 @@ -1323,17 +1319,12 @@ trace_create_file_ops(struct module *mod)
68540
68541 file_ops->mod = mod;
68542
68543 - file_ops->id = ftrace_event_id_fops;
68544 - file_ops->id.owner = mod;
68545 -
68546 - file_ops->enable = ftrace_enable_fops;
68547 - file_ops->enable.owner = mod;
68548 -
68549 - file_ops->filter = ftrace_event_filter_fops;
68550 - file_ops->filter.owner = mod;
68551 -
68552 - file_ops->format = ftrace_event_format_fops;
68553 - file_ops->format.owner = mod;
68554 + pax_open_kernel();
68555 + *(void **)&mod->trace_id.owner = mod;
68556 + *(void **)&mod->trace_enable.owner = mod;
68557 + *(void **)&mod->trace_filter.owner = mod;
68558 + *(void **)&mod->trace_format.owner = mod;
68559 + pax_close_kernel();
68560
68561 list_add(&file_ops->list, &ftrace_module_file_list);
68562
68563 @@ -1357,8 +1348,8 @@ static void trace_module_add_events(struct module *mod)
68564
68565 for_each_event(call, start, end) {
68566 __trace_add_event_call(*call, mod,
68567 - &file_ops->id, &file_ops->enable,
68568 - &file_ops->filter, &file_ops->format);
68569 + &mod->trace_id, &mod->trace_enable,
68570 + &mod->trace_filter, &mod->trace_format);
68571 }
68572 }
68573
68574 diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
68575 index 00d527c..7c5b1a3 100644
68576 --- a/kernel/trace/trace_kprobe.c
68577 +++ b/kernel/trace/trace_kprobe.c
68578 @@ -217,7 +217,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
68579 long ret;
68580 int maxlen = get_rloc_len(*(u32 *)dest);
68581 u8 *dst = get_rloc_data(dest);
68582 - u8 *src = addr;
68583 + const u8 __user *src = (const u8 __force_user *)addr;
68584 mm_segment_t old_fs = get_fs();
68585 if (!maxlen)
68586 return;
68587 @@ -229,7 +229,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
68588 pagefault_disable();
68589 do
68590 ret = __copy_from_user_inatomic(dst++, src++, 1);
68591 - while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
68592 + while (dst[-1] && ret == 0 && src - (const u8 __force_user *)addr < maxlen);
68593 dst[-1] = '\0';
68594 pagefault_enable();
68595 set_fs(old_fs);
68596 @@ -238,7 +238,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
68597 ((u8 *)get_rloc_data(dest))[0] = '\0';
68598 *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
68599 } else
68600 - *(u32 *)dest = make_data_rloc(src - (u8 *)addr,
68601 + *(u32 *)dest = make_data_rloc(src - (const u8 __force_user *)addr,
68602 get_rloc_offs(*(u32 *)dest));
68603 }
68604 /* Return the length of string -- including null terminal byte */
68605 @@ -252,7 +252,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
68606 set_fs(KERNEL_DS);
68607 pagefault_disable();
68608 do {
68609 - ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
68610 + ret = __copy_from_user_inatomic(&c, (const u8 __force_user *)addr + len, 1);
68611 len++;
68612 } while (c && ret == 0 && len < MAX_STRING_SIZE);
68613 pagefault_enable();
68614 diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
68615 index fd3c8aa..5f324a6 100644
68616 --- a/kernel/trace/trace_mmiotrace.c
68617 +++ b/kernel/trace/trace_mmiotrace.c
68618 @@ -24,7 +24,7 @@ struct header_iter {
68619 static struct trace_array *mmio_trace_array;
68620 static bool overrun_detected;
68621 static unsigned long prev_overruns;
68622 -static atomic_t dropped_count;
68623 +static atomic_unchecked_t dropped_count;
68624
68625 static void mmio_reset_data(struct trace_array *tr)
68626 {
68627 @@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
68628
68629 static unsigned long count_overruns(struct trace_iterator *iter)
68630 {
68631 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
68632 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
68633 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
68634
68635 if (over > prev_overruns)
68636 @@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
68637 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
68638 sizeof(*entry), 0, pc);
68639 if (!event) {
68640 - atomic_inc(&dropped_count);
68641 + atomic_inc_unchecked(&dropped_count);
68642 return;
68643 }
68644 entry = ring_buffer_event_data(event);
68645 @@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
68646 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
68647 sizeof(*entry), 0, pc);
68648 if (!event) {
68649 - atomic_inc(&dropped_count);
68650 + atomic_inc_unchecked(&dropped_count);
68651 return;
68652 }
68653 entry = ring_buffer_event_data(event);
68654 diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
68655 index 0d6ff35..67e0ed7 100644
68656 --- a/kernel/trace/trace_output.c
68657 +++ b/kernel/trace/trace_output.c
68658 @@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path)
68659
68660 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
68661 if (!IS_ERR(p)) {
68662 - p = mangle_path(s->buffer + s->len, p, "\n");
68663 + p = mangle_path(s->buffer + s->len, p, "\n\\");
68664 if (p) {
68665 s->len = p - s->buffer;
68666 return 1;
68667 diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
68668 index d4545f4..a9010a1 100644
68669 --- a/kernel/trace/trace_stack.c
68670 +++ b/kernel/trace/trace_stack.c
68671 @@ -53,7 +53,7 @@ static inline void check_stack(void)
68672 return;
68673
68674 /* we do not handle interrupt stacks yet */
68675 - if (!object_is_on_stack(&this_size))
68676 + if (!object_starts_on_stack(&this_size))
68677 return;
68678
68679 local_irq_save(flags);
68680 diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
68681 index 209b379..7f76423 100644
68682 --- a/kernel/trace/trace_workqueue.c
68683 +++ b/kernel/trace/trace_workqueue.c
68684 @@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
68685 int cpu;
68686 pid_t pid;
68687 /* Can be inserted from interrupt or user context, need to be atomic */
68688 - atomic_t inserted;
68689 + atomic_unchecked_t inserted;
68690 /*
68691 * Don't need to be atomic, works are serialized in a single workqueue thread
68692 * on a single CPU.
68693 @@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
68694 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
68695 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
68696 if (node->pid == wq_thread->pid) {
68697 - atomic_inc(&node->inserted);
68698 + atomic_inc_unchecked(&node->inserted);
68699 goto found;
68700 }
68701 }
68702 @@ -210,7 +210,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
68703 tsk = get_pid_task(pid, PIDTYPE_PID);
68704 if (tsk) {
68705 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
68706 - atomic_read(&cws->inserted), cws->executed,
68707 + atomic_read_unchecked(&cws->inserted), cws->executed,
68708 tsk->comm);
68709 put_task_struct(tsk);
68710 }
68711 diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
68712 index 8745ac7..d144e37 100644
68713 --- a/lib/Kconfig.debug
68714 +++ b/lib/Kconfig.debug
68715 @@ -1103,6 +1103,7 @@ config LATENCYTOP
68716 depends on DEBUG_KERNEL
68717 depends on STACKTRACE_SUPPORT
68718 depends on PROC_FS
68719 + depends on !GRKERNSEC_HIDESYM
68720 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
68721 select KALLSYMS
68722 select KALLSYMS_ALL
68723 diff --git a/lib/bitmap.c b/lib/bitmap.c
68724 index 0d4a127..33a06c7 100644
68725 --- a/lib/bitmap.c
68726 +++ b/lib/bitmap.c
68727 @@ -419,7 +419,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
68728 {
68729 int c, old_c, totaldigits, ndigits, nchunks, nbits;
68730 u32 chunk;
68731 - const char __user __force *ubuf = (const char __user __force *)buf;
68732 + const char __user *ubuf = (const char __force_user *)buf;
68733
68734 bitmap_zero(maskp, nmaskbits);
68735
68736 @@ -504,7 +504,7 @@ int bitmap_parse_user(const char __user *ubuf,
68737 {
68738 if (!access_ok(VERIFY_READ, ubuf, ulen))
68739 return -EFAULT;
68740 - return __bitmap_parse((const char __force *)ubuf,
68741 + return __bitmap_parse((const char __force_kernel *)ubuf,
68742 ulen, 1, maskp, nmaskbits);
68743
68744 }
68745 @@ -596,7 +596,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
68746 {
68747 unsigned a, b;
68748 int c, old_c, totaldigits;
68749 - const char __user __force *ubuf = (const char __user __force *)buf;
68750 + const char __user *ubuf = (const char __force_user *)buf;
68751 int exp_digit, in_range;
68752
68753 totaldigits = c = 0;
68754 @@ -696,7 +696,7 @@ int bitmap_parselist_user(const char __user *ubuf,
68755 {
68756 if (!access_ok(VERIFY_READ, ubuf, ulen))
68757 return -EFAULT;
68758 - return __bitmap_parselist((const char __force *)ubuf,
68759 + return __bitmap_parselist((const char __force_kernel *)ubuf,
68760 ulen, 1, maskp, nmaskbits);
68761 }
68762 EXPORT_SYMBOL(bitmap_parselist_user);
68763 diff --git a/lib/bug.c b/lib/bug.c
68764 index a28c141..2bd3d95 100644
68765 --- a/lib/bug.c
68766 +++ b/lib/bug.c
68767 @@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
68768 return BUG_TRAP_TYPE_NONE;
68769
68770 bug = find_bug(bugaddr);
68771 + if (!bug)
68772 + return BUG_TRAP_TYPE_NONE;
68773
68774 file = NULL;
68775 line = 0;
68776 diff --git a/lib/debugobjects.c b/lib/debugobjects.c
68777 index 0ab9ae8..f01ceca 100644
68778 --- a/lib/debugobjects.c
68779 +++ b/lib/debugobjects.c
68780 @@ -288,7 +288,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
68781 if (limit > 4)
68782 return;
68783
68784 - is_on_stack = object_is_on_stack(addr);
68785 + is_on_stack = object_starts_on_stack(addr);
68786 if (is_on_stack == onstack)
68787 return;
68788
68789 diff --git a/lib/devres.c b/lib/devres.c
68790 index 9676617..5149e15 100644
68791 --- a/lib/devres.c
68792 +++ b/lib/devres.c
68793 @@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
68794 void devm_iounmap(struct device *dev, void __iomem *addr)
68795 {
68796 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
68797 - (void *)addr));
68798 + (void __force *)addr));
68799 iounmap(addr);
68800 }
68801 EXPORT_SYMBOL(devm_iounmap);
68802 @@ -192,7 +192,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
68803 {
68804 ioport_unmap(addr);
68805 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
68806 - devm_ioport_map_match, (void *)addr));
68807 + devm_ioport_map_match, (void __force *)addr));
68808 }
68809 EXPORT_SYMBOL(devm_ioport_unmap);
68810
68811 diff --git a/lib/dma-debug.c b/lib/dma-debug.c
68812 index fea790a..ebb0e82 100644
68813 --- a/lib/dma-debug.c
68814 +++ b/lib/dma-debug.c
68815 @@ -925,7 +925,7 @@ out:
68816
68817 static void check_for_stack(struct device *dev, void *addr)
68818 {
68819 - if (object_is_on_stack(addr))
68820 + if (object_starts_on_stack(addr))
68821 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
68822 "stack [addr=%p]\n", addr);
68823 }
68824 diff --git a/lib/extable.c b/lib/extable.c
68825 index 4cac81e..63e9b8f 100644
68826 --- a/lib/extable.c
68827 +++ b/lib/extable.c
68828 @@ -13,6 +13,7 @@
68829 #include <linux/init.h>
68830 #include <linux/sort.h>
68831 #include <asm/uaccess.h>
68832 +#include <asm/pgtable.h>
68833
68834 #ifndef ARCH_HAS_SORT_EXTABLE
68835 /*
68836 @@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const void *b)
68837 void sort_extable(struct exception_table_entry *start,
68838 struct exception_table_entry *finish)
68839 {
68840 + pax_open_kernel();
68841 sort(start, finish - start, sizeof(struct exception_table_entry),
68842 cmp_ex, NULL);
68843 + pax_close_kernel();
68844 }
68845
68846 #ifdef CONFIG_MODULES
68847 diff --git a/lib/inflate.c b/lib/inflate.c
68848 index 013a761..c28f3fc 100644
68849 --- a/lib/inflate.c
68850 +++ b/lib/inflate.c
68851 @@ -269,7 +269,7 @@ static void free(void *where)
68852 malloc_ptr = free_mem_ptr;
68853 }
68854 #else
68855 -#define malloc(a) kmalloc(a, GFP_KERNEL)
68856 +#define malloc(a) kmalloc((a), GFP_KERNEL)
68857 #define free(a) kfree(a)
68858 #endif
68859
68860 diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
68861 index bd2bea9..6b3c95e 100644
68862 --- a/lib/is_single_threaded.c
68863 +++ b/lib/is_single_threaded.c
68864 @@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
68865 struct task_struct *p, *t;
68866 bool ret;
68867
68868 + if (!mm)
68869 + return true;
68870 +
68871 if (atomic_read(&task->signal->live) != 1)
68872 return false;
68873
68874 diff --git a/lib/radix-tree.c b/lib/radix-tree.c
68875 index dc63d08..95ae14a 100644
68876 --- a/lib/radix-tree.c
68877 +++ b/lib/radix-tree.c
68878 @@ -78,7 +78,7 @@ struct radix_tree_preload {
68879 int nr;
68880 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
68881 };
68882 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
68883 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
68884
68885 static inline void *ptr_to_indirect(void *ptr)
68886 {
68887 diff --git a/lib/vsprintf.c b/lib/vsprintf.c
68888 index 38e612e..4fb99a8 100644
68889 --- a/lib/vsprintf.c
68890 +++ b/lib/vsprintf.c
68891 @@ -16,6 +16,9 @@
68892 * - scnprintf and vscnprintf
68893 */
68894
68895 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68896 +#define __INCLUDED_BY_HIDESYM 1
68897 +#endif
68898 #include <stdarg.h>
68899 #include <linux/module.h>
68900 #include <linux/types.h>
68901 @@ -413,7 +416,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
68902 char sym[KSYM_SYMBOL_LEN];
68903 if (ext == 'B')
68904 sprint_backtrace(sym, value);
68905 - else if (ext != 'f' && ext != 's')
68906 + else if (ext != 'f' && ext != 's' && ext != 'a')
68907 sprint_symbol(sym, value);
68908 else
68909 kallsyms_lookup(value, NULL, NULL, NULL, sym);
68910 @@ -789,7 +792,11 @@ char *netdev_feature_string(char *buf, char *end, const u8 *addr,
68911 return number(buf, end, *(const netdev_features_t *)addr, spec);
68912 }
68913
68914 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68915 +int kptr_restrict __read_mostly = 2;
68916 +#else
68917 int kptr_restrict __read_mostly;
68918 +#endif
68919
68920 /*
68921 * Show a '%p' thing. A kernel extension is that the '%p' is followed
68922 @@ -803,6 +810,8 @@ int kptr_restrict __read_mostly;
68923 * - 'S' For symbolic direct pointers with offset
68924 * - 's' For symbolic direct pointers without offset
68925 * - 'B' For backtraced symbolic direct pointers with offset
68926 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
68927 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
68928 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
68929 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
68930 * - 'M' For a 6-byte MAC address, it prints the address in the
68931 @@ -848,12 +857,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
68932 {
68933 if (!ptr && *fmt != 'K') {
68934 /*
68935 - * Print (null) with the same width as a pointer so it makes
68936 + * Print (nil) with the same width as a pointer so it makes
68937 * tabular output look nice.
68938 */
68939 if (spec.field_width == -1)
68940 spec.field_width = 2 * sizeof(void *);
68941 - return string(buf, end, "(null)", spec);
68942 + return string(buf, end, "(nil)", spec);
68943 }
68944
68945 switch (*fmt) {
68946 @@ -863,6 +872,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
68947 /* Fallthrough */
68948 case 'S':
68949 case 's':
68950 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68951 + break;
68952 +#else
68953 + return symbol_string(buf, end, ptr, spec, *fmt);
68954 +#endif
68955 + case 'A':
68956 + case 'a':
68957 case 'B':
68958 return symbol_string(buf, end, ptr, spec, *fmt);
68959 case 'R':
68960 @@ -1633,11 +1649,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
68961 typeof(type) value; \
68962 if (sizeof(type) == 8) { \
68963 args = PTR_ALIGN(args, sizeof(u32)); \
68964 - *(u32 *)&value = *(u32 *)args; \
68965 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
68966 + *(u32 *)&value = *(const u32 *)args; \
68967 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
68968 } else { \
68969 args = PTR_ALIGN(args, sizeof(type)); \
68970 - value = *(typeof(type) *)args; \
68971 + value = *(const typeof(type) *)args; \
68972 } \
68973 args += sizeof(type); \
68974 value; \
68975 @@ -1700,7 +1716,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
68976 case FORMAT_TYPE_STR: {
68977 const char *str_arg = args;
68978 args += strlen(str_arg) + 1;
68979 - str = string(str, end, (char *)str_arg, spec);
68980 + str = string(str, end, str_arg, spec);
68981 break;
68982 }
68983
68984 diff --git a/localversion-grsec b/localversion-grsec
68985 new file mode 100644
68986 index 0000000..7cd6065
68987 --- /dev/null
68988 +++ b/localversion-grsec
68989 @@ -0,0 +1 @@
68990 +-grsec
68991 diff --git a/mm/Kconfig b/mm/Kconfig
68992 index e338407..49b5b7a 100644
68993 --- a/mm/Kconfig
68994 +++ b/mm/Kconfig
68995 @@ -247,10 +247,10 @@ config KSM
68996 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
68997
68998 config DEFAULT_MMAP_MIN_ADDR
68999 - int "Low address space to protect from user allocation"
69000 + int "Low address space to protect from user allocation"
69001 depends on MMU
69002 - default 4096
69003 - help
69004 + default 65536
69005 + help
69006 This is the portion of low virtual memory which should be protected
69007 from userspace allocation. Keeping a user from writing to low pages
69008 can help reduce the impact of kernel NULL pointer bugs.
69009 diff --git a/mm/filemap.c b/mm/filemap.c
69010 index b662757..3081ddd 100644
69011 --- a/mm/filemap.c
69012 +++ b/mm/filemap.c
69013 @@ -1770,7 +1770,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
69014 struct address_space *mapping = file->f_mapping;
69015
69016 if (!mapping->a_ops->readpage)
69017 - return -ENOEXEC;
69018 + return -ENODEV;
69019 file_accessed(file);
69020 vma->vm_ops = &generic_file_vm_ops;
69021 vma->vm_flags |= VM_CAN_NONLINEAR;
69022 @@ -2176,6 +2176,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
69023 *pos = i_size_read(inode);
69024
69025 if (limit != RLIM_INFINITY) {
69026 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
69027 if (*pos >= limit) {
69028 send_sig(SIGXFSZ, current, 0);
69029 return -EFBIG;
69030 diff --git a/mm/fremap.c b/mm/fremap.c
69031 index 9ed4fd4..c42648d 100644
69032 --- a/mm/fremap.c
69033 +++ b/mm/fremap.c
69034 @@ -155,6 +155,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
69035 retry:
69036 vma = find_vma(mm, start);
69037
69038 +#ifdef CONFIG_PAX_SEGMEXEC
69039 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
69040 + goto out;
69041 +#endif
69042 +
69043 /*
69044 * Make sure the vma is shared, that it supports prefaulting,
69045 * and that the remapped range is valid and fully within
69046 diff --git a/mm/highmem.c b/mm/highmem.c
69047 index 57d82c6..e9e0552 100644
69048 --- a/mm/highmem.c
69049 +++ b/mm/highmem.c
69050 @@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
69051 * So no dangers, even with speculative execution.
69052 */
69053 page = pte_page(pkmap_page_table[i]);
69054 + pax_open_kernel();
69055 pte_clear(&init_mm, (unsigned long)page_address(page),
69056 &pkmap_page_table[i]);
69057 -
69058 + pax_close_kernel();
69059 set_page_address(page, NULL);
69060 need_flush = 1;
69061 }
69062 @@ -186,9 +187,11 @@ start:
69063 }
69064 }
69065 vaddr = PKMAP_ADDR(last_pkmap_nr);
69066 +
69067 + pax_open_kernel();
69068 set_pte_at(&init_mm, vaddr,
69069 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
69070 -
69071 + pax_close_kernel();
69072 pkmap_count[last_pkmap_nr] = 1;
69073 set_page_address(page, (void *)vaddr);
69074
69075 diff --git a/mm/huge_memory.c b/mm/huge_memory.c
69076 index 8f7fc39..69bf1e9 100644
69077 --- a/mm/huge_memory.c
69078 +++ b/mm/huge_memory.c
69079 @@ -733,7 +733,7 @@ out:
69080 * run pte_offset_map on the pmd, if an huge pmd could
69081 * materialize from under us from a different thread.
69082 */
69083 - if (unlikely(__pte_alloc(mm, vma, pmd, address)))
69084 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
69085 return VM_FAULT_OOM;
69086 /* if an huge pmd materialized from under us just retry later */
69087 if (unlikely(pmd_trans_huge(*pmd)))
69088 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
69089 index a876871..132cde0 100644
69090 --- a/mm/hugetlb.c
69091 +++ b/mm/hugetlb.c
69092 @@ -2346,6 +2346,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
69093 return 1;
69094 }
69095
69096 +#ifdef CONFIG_PAX_SEGMEXEC
69097 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
69098 +{
69099 + struct mm_struct *mm = vma->vm_mm;
69100 + struct vm_area_struct *vma_m;
69101 + unsigned long address_m;
69102 + pte_t *ptep_m;
69103 +
69104 + vma_m = pax_find_mirror_vma(vma);
69105 + if (!vma_m)
69106 + return;
69107 +
69108 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
69109 + address_m = address + SEGMEXEC_TASK_SIZE;
69110 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
69111 + get_page(page_m);
69112 + hugepage_add_anon_rmap(page_m, vma_m, address_m);
69113 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
69114 +}
69115 +#endif
69116 +
69117 /*
69118 * Hugetlb_cow() should be called with page lock of the original hugepage held.
69119 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
69120 @@ -2459,6 +2480,11 @@ retry_avoidcopy:
69121 make_huge_pte(vma, new_page, 1));
69122 page_remove_rmap(old_page);
69123 hugepage_add_new_anon_rmap(new_page, vma, address);
69124 +
69125 +#ifdef CONFIG_PAX_SEGMEXEC
69126 + pax_mirror_huge_pte(vma, address, new_page);
69127 +#endif
69128 +
69129 /* Make the old page be freed below */
69130 new_page = old_page;
69131 mmu_notifier_invalidate_range_end(mm,
69132 @@ -2613,6 +2639,10 @@ retry:
69133 && (vma->vm_flags & VM_SHARED)));
69134 set_huge_pte_at(mm, address, ptep, new_pte);
69135
69136 +#ifdef CONFIG_PAX_SEGMEXEC
69137 + pax_mirror_huge_pte(vma, address, page);
69138 +#endif
69139 +
69140 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
69141 /* Optimization, do the COW without a second fault */
69142 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
69143 @@ -2642,6 +2672,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
69144 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
69145 struct hstate *h = hstate_vma(vma);
69146
69147 +#ifdef CONFIG_PAX_SEGMEXEC
69148 + struct vm_area_struct *vma_m;
69149 +#endif
69150 +
69151 address &= huge_page_mask(h);
69152
69153 ptep = huge_pte_offset(mm, address);
69154 @@ -2655,6 +2689,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
69155 VM_FAULT_SET_HINDEX(h - hstates);
69156 }
69157
69158 +#ifdef CONFIG_PAX_SEGMEXEC
69159 + vma_m = pax_find_mirror_vma(vma);
69160 + if (vma_m) {
69161 + unsigned long address_m;
69162 +
69163 + if (vma->vm_start > vma_m->vm_start) {
69164 + address_m = address;
69165 + address -= SEGMEXEC_TASK_SIZE;
69166 + vma = vma_m;
69167 + h = hstate_vma(vma);
69168 + } else
69169 + address_m = address + SEGMEXEC_TASK_SIZE;
69170 +
69171 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
69172 + return VM_FAULT_OOM;
69173 + address_m &= HPAGE_MASK;
69174 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
69175 + }
69176 +#endif
69177 +
69178 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
69179 if (!ptep)
69180 return VM_FAULT_OOM;
69181 diff --git a/mm/internal.h b/mm/internal.h
69182 index 2189af4..f2ca332 100644
69183 --- a/mm/internal.h
69184 +++ b/mm/internal.h
69185 @@ -95,6 +95,7 @@ extern void putback_lru_page(struct page *page);
69186 * in mm/page_alloc.c
69187 */
69188 extern void __free_pages_bootmem(struct page *page, unsigned int order);
69189 +extern void free_compound_page(struct page *page);
69190 extern void prep_compound_page(struct page *page, unsigned long order);
69191 #ifdef CONFIG_MEMORY_FAILURE
69192 extern bool is_free_buddy_page(struct page *page);
69193 diff --git a/mm/kmemleak.c b/mm/kmemleak.c
69194 index 45eb621..6ccd8ea 100644
69195 --- a/mm/kmemleak.c
69196 +++ b/mm/kmemleak.c
69197 @@ -363,7 +363,7 @@ static void print_unreferenced(struct seq_file *seq,
69198
69199 for (i = 0; i < object->trace_len; i++) {
69200 void *ptr = (void *)object->trace[i];
69201 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
69202 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
69203 }
69204 }
69205
69206 diff --git a/mm/maccess.c b/mm/maccess.c
69207 index d53adf9..03a24bf 100644
69208 --- a/mm/maccess.c
69209 +++ b/mm/maccess.c
69210 @@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
69211 set_fs(KERNEL_DS);
69212 pagefault_disable();
69213 ret = __copy_from_user_inatomic(dst,
69214 - (__force const void __user *)src, size);
69215 + (const void __force_user *)src, size);
69216 pagefault_enable();
69217 set_fs(old_fs);
69218
69219 @@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
69220
69221 set_fs(KERNEL_DS);
69222 pagefault_disable();
69223 - ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
69224 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
69225 pagefault_enable();
69226 set_fs(old_fs);
69227
69228 diff --git a/mm/madvise.c b/mm/madvise.c
69229 index 74bf193..feb6fd3 100644
69230 --- a/mm/madvise.c
69231 +++ b/mm/madvise.c
69232 @@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
69233 pgoff_t pgoff;
69234 unsigned long new_flags = vma->vm_flags;
69235
69236 +#ifdef CONFIG_PAX_SEGMEXEC
69237 + struct vm_area_struct *vma_m;
69238 +#endif
69239 +
69240 switch (behavior) {
69241 case MADV_NORMAL:
69242 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
69243 @@ -110,6 +114,13 @@ success:
69244 /*
69245 * vm_flags is protected by the mmap_sem held in write mode.
69246 */
69247 +
69248 +#ifdef CONFIG_PAX_SEGMEXEC
69249 + vma_m = pax_find_mirror_vma(vma);
69250 + if (vma_m)
69251 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
69252 +#endif
69253 +
69254 vma->vm_flags = new_flags;
69255
69256 out:
69257 @@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
69258 struct vm_area_struct ** prev,
69259 unsigned long start, unsigned long end)
69260 {
69261 +
69262 +#ifdef CONFIG_PAX_SEGMEXEC
69263 + struct vm_area_struct *vma_m;
69264 +#endif
69265 +
69266 *prev = vma;
69267 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
69268 return -EINVAL;
69269 @@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
69270 zap_page_range(vma, start, end - start, &details);
69271 } else
69272 zap_page_range(vma, start, end - start, NULL);
69273 +
69274 +#ifdef CONFIG_PAX_SEGMEXEC
69275 + vma_m = pax_find_mirror_vma(vma);
69276 + if (vma_m) {
69277 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
69278 + struct zap_details details = {
69279 + .nonlinear_vma = vma_m,
69280 + .last_index = ULONG_MAX,
69281 + };
69282 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
69283 + } else
69284 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
69285 + }
69286 +#endif
69287 +
69288 return 0;
69289 }
69290
69291 @@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
69292 if (end < start)
69293 goto out;
69294
69295 +#ifdef CONFIG_PAX_SEGMEXEC
69296 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
69297 + if (end > SEGMEXEC_TASK_SIZE)
69298 + goto out;
69299 + } else
69300 +#endif
69301 +
69302 + if (end > TASK_SIZE)
69303 + goto out;
69304 +
69305 error = 0;
69306 if (end == start)
69307 goto out;
69308 diff --git a/mm/memory-failure.c b/mm/memory-failure.c
69309 index 56080ea..115071e 100644
69310 --- a/mm/memory-failure.c
69311 +++ b/mm/memory-failure.c
69312 @@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
69313
69314 int sysctl_memory_failure_recovery __read_mostly = 1;
69315
69316 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
69317 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
69318
69319 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
69320
69321 @@ -202,7 +202,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
69322 si.si_signo = SIGBUS;
69323 si.si_errno = 0;
69324 si.si_code = BUS_MCEERR_AO;
69325 - si.si_addr = (void *)addr;
69326 + si.si_addr = (void __user *)addr;
69327 #ifdef __ARCH_SI_TRAPNO
69328 si.si_trapno = trapno;
69329 #endif
69330 @@ -1010,7 +1010,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
69331 }
69332
69333 nr_pages = 1 << compound_trans_order(hpage);
69334 - atomic_long_add(nr_pages, &mce_bad_pages);
69335 + atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
69336
69337 /*
69338 * We need/can do nothing about count=0 pages.
69339 @@ -1040,7 +1040,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
69340 if (!PageHWPoison(hpage)
69341 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
69342 || (p != hpage && TestSetPageHWPoison(hpage))) {
69343 - atomic_long_sub(nr_pages, &mce_bad_pages);
69344 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
69345 return 0;
69346 }
69347 set_page_hwpoison_huge_page(hpage);
69348 @@ -1098,7 +1098,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
69349 }
69350 if (hwpoison_filter(p)) {
69351 if (TestClearPageHWPoison(p))
69352 - atomic_long_sub(nr_pages, &mce_bad_pages);
69353 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
69354 unlock_page(hpage);
69355 put_page(hpage);
69356 return 0;
69357 @@ -1315,7 +1315,7 @@ int unpoison_memory(unsigned long pfn)
69358 return 0;
69359 }
69360 if (TestClearPageHWPoison(p))
69361 - atomic_long_sub(nr_pages, &mce_bad_pages);
69362 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
69363 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
69364 return 0;
69365 }
69366 @@ -1329,7 +1329,7 @@ int unpoison_memory(unsigned long pfn)
69367 */
69368 if (TestClearPageHWPoison(page)) {
69369 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
69370 - atomic_long_sub(nr_pages, &mce_bad_pages);
69371 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
69372 freeit = 1;
69373 if (PageHuge(page))
69374 clear_page_hwpoison_huge_page(page);
69375 @@ -1442,7 +1442,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
69376 }
69377 done:
69378 if (!PageHWPoison(hpage))
69379 - atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
69380 + atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
69381 set_page_hwpoison_huge_page(hpage);
69382 dequeue_hwpoisoned_huge_page(hpage);
69383 /* keep elevated page count for bad page */
69384 @@ -1573,7 +1573,7 @@ int soft_offline_page(struct page *page, int flags)
69385 return ret;
69386
69387 done:
69388 - atomic_long_add(1, &mce_bad_pages);
69389 + atomic_long_add_unchecked(1, &mce_bad_pages);
69390 SetPageHWPoison(page);
69391 /* keep elevated page count for bad page */
69392 return ret;
69393 diff --git a/mm/memory.c b/mm/memory.c
69394 index fa2f04e..a8a40c8 100644
69395 --- a/mm/memory.c
69396 +++ b/mm/memory.c
69397 @@ -457,8 +457,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
69398 return;
69399
69400 pmd = pmd_offset(pud, start);
69401 +
69402 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
69403 pud_clear(pud);
69404 pmd_free_tlb(tlb, pmd, start);
69405 +#endif
69406 +
69407 }
69408
69409 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
69410 @@ -489,9 +493,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
69411 if (end - 1 > ceiling - 1)
69412 return;
69413
69414 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
69415 pud = pud_offset(pgd, start);
69416 pgd_clear(pgd);
69417 pud_free_tlb(tlb, pud, start);
69418 +#endif
69419 +
69420 }
69421
69422 /*
69423 @@ -1585,12 +1592,6 @@ no_page_table:
69424 return page;
69425 }
69426
69427 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
69428 -{
69429 - return stack_guard_page_start(vma, addr) ||
69430 - stack_guard_page_end(vma, addr+PAGE_SIZE);
69431 -}
69432 -
69433 /**
69434 * __get_user_pages() - pin user pages in memory
69435 * @tsk: task_struct of target task
69436 @@ -1663,10 +1664,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
69437 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
69438 i = 0;
69439
69440 - do {
69441 + while (nr_pages) {
69442 struct vm_area_struct *vma;
69443
69444 - vma = find_extend_vma(mm, start);
69445 + vma = find_vma(mm, start);
69446 if (!vma && in_gate_area(mm, start)) {
69447 unsigned long pg = start & PAGE_MASK;
69448 pgd_t *pgd;
69449 @@ -1714,7 +1715,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
69450 goto next_page;
69451 }
69452
69453 - if (!vma ||
69454 + if (!vma || start < vma->vm_start ||
69455 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
69456 !(vm_flags & vma->vm_flags))
69457 return i ? : -EFAULT;
69458 @@ -1741,11 +1742,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
69459 int ret;
69460 unsigned int fault_flags = 0;
69461
69462 - /* For mlock, just skip the stack guard page. */
69463 - if (foll_flags & FOLL_MLOCK) {
69464 - if (stack_guard_page(vma, start))
69465 - goto next_page;
69466 - }
69467 if (foll_flags & FOLL_WRITE)
69468 fault_flags |= FAULT_FLAG_WRITE;
69469 if (nonblocking)
69470 @@ -1819,7 +1815,7 @@ next_page:
69471 start += PAGE_SIZE;
69472 nr_pages--;
69473 } while (nr_pages && start < vma->vm_end);
69474 - } while (nr_pages);
69475 + }
69476 return i;
69477 }
69478 EXPORT_SYMBOL(__get_user_pages);
69479 @@ -2026,6 +2022,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
69480 page_add_file_rmap(page);
69481 set_pte_at(mm, addr, pte, mk_pte(page, prot));
69482
69483 +#ifdef CONFIG_PAX_SEGMEXEC
69484 + pax_mirror_file_pte(vma, addr, page, ptl);
69485 +#endif
69486 +
69487 retval = 0;
69488 pte_unmap_unlock(pte, ptl);
69489 return retval;
69490 @@ -2060,10 +2060,22 @@ out:
69491 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
69492 struct page *page)
69493 {
69494 +
69495 +#ifdef CONFIG_PAX_SEGMEXEC
69496 + struct vm_area_struct *vma_m;
69497 +#endif
69498 +
69499 if (addr < vma->vm_start || addr >= vma->vm_end)
69500 return -EFAULT;
69501 if (!page_count(page))
69502 return -EINVAL;
69503 +
69504 +#ifdef CONFIG_PAX_SEGMEXEC
69505 + vma_m = pax_find_mirror_vma(vma);
69506 + if (vma_m)
69507 + vma_m->vm_flags |= VM_INSERTPAGE;
69508 +#endif
69509 +
69510 vma->vm_flags |= VM_INSERTPAGE;
69511 return insert_page(vma, addr, page, vma->vm_page_prot);
69512 }
69513 @@ -2149,6 +2161,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
69514 unsigned long pfn)
69515 {
69516 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
69517 + BUG_ON(vma->vm_mirror);
69518
69519 if (addr < vma->vm_start || addr >= vma->vm_end)
69520 return -EFAULT;
69521 @@ -2464,6 +2477,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
69522 copy_user_highpage(dst, src, va, vma);
69523 }
69524
69525 +#ifdef CONFIG_PAX_SEGMEXEC
69526 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
69527 +{
69528 + struct mm_struct *mm = vma->vm_mm;
69529 + spinlock_t *ptl;
69530 + pte_t *pte, entry;
69531 +
69532 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
69533 + entry = *pte;
69534 + if (!pte_present(entry)) {
69535 + if (!pte_none(entry)) {
69536 + BUG_ON(pte_file(entry));
69537 + free_swap_and_cache(pte_to_swp_entry(entry));
69538 + pte_clear_not_present_full(mm, address, pte, 0);
69539 + }
69540 + } else {
69541 + struct page *page;
69542 +
69543 + flush_cache_page(vma, address, pte_pfn(entry));
69544 + entry = ptep_clear_flush(vma, address, pte);
69545 + BUG_ON(pte_dirty(entry));
69546 + page = vm_normal_page(vma, address, entry);
69547 + if (page) {
69548 + update_hiwater_rss(mm);
69549 + if (PageAnon(page))
69550 + dec_mm_counter_fast(mm, MM_ANONPAGES);
69551 + else
69552 + dec_mm_counter_fast(mm, MM_FILEPAGES);
69553 + page_remove_rmap(page);
69554 + page_cache_release(page);
69555 + }
69556 + }
69557 + pte_unmap_unlock(pte, ptl);
69558 +}
69559 +
69560 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
69561 + *
69562 + * the ptl of the lower mapped page is held on entry and is not released on exit
69563 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
69564 + */
69565 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
69566 +{
69567 + struct mm_struct *mm = vma->vm_mm;
69568 + unsigned long address_m;
69569 + spinlock_t *ptl_m;
69570 + struct vm_area_struct *vma_m;
69571 + pmd_t *pmd_m;
69572 + pte_t *pte_m, entry_m;
69573 +
69574 + BUG_ON(!page_m || !PageAnon(page_m));
69575 +
69576 + vma_m = pax_find_mirror_vma(vma);
69577 + if (!vma_m)
69578 + return;
69579 +
69580 + BUG_ON(!PageLocked(page_m));
69581 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
69582 + address_m = address + SEGMEXEC_TASK_SIZE;
69583 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
69584 + pte_m = pte_offset_map(pmd_m, address_m);
69585 + ptl_m = pte_lockptr(mm, pmd_m);
69586 + if (ptl != ptl_m) {
69587 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
69588 + if (!pte_none(*pte_m))
69589 + goto out;
69590 + }
69591 +
69592 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
69593 + page_cache_get(page_m);
69594 + page_add_anon_rmap(page_m, vma_m, address_m);
69595 + inc_mm_counter_fast(mm, MM_ANONPAGES);
69596 + set_pte_at(mm, address_m, pte_m, entry_m);
69597 + update_mmu_cache(vma_m, address_m, entry_m);
69598 +out:
69599 + if (ptl != ptl_m)
69600 + spin_unlock(ptl_m);
69601 + pte_unmap(pte_m);
69602 + unlock_page(page_m);
69603 +}
69604 +
69605 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
69606 +{
69607 + struct mm_struct *mm = vma->vm_mm;
69608 + unsigned long address_m;
69609 + spinlock_t *ptl_m;
69610 + struct vm_area_struct *vma_m;
69611 + pmd_t *pmd_m;
69612 + pte_t *pte_m, entry_m;
69613 +
69614 + BUG_ON(!page_m || PageAnon(page_m));
69615 +
69616 + vma_m = pax_find_mirror_vma(vma);
69617 + if (!vma_m)
69618 + return;
69619 +
69620 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
69621 + address_m = address + SEGMEXEC_TASK_SIZE;
69622 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
69623 + pte_m = pte_offset_map(pmd_m, address_m);
69624 + ptl_m = pte_lockptr(mm, pmd_m);
69625 + if (ptl != ptl_m) {
69626 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
69627 + if (!pte_none(*pte_m))
69628 + goto out;
69629 + }
69630 +
69631 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
69632 + page_cache_get(page_m);
69633 + page_add_file_rmap(page_m);
69634 + inc_mm_counter_fast(mm, MM_FILEPAGES);
69635 + set_pte_at(mm, address_m, pte_m, entry_m);
69636 + update_mmu_cache(vma_m, address_m, entry_m);
69637 +out:
69638 + if (ptl != ptl_m)
69639 + spin_unlock(ptl_m);
69640 + pte_unmap(pte_m);
69641 +}
69642 +
69643 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
69644 +{
69645 + struct mm_struct *mm = vma->vm_mm;
69646 + unsigned long address_m;
69647 + spinlock_t *ptl_m;
69648 + struct vm_area_struct *vma_m;
69649 + pmd_t *pmd_m;
69650 + pte_t *pte_m, entry_m;
69651 +
69652 + vma_m = pax_find_mirror_vma(vma);
69653 + if (!vma_m)
69654 + return;
69655 +
69656 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
69657 + address_m = address + SEGMEXEC_TASK_SIZE;
69658 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
69659 + pte_m = pte_offset_map(pmd_m, address_m);
69660 + ptl_m = pte_lockptr(mm, pmd_m);
69661 + if (ptl != ptl_m) {
69662 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
69663 + if (!pte_none(*pte_m))
69664 + goto out;
69665 + }
69666 +
69667 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
69668 + set_pte_at(mm, address_m, pte_m, entry_m);
69669 +out:
69670 + if (ptl != ptl_m)
69671 + spin_unlock(ptl_m);
69672 + pte_unmap(pte_m);
69673 +}
69674 +
69675 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
69676 +{
69677 + struct page *page_m;
69678 + pte_t entry;
69679 +
69680 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
69681 + goto out;
69682 +
69683 + entry = *pte;
69684 + page_m = vm_normal_page(vma, address, entry);
69685 + if (!page_m)
69686 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
69687 + else if (PageAnon(page_m)) {
69688 + if (pax_find_mirror_vma(vma)) {
69689 + pte_unmap_unlock(pte, ptl);
69690 + lock_page(page_m);
69691 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
69692 + if (pte_same(entry, *pte))
69693 + pax_mirror_anon_pte(vma, address, page_m, ptl);
69694 + else
69695 + unlock_page(page_m);
69696 + }
69697 + } else
69698 + pax_mirror_file_pte(vma, address, page_m, ptl);
69699 +
69700 +out:
69701 + pte_unmap_unlock(pte, ptl);
69702 +}
69703 +#endif
69704 +
69705 /*
69706 * This routine handles present pages, when users try to write
69707 * to a shared page. It is done by copying the page to a new address
69708 @@ -2675,6 +2868,12 @@ gotten:
69709 */
69710 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
69711 if (likely(pte_same(*page_table, orig_pte))) {
69712 +
69713 +#ifdef CONFIG_PAX_SEGMEXEC
69714 + if (pax_find_mirror_vma(vma))
69715 + BUG_ON(!trylock_page(new_page));
69716 +#endif
69717 +
69718 if (old_page) {
69719 if (!PageAnon(old_page)) {
69720 dec_mm_counter_fast(mm, MM_FILEPAGES);
69721 @@ -2726,6 +2925,10 @@ gotten:
69722 page_remove_rmap(old_page);
69723 }
69724
69725 +#ifdef CONFIG_PAX_SEGMEXEC
69726 + pax_mirror_anon_pte(vma, address, new_page, ptl);
69727 +#endif
69728 +
69729 /* Free the old page.. */
69730 new_page = old_page;
69731 ret |= VM_FAULT_WRITE;
69732 @@ -3005,6 +3208,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
69733 swap_free(entry);
69734 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
69735 try_to_free_swap(page);
69736 +
69737 +#ifdef CONFIG_PAX_SEGMEXEC
69738 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
69739 +#endif
69740 +
69741 unlock_page(page);
69742 if (swapcache) {
69743 /*
69744 @@ -3028,6 +3236,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
69745
69746 /* No need to invalidate - it was non-present before */
69747 update_mmu_cache(vma, address, page_table);
69748 +
69749 +#ifdef CONFIG_PAX_SEGMEXEC
69750 + pax_mirror_anon_pte(vma, address, page, ptl);
69751 +#endif
69752 +
69753 unlock:
69754 pte_unmap_unlock(page_table, ptl);
69755 out:
69756 @@ -3047,40 +3260,6 @@ out_release:
69757 }
69758
69759 /*
69760 - * This is like a special single-page "expand_{down|up}wards()",
69761 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
69762 - * doesn't hit another vma.
69763 - */
69764 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
69765 -{
69766 - address &= PAGE_MASK;
69767 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
69768 - struct vm_area_struct *prev = vma->vm_prev;
69769 -
69770 - /*
69771 - * Is there a mapping abutting this one below?
69772 - *
69773 - * That's only ok if it's the same stack mapping
69774 - * that has gotten split..
69775 - */
69776 - if (prev && prev->vm_end == address)
69777 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
69778 -
69779 - expand_downwards(vma, address - PAGE_SIZE);
69780 - }
69781 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
69782 - struct vm_area_struct *next = vma->vm_next;
69783 -
69784 - /* As VM_GROWSDOWN but s/below/above/ */
69785 - if (next && next->vm_start == address + PAGE_SIZE)
69786 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
69787 -
69788 - expand_upwards(vma, address + PAGE_SIZE);
69789 - }
69790 - return 0;
69791 -}
69792 -
69793 -/*
69794 * We enter with non-exclusive mmap_sem (to exclude vma changes,
69795 * but allow concurrent faults), and pte mapped but not yet locked.
69796 * We return with mmap_sem still held, but pte unmapped and unlocked.
69797 @@ -3089,27 +3268,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
69798 unsigned long address, pte_t *page_table, pmd_t *pmd,
69799 unsigned int flags)
69800 {
69801 - struct page *page;
69802 + struct page *page = NULL;
69803 spinlock_t *ptl;
69804 pte_t entry;
69805
69806 - pte_unmap(page_table);
69807 -
69808 - /* Check if we need to add a guard page to the stack */
69809 - if (check_stack_guard_page(vma, address) < 0)
69810 - return VM_FAULT_SIGBUS;
69811 -
69812 - /* Use the zero-page for reads */
69813 if (!(flags & FAULT_FLAG_WRITE)) {
69814 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
69815 vma->vm_page_prot));
69816 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
69817 + ptl = pte_lockptr(mm, pmd);
69818 + spin_lock(ptl);
69819 if (!pte_none(*page_table))
69820 goto unlock;
69821 goto setpte;
69822 }
69823
69824 /* Allocate our own private page. */
69825 + pte_unmap(page_table);
69826 +
69827 if (unlikely(anon_vma_prepare(vma)))
69828 goto oom;
69829 page = alloc_zeroed_user_highpage_movable(vma, address);
69830 @@ -3128,6 +3303,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
69831 if (!pte_none(*page_table))
69832 goto release;
69833
69834 +#ifdef CONFIG_PAX_SEGMEXEC
69835 + if (pax_find_mirror_vma(vma))
69836 + BUG_ON(!trylock_page(page));
69837 +#endif
69838 +
69839 inc_mm_counter_fast(mm, MM_ANONPAGES);
69840 page_add_new_anon_rmap(page, vma, address);
69841 setpte:
69842 @@ -3135,6 +3315,12 @@ setpte:
69843
69844 /* No need to invalidate - it was non-present before */
69845 update_mmu_cache(vma, address, page_table);
69846 +
69847 +#ifdef CONFIG_PAX_SEGMEXEC
69848 + if (page)
69849 + pax_mirror_anon_pte(vma, address, page, ptl);
69850 +#endif
69851 +
69852 unlock:
69853 pte_unmap_unlock(page_table, ptl);
69854 return 0;
69855 @@ -3278,6 +3464,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
69856 */
69857 /* Only go through if we didn't race with anybody else... */
69858 if (likely(pte_same(*page_table, orig_pte))) {
69859 +
69860 +#ifdef CONFIG_PAX_SEGMEXEC
69861 + if (anon && pax_find_mirror_vma(vma))
69862 + BUG_ON(!trylock_page(page));
69863 +#endif
69864 +
69865 flush_icache_page(vma, page);
69866 entry = mk_pte(page, vma->vm_page_prot);
69867 if (flags & FAULT_FLAG_WRITE)
69868 @@ -3297,6 +3489,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
69869
69870 /* no need to invalidate: a not-present page won't be cached */
69871 update_mmu_cache(vma, address, page_table);
69872 +
69873 +#ifdef CONFIG_PAX_SEGMEXEC
69874 + if (anon)
69875 + pax_mirror_anon_pte(vma, address, page, ptl);
69876 + else
69877 + pax_mirror_file_pte(vma, address, page, ptl);
69878 +#endif
69879 +
69880 } else {
69881 if (cow_page)
69882 mem_cgroup_uncharge_page(cow_page);
69883 @@ -3450,6 +3650,12 @@ int handle_pte_fault(struct mm_struct *mm,
69884 if (flags & FAULT_FLAG_WRITE)
69885 flush_tlb_fix_spurious_fault(vma, address);
69886 }
69887 +
69888 +#ifdef CONFIG_PAX_SEGMEXEC
69889 + pax_mirror_pte(vma, address, pte, pmd, ptl);
69890 + return 0;
69891 +#endif
69892 +
69893 unlock:
69894 pte_unmap_unlock(pte, ptl);
69895 return 0;
69896 @@ -3466,6 +3672,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
69897 pmd_t *pmd;
69898 pte_t *pte;
69899
69900 +#ifdef CONFIG_PAX_SEGMEXEC
69901 + struct vm_area_struct *vma_m;
69902 +#endif
69903 +
69904 __set_current_state(TASK_RUNNING);
69905
69906 count_vm_event(PGFAULT);
69907 @@ -3477,6 +3687,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
69908 if (unlikely(is_vm_hugetlb_page(vma)))
69909 return hugetlb_fault(mm, vma, address, flags);
69910
69911 +#ifdef CONFIG_PAX_SEGMEXEC
69912 + vma_m = pax_find_mirror_vma(vma);
69913 + if (vma_m) {
69914 + unsigned long address_m;
69915 + pgd_t *pgd_m;
69916 + pud_t *pud_m;
69917 + pmd_t *pmd_m;
69918 +
69919 + if (vma->vm_start > vma_m->vm_start) {
69920 + address_m = address;
69921 + address -= SEGMEXEC_TASK_SIZE;
69922 + vma = vma_m;
69923 + } else
69924 + address_m = address + SEGMEXEC_TASK_SIZE;
69925 +
69926 + pgd_m = pgd_offset(mm, address_m);
69927 + pud_m = pud_alloc(mm, pgd_m, address_m);
69928 + if (!pud_m)
69929 + return VM_FAULT_OOM;
69930 + pmd_m = pmd_alloc(mm, pud_m, address_m);
69931 + if (!pmd_m)
69932 + return VM_FAULT_OOM;
69933 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
69934 + return VM_FAULT_OOM;
69935 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
69936 + }
69937 +#endif
69938 +
69939 pgd = pgd_offset(mm, address);
69940 pud = pud_alloc(mm, pgd, address);
69941 if (!pud)
69942 @@ -3506,7 +3744,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
69943 * run pte_offset_map on the pmd, if an huge pmd could
69944 * materialize from under us from a different thread.
69945 */
69946 - if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
69947 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
69948 return VM_FAULT_OOM;
69949 /* if an huge pmd materialized from under us just retry later */
69950 if (unlikely(pmd_trans_huge(*pmd)))
69951 @@ -3610,7 +3848,7 @@ static int __init gate_vma_init(void)
69952 gate_vma.vm_start = FIXADDR_USER_START;
69953 gate_vma.vm_end = FIXADDR_USER_END;
69954 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
69955 - gate_vma.vm_page_prot = __P101;
69956 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
69957 /*
69958 * Make sure the vDSO gets into every core dump.
69959 * Dumping its contents makes post-mortem fully interpretable later
69960 diff --git a/mm/mempolicy.c b/mm/mempolicy.c
69961 index 47296fe..5c3d263 100644
69962 --- a/mm/mempolicy.c
69963 +++ b/mm/mempolicy.c
69964 @@ -640,6 +640,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
69965 unsigned long vmstart;
69966 unsigned long vmend;
69967
69968 +#ifdef CONFIG_PAX_SEGMEXEC
69969 + struct vm_area_struct *vma_m;
69970 +#endif
69971 +
69972 vma = find_vma(mm, start);
69973 if (!vma || vma->vm_start > start)
69974 return -EFAULT;
69975 @@ -679,6 +683,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
69976 err = policy_vma(vma, new_pol);
69977 if (err)
69978 goto out;
69979 +
69980 +#ifdef CONFIG_PAX_SEGMEXEC
69981 + vma_m = pax_find_mirror_vma(vma);
69982 + if (vma_m) {
69983 + err = policy_vma(vma_m, new_pol);
69984 + if (err)
69985 + goto out;
69986 + }
69987 +#endif
69988 +
69989 }
69990
69991 out:
69992 @@ -1112,6 +1126,17 @@ static long do_mbind(unsigned long start, unsigned long len,
69993
69994 if (end < start)
69995 return -EINVAL;
69996 +
69997 +#ifdef CONFIG_PAX_SEGMEXEC
69998 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
69999 + if (end > SEGMEXEC_TASK_SIZE)
70000 + return -EINVAL;
70001 + } else
70002 +#endif
70003 +
70004 + if (end > TASK_SIZE)
70005 + return -EINVAL;
70006 +
70007 if (end == start)
70008 return 0;
70009
70010 @@ -1330,6 +1355,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
70011 if (!mm)
70012 goto out;
70013
70014 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
70015 + if (mm != current->mm &&
70016 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
70017 + err = -EPERM;
70018 + goto out;
70019 + }
70020 +#endif
70021 +
70022 /*
70023 * Check if this process has the right to modify the specified
70024 * process. The right exists if the process has administrative
70025 @@ -1339,8 +1372,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
70026 rcu_read_lock();
70027 tcred = __task_cred(task);
70028 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
70029 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
70030 - !capable(CAP_SYS_NICE)) {
70031 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
70032 rcu_read_unlock();
70033 err = -EPERM;
70034 goto out;
70035 diff --git a/mm/migrate.c b/mm/migrate.c
70036 index 1503b6b..156c672 100644
70037 --- a/mm/migrate.c
70038 +++ b/mm/migrate.c
70039 @@ -1370,6 +1370,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
70040 if (!mm)
70041 return -EINVAL;
70042
70043 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
70044 + if (mm != current->mm &&
70045 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
70046 + err = -EPERM;
70047 + goto out;
70048 + }
70049 +#endif
70050 +
70051 /*
70052 * Check if this process has the right to modify the specified
70053 * process. The right exists if the process has administrative
70054 @@ -1379,8 +1387,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
70055 rcu_read_lock();
70056 tcred = __task_cred(task);
70057 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
70058 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
70059 - !capable(CAP_SYS_NICE)) {
70060 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
70061 rcu_read_unlock();
70062 err = -EPERM;
70063 goto out;
70064 diff --git a/mm/mlock.c b/mm/mlock.c
70065 index ef726e8..13e0901 100644
70066 --- a/mm/mlock.c
70067 +++ b/mm/mlock.c
70068 @@ -13,6 +13,7 @@
70069 #include <linux/pagemap.h>
70070 #include <linux/mempolicy.h>
70071 #include <linux/syscalls.h>
70072 +#include <linux/security.h>
70073 #include <linux/sched.h>
70074 #include <linux/export.h>
70075 #include <linux/rmap.h>
70076 @@ -385,6 +386,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
70077 return -EINVAL;
70078 if (end == start)
70079 return 0;
70080 + if (end > TASK_SIZE)
70081 + return -EINVAL;
70082 +
70083 vma = find_vma(current->mm, start);
70084 if (!vma || vma->vm_start > start)
70085 return -ENOMEM;
70086 @@ -396,6 +400,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
70087 for (nstart = start ; ; ) {
70088 vm_flags_t newflags;
70089
70090 +#ifdef CONFIG_PAX_SEGMEXEC
70091 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
70092 + break;
70093 +#endif
70094 +
70095 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
70096
70097 newflags = vma->vm_flags | VM_LOCKED;
70098 @@ -501,6 +510,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
70099 lock_limit >>= PAGE_SHIFT;
70100
70101 /* check against resource limits */
70102 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
70103 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
70104 error = do_mlock(start, len, 1);
70105 up_write(&current->mm->mmap_sem);
70106 @@ -524,17 +534,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
70107 static int do_mlockall(int flags)
70108 {
70109 struct vm_area_struct * vma, * prev = NULL;
70110 - unsigned int def_flags = 0;
70111
70112 if (flags & MCL_FUTURE)
70113 - def_flags = VM_LOCKED;
70114 - current->mm->def_flags = def_flags;
70115 + current->mm->def_flags |= VM_LOCKED;
70116 + else
70117 + current->mm->def_flags &= ~VM_LOCKED;
70118 if (flags == MCL_FUTURE)
70119 goto out;
70120
70121 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
70122 vm_flags_t newflags;
70123
70124 +#ifdef CONFIG_PAX_SEGMEXEC
70125 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
70126 + break;
70127 +#endif
70128 +
70129 + BUG_ON(vma->vm_end > TASK_SIZE);
70130 newflags = vma->vm_flags | VM_LOCKED;
70131 if (!(flags & MCL_CURRENT))
70132 newflags &= ~VM_LOCKED;
70133 @@ -567,6 +583,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
70134 lock_limit >>= PAGE_SHIFT;
70135
70136 ret = -ENOMEM;
70137 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
70138 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
70139 capable(CAP_IPC_LOCK))
70140 ret = do_mlockall(flags);
70141 diff --git a/mm/mmap.c b/mm/mmap.c
70142 index da15a79..2e3d9ff 100644
70143 --- a/mm/mmap.c
70144 +++ b/mm/mmap.c
70145 @@ -46,6 +46,16 @@
70146 #define arch_rebalance_pgtables(addr, len) (addr)
70147 #endif
70148
70149 +static inline void verify_mm_writelocked(struct mm_struct *mm)
70150 +{
70151 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
70152 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
70153 + up_read(&mm->mmap_sem);
70154 + BUG();
70155 + }
70156 +#endif
70157 +}
70158 +
70159 static void unmap_region(struct mm_struct *mm,
70160 struct vm_area_struct *vma, struct vm_area_struct *prev,
70161 unsigned long start, unsigned long end);
70162 @@ -71,22 +81,32 @@ static void unmap_region(struct mm_struct *mm,
70163 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
70164 *
70165 */
70166 -pgprot_t protection_map[16] = {
70167 +pgprot_t protection_map[16] __read_only = {
70168 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
70169 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
70170 };
70171
70172 -pgprot_t vm_get_page_prot(unsigned long vm_flags)
70173 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
70174 {
70175 - return __pgprot(pgprot_val(protection_map[vm_flags &
70176 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
70177 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
70178 pgprot_val(arch_vm_get_page_prot(vm_flags)));
70179 +
70180 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
70181 + if (!(__supported_pte_mask & _PAGE_NX) &&
70182 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
70183 + (vm_flags & (VM_READ | VM_WRITE)))
70184 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
70185 +#endif
70186 +
70187 + return prot;
70188 }
70189 EXPORT_SYMBOL(vm_get_page_prot);
70190
70191 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
70192 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
70193 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
70194 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
70195 /*
70196 * Make sure vm_committed_as in one cacheline and not cacheline shared with
70197 * other variables. It can be updated by several CPUs frequently.
70198 @@ -228,6 +248,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
70199 struct vm_area_struct *next = vma->vm_next;
70200
70201 might_sleep();
70202 + BUG_ON(vma->vm_mirror);
70203 if (vma->vm_ops && vma->vm_ops->close)
70204 vma->vm_ops->close(vma);
70205 if (vma->vm_file) {
70206 @@ -272,6 +293,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
70207 * not page aligned -Ram Gupta
70208 */
70209 rlim = rlimit(RLIMIT_DATA);
70210 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
70211 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
70212 (mm->end_data - mm->start_data) > rlim)
70213 goto out;
70214 @@ -689,6 +711,12 @@ static int
70215 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
70216 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
70217 {
70218 +
70219 +#ifdef CONFIG_PAX_SEGMEXEC
70220 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
70221 + return 0;
70222 +#endif
70223 +
70224 if (is_mergeable_vma(vma, file, vm_flags) &&
70225 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
70226 if (vma->vm_pgoff == vm_pgoff)
70227 @@ -708,6 +736,12 @@ static int
70228 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
70229 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
70230 {
70231 +
70232 +#ifdef CONFIG_PAX_SEGMEXEC
70233 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
70234 + return 0;
70235 +#endif
70236 +
70237 if (is_mergeable_vma(vma, file, vm_flags) &&
70238 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
70239 pgoff_t vm_pglen;
70240 @@ -750,13 +784,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
70241 struct vm_area_struct *vma_merge(struct mm_struct *mm,
70242 struct vm_area_struct *prev, unsigned long addr,
70243 unsigned long end, unsigned long vm_flags,
70244 - struct anon_vma *anon_vma, struct file *file,
70245 + struct anon_vma *anon_vma, struct file *file,
70246 pgoff_t pgoff, struct mempolicy *policy)
70247 {
70248 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
70249 struct vm_area_struct *area, *next;
70250 int err;
70251
70252 +#ifdef CONFIG_PAX_SEGMEXEC
70253 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
70254 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
70255 +
70256 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
70257 +#endif
70258 +
70259 /*
70260 * We later require that vma->vm_flags == vm_flags,
70261 * so this tests vma->vm_flags & VM_SPECIAL, too.
70262 @@ -772,6 +813,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
70263 if (next && next->vm_end == end) /* cases 6, 7, 8 */
70264 next = next->vm_next;
70265
70266 +#ifdef CONFIG_PAX_SEGMEXEC
70267 + if (prev)
70268 + prev_m = pax_find_mirror_vma(prev);
70269 + if (area)
70270 + area_m = pax_find_mirror_vma(area);
70271 + if (next)
70272 + next_m = pax_find_mirror_vma(next);
70273 +#endif
70274 +
70275 /*
70276 * Can it merge with the predecessor?
70277 */
70278 @@ -791,9 +841,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
70279 /* cases 1, 6 */
70280 err = vma_adjust(prev, prev->vm_start,
70281 next->vm_end, prev->vm_pgoff, NULL);
70282 - } else /* cases 2, 5, 7 */
70283 +
70284 +#ifdef CONFIG_PAX_SEGMEXEC
70285 + if (!err && prev_m)
70286 + err = vma_adjust(prev_m, prev_m->vm_start,
70287 + next_m->vm_end, prev_m->vm_pgoff, NULL);
70288 +#endif
70289 +
70290 + } else { /* cases 2, 5, 7 */
70291 err = vma_adjust(prev, prev->vm_start,
70292 end, prev->vm_pgoff, NULL);
70293 +
70294 +#ifdef CONFIG_PAX_SEGMEXEC
70295 + if (!err && prev_m)
70296 + err = vma_adjust(prev_m, prev_m->vm_start,
70297 + end_m, prev_m->vm_pgoff, NULL);
70298 +#endif
70299 +
70300 + }
70301 if (err)
70302 return NULL;
70303 khugepaged_enter_vma_merge(prev);
70304 @@ -807,12 +872,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
70305 mpol_equal(policy, vma_policy(next)) &&
70306 can_vma_merge_before(next, vm_flags,
70307 anon_vma, file, pgoff+pglen)) {
70308 - if (prev && addr < prev->vm_end) /* case 4 */
70309 + if (prev && addr < prev->vm_end) { /* case 4 */
70310 err = vma_adjust(prev, prev->vm_start,
70311 addr, prev->vm_pgoff, NULL);
70312 - else /* cases 3, 8 */
70313 +
70314 +#ifdef CONFIG_PAX_SEGMEXEC
70315 + if (!err && prev_m)
70316 + err = vma_adjust(prev_m, prev_m->vm_start,
70317 + addr_m, prev_m->vm_pgoff, NULL);
70318 +#endif
70319 +
70320 + } else { /* cases 3, 8 */
70321 err = vma_adjust(area, addr, next->vm_end,
70322 next->vm_pgoff - pglen, NULL);
70323 +
70324 +#ifdef CONFIG_PAX_SEGMEXEC
70325 + if (!err && area_m)
70326 + err = vma_adjust(area_m, addr_m, next_m->vm_end,
70327 + next_m->vm_pgoff - pglen, NULL);
70328 +#endif
70329 +
70330 + }
70331 if (err)
70332 return NULL;
70333 khugepaged_enter_vma_merge(area);
70334 @@ -921,14 +1001,11 @@ none:
70335 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
70336 struct file *file, long pages)
70337 {
70338 - const unsigned long stack_flags
70339 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
70340 -
70341 if (file) {
70342 mm->shared_vm += pages;
70343 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
70344 mm->exec_vm += pages;
70345 - } else if (flags & stack_flags)
70346 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
70347 mm->stack_vm += pages;
70348 if (flags & (VM_RESERVED|VM_IO))
70349 mm->reserved_vm += pages;
70350 @@ -955,7 +1032,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
70351 * (the exception is when the underlying filesystem is noexec
70352 * mounted, in which case we dont add PROT_EXEC.)
70353 */
70354 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
70355 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
70356 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
70357 prot |= PROT_EXEC;
70358
70359 @@ -981,7 +1058,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
70360 /* Obtain the address to map to. we verify (or select) it and ensure
70361 * that it represents a valid section of the address space.
70362 */
70363 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
70364 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
70365 if (addr & ~PAGE_MASK)
70366 return addr;
70367
70368 @@ -992,6 +1069,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
70369 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
70370 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
70371
70372 +#ifdef CONFIG_PAX_MPROTECT
70373 + if (mm->pax_flags & MF_PAX_MPROTECT) {
70374 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
70375 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
70376 + gr_log_rwxmmap(file);
70377 +
70378 +#ifdef CONFIG_PAX_EMUPLT
70379 + vm_flags &= ~VM_EXEC;
70380 +#else
70381 + return -EPERM;
70382 +#endif
70383 +
70384 + }
70385 +
70386 + if (!(vm_flags & VM_EXEC))
70387 + vm_flags &= ~VM_MAYEXEC;
70388 +#else
70389 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
70390 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
70391 +#endif
70392 + else
70393 + vm_flags &= ~VM_MAYWRITE;
70394 + }
70395 +#endif
70396 +
70397 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
70398 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
70399 + vm_flags &= ~VM_PAGEEXEC;
70400 +#endif
70401 +
70402 if (flags & MAP_LOCKED)
70403 if (!can_do_mlock())
70404 return -EPERM;
70405 @@ -1003,6 +1110,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
70406 locked += mm->locked_vm;
70407 lock_limit = rlimit(RLIMIT_MEMLOCK);
70408 lock_limit >>= PAGE_SHIFT;
70409 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
70410 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
70411 return -EAGAIN;
70412 }
70413 @@ -1073,6 +1181,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
70414 if (error)
70415 return error;
70416
70417 + if (!gr_acl_handle_mmap(file, prot))
70418 + return -EACCES;
70419 +
70420 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
70421 }
70422 EXPORT_SYMBOL(do_mmap_pgoff);
70423 @@ -1153,7 +1264,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
70424 vm_flags_t vm_flags = vma->vm_flags;
70425
70426 /* If it was private or non-writable, the write bit is already clear */
70427 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
70428 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
70429 return 0;
70430
70431 /* The backer wishes to know when pages are first written to? */
70432 @@ -1202,14 +1313,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
70433 unsigned long charged = 0;
70434 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
70435
70436 +#ifdef CONFIG_PAX_SEGMEXEC
70437 + struct vm_area_struct *vma_m = NULL;
70438 +#endif
70439 +
70440 + /*
70441 + * mm->mmap_sem is required to protect against another thread
70442 + * changing the mappings in case we sleep.
70443 + */
70444 + verify_mm_writelocked(mm);
70445 +
70446 /* Clear old maps */
70447 error = -ENOMEM;
70448 -munmap_back:
70449 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
70450 if (vma && vma->vm_start < addr + len) {
70451 if (do_munmap(mm, addr, len))
70452 return -ENOMEM;
70453 - goto munmap_back;
70454 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
70455 + BUG_ON(vma && vma->vm_start < addr + len);
70456 }
70457
70458 /* Check against address space limit. */
70459 @@ -1258,6 +1379,16 @@ munmap_back:
70460 goto unacct_error;
70461 }
70462
70463 +#ifdef CONFIG_PAX_SEGMEXEC
70464 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
70465 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
70466 + if (!vma_m) {
70467 + error = -ENOMEM;
70468 + goto free_vma;
70469 + }
70470 + }
70471 +#endif
70472 +
70473 vma->vm_mm = mm;
70474 vma->vm_start = addr;
70475 vma->vm_end = addr + len;
70476 @@ -1282,6 +1413,19 @@ munmap_back:
70477 error = file->f_op->mmap(file, vma);
70478 if (error)
70479 goto unmap_and_free_vma;
70480 +
70481 +#ifdef CONFIG_PAX_SEGMEXEC
70482 + if (vma_m && (vm_flags & VM_EXECUTABLE))
70483 + added_exe_file_vma(mm);
70484 +#endif
70485 +
70486 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
70487 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
70488 + vma->vm_flags |= VM_PAGEEXEC;
70489 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
70490 + }
70491 +#endif
70492 +
70493 if (vm_flags & VM_EXECUTABLE)
70494 added_exe_file_vma(mm);
70495
70496 @@ -1319,6 +1463,11 @@ munmap_back:
70497 vma_link(mm, vma, prev, rb_link, rb_parent);
70498 file = vma->vm_file;
70499
70500 +#ifdef CONFIG_PAX_SEGMEXEC
70501 + if (vma_m)
70502 + BUG_ON(pax_mirror_vma(vma_m, vma));
70503 +#endif
70504 +
70505 /* Once vma denies write, undo our temporary denial count */
70506 if (correct_wcount)
70507 atomic_inc(&inode->i_writecount);
70508 @@ -1327,6 +1476,7 @@ out:
70509
70510 mm->total_vm += len >> PAGE_SHIFT;
70511 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
70512 + track_exec_limit(mm, addr, addr + len, vm_flags);
70513 if (vm_flags & VM_LOCKED) {
70514 if (!mlock_vma_pages_range(vma, addr, addr + len))
70515 mm->locked_vm += (len >> PAGE_SHIFT);
70516 @@ -1344,6 +1494,12 @@ unmap_and_free_vma:
70517 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
70518 charged = 0;
70519 free_vma:
70520 +
70521 +#ifdef CONFIG_PAX_SEGMEXEC
70522 + if (vma_m)
70523 + kmem_cache_free(vm_area_cachep, vma_m);
70524 +#endif
70525 +
70526 kmem_cache_free(vm_area_cachep, vma);
70527 unacct_error:
70528 if (charged)
70529 @@ -1351,6 +1507,44 @@ unacct_error:
70530 return error;
70531 }
70532
70533 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
70534 +{
70535 + if (!vma) {
70536 +#ifdef CONFIG_STACK_GROWSUP
70537 + if (addr > sysctl_heap_stack_gap)
70538 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
70539 + else
70540 + vma = find_vma(current->mm, 0);
70541 + if (vma && (vma->vm_flags & VM_GROWSUP))
70542 + return false;
70543 +#endif
70544 + return true;
70545 + }
70546 +
70547 + if (addr + len > vma->vm_start)
70548 + return false;
70549 +
70550 + if (vma->vm_flags & VM_GROWSDOWN)
70551 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
70552 +#ifdef CONFIG_STACK_GROWSUP
70553 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
70554 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
70555 +#endif
70556 +
70557 + return true;
70558 +}
70559 +
70560 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
70561 +{
70562 + if (vma->vm_start < len)
70563 + return -ENOMEM;
70564 + if (!(vma->vm_flags & VM_GROWSDOWN))
70565 + return vma->vm_start - len;
70566 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
70567 + return vma->vm_start - len - sysctl_heap_stack_gap;
70568 + return -ENOMEM;
70569 +}
70570 +
70571 /* Get an address range which is currently unmapped.
70572 * For shmat() with addr=0.
70573 *
70574 @@ -1377,18 +1571,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
70575 if (flags & MAP_FIXED)
70576 return addr;
70577
70578 +#ifdef CONFIG_PAX_RANDMMAP
70579 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
70580 +#endif
70581 +
70582 if (addr) {
70583 addr = PAGE_ALIGN(addr);
70584 - vma = find_vma(mm, addr);
70585 - if (TASK_SIZE - len >= addr &&
70586 - (!vma || addr + len <= vma->vm_start))
70587 - return addr;
70588 + if (TASK_SIZE - len >= addr) {
70589 + vma = find_vma(mm, addr);
70590 + if (check_heap_stack_gap(vma, addr, len))
70591 + return addr;
70592 + }
70593 }
70594 if (len > mm->cached_hole_size) {
70595 - start_addr = addr = mm->free_area_cache;
70596 + start_addr = addr = mm->free_area_cache;
70597 } else {
70598 - start_addr = addr = TASK_UNMAPPED_BASE;
70599 - mm->cached_hole_size = 0;
70600 + start_addr = addr = mm->mmap_base;
70601 + mm->cached_hole_size = 0;
70602 }
70603
70604 full_search:
70605 @@ -1399,34 +1598,40 @@ full_search:
70606 * Start a new search - just in case we missed
70607 * some holes.
70608 */
70609 - if (start_addr != TASK_UNMAPPED_BASE) {
70610 - addr = TASK_UNMAPPED_BASE;
70611 - start_addr = addr;
70612 + if (start_addr != mm->mmap_base) {
70613 + start_addr = addr = mm->mmap_base;
70614 mm->cached_hole_size = 0;
70615 goto full_search;
70616 }
70617 return -ENOMEM;
70618 }
70619 - if (!vma || addr + len <= vma->vm_start) {
70620 - /*
70621 - * Remember the place where we stopped the search:
70622 - */
70623 - mm->free_area_cache = addr + len;
70624 - return addr;
70625 - }
70626 + if (check_heap_stack_gap(vma, addr, len))
70627 + break;
70628 if (addr + mm->cached_hole_size < vma->vm_start)
70629 mm->cached_hole_size = vma->vm_start - addr;
70630 addr = vma->vm_end;
70631 }
70632 +
70633 + /*
70634 + * Remember the place where we stopped the search:
70635 + */
70636 + mm->free_area_cache = addr + len;
70637 + return addr;
70638 }
70639 #endif
70640
70641 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
70642 {
70643 +
70644 +#ifdef CONFIG_PAX_SEGMEXEC
70645 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
70646 + return;
70647 +#endif
70648 +
70649 /*
70650 * Is this a new hole at the lowest possible address?
70651 */
70652 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
70653 + if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
70654 mm->free_area_cache = addr;
70655 mm->cached_hole_size = ~0UL;
70656 }
70657 @@ -1444,7 +1649,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
70658 {
70659 struct vm_area_struct *vma;
70660 struct mm_struct *mm = current->mm;
70661 - unsigned long addr = addr0;
70662 + unsigned long base = mm->mmap_base, addr = addr0;
70663
70664 /* requested length too big for entire address space */
70665 if (len > TASK_SIZE)
70666 @@ -1453,13 +1658,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
70667 if (flags & MAP_FIXED)
70668 return addr;
70669
70670 +#ifdef CONFIG_PAX_RANDMMAP
70671 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
70672 +#endif
70673 +
70674 /* requesting a specific address */
70675 if (addr) {
70676 addr = PAGE_ALIGN(addr);
70677 - vma = find_vma(mm, addr);
70678 - if (TASK_SIZE - len >= addr &&
70679 - (!vma || addr + len <= vma->vm_start))
70680 - return addr;
70681 + if (TASK_SIZE - len >= addr) {
70682 + vma = find_vma(mm, addr);
70683 + if (check_heap_stack_gap(vma, addr, len))
70684 + return addr;
70685 + }
70686 }
70687
70688 /* check if free_area_cache is useful for us */
70689 @@ -1474,7 +1684,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
70690 /* make sure it can fit in the remaining address space */
70691 if (addr > len) {
70692 vma = find_vma(mm, addr-len);
70693 - if (!vma || addr <= vma->vm_start)
70694 + if (check_heap_stack_gap(vma, addr - len, len))
70695 /* remember the address as a hint for next time */
70696 return (mm->free_area_cache = addr-len);
70697 }
70698 @@ -1491,7 +1701,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
70699 * return with success:
70700 */
70701 vma = find_vma(mm, addr);
70702 - if (!vma || addr+len <= vma->vm_start)
70703 + if (check_heap_stack_gap(vma, addr, len))
70704 /* remember the address as a hint for next time */
70705 return (mm->free_area_cache = addr);
70706
70707 @@ -1500,8 +1710,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
70708 mm->cached_hole_size = vma->vm_start - addr;
70709
70710 /* try just below the current vma->vm_start */
70711 - addr = vma->vm_start-len;
70712 - } while (len < vma->vm_start);
70713 + addr = skip_heap_stack_gap(vma, len);
70714 + } while (!IS_ERR_VALUE(addr));
70715
70716 bottomup:
70717 /*
70718 @@ -1510,13 +1720,21 @@ bottomup:
70719 * can happen with large stack limits and large mmap()
70720 * allocations.
70721 */
70722 + mm->mmap_base = TASK_UNMAPPED_BASE;
70723 +
70724 +#ifdef CONFIG_PAX_RANDMMAP
70725 + if (mm->pax_flags & MF_PAX_RANDMMAP)
70726 + mm->mmap_base += mm->delta_mmap;
70727 +#endif
70728 +
70729 + mm->free_area_cache = mm->mmap_base;
70730 mm->cached_hole_size = ~0UL;
70731 - mm->free_area_cache = TASK_UNMAPPED_BASE;
70732 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
70733 /*
70734 * Restore the topdown base:
70735 */
70736 - mm->free_area_cache = mm->mmap_base;
70737 + mm->mmap_base = base;
70738 + mm->free_area_cache = base;
70739 mm->cached_hole_size = ~0UL;
70740
70741 return addr;
70742 @@ -1525,6 +1743,12 @@ bottomup:
70743
70744 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
70745 {
70746 +
70747 +#ifdef CONFIG_PAX_SEGMEXEC
70748 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
70749 + return;
70750 +#endif
70751 +
70752 /*
70753 * Is this a new hole at the highest possible address?
70754 */
70755 @@ -1532,8 +1756,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
70756 mm->free_area_cache = addr;
70757
70758 /* dont allow allocations above current base */
70759 - if (mm->free_area_cache > mm->mmap_base)
70760 + if (mm->free_area_cache > mm->mmap_base) {
70761 mm->free_area_cache = mm->mmap_base;
70762 + mm->cached_hole_size = ~0UL;
70763 + }
70764 }
70765
70766 unsigned long
70767 @@ -1629,6 +1855,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
70768 return vma;
70769 }
70770
70771 +#ifdef CONFIG_PAX_SEGMEXEC
70772 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
70773 +{
70774 + struct vm_area_struct *vma_m;
70775 +
70776 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
70777 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
70778 + BUG_ON(vma->vm_mirror);
70779 + return NULL;
70780 + }
70781 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
70782 + vma_m = vma->vm_mirror;
70783 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
70784 + BUG_ON(vma->vm_file != vma_m->vm_file);
70785 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
70786 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
70787 + BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
70788 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
70789 + return vma_m;
70790 +}
70791 +#endif
70792 +
70793 /*
70794 * Verify that the stack growth is acceptable and
70795 * update accounting. This is shared with both the
70796 @@ -1645,6 +1893,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
70797 return -ENOMEM;
70798
70799 /* Stack limit test */
70800 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
70801 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
70802 return -ENOMEM;
70803
70804 @@ -1655,6 +1904,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
70805 locked = mm->locked_vm + grow;
70806 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
70807 limit >>= PAGE_SHIFT;
70808 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
70809 if (locked > limit && !capable(CAP_IPC_LOCK))
70810 return -ENOMEM;
70811 }
70812 @@ -1685,37 +1935,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
70813 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
70814 * vma is the last one with address > vma->vm_end. Have to extend vma.
70815 */
70816 +#ifndef CONFIG_IA64
70817 +static
70818 +#endif
70819 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
70820 {
70821 int error;
70822 + bool locknext;
70823
70824 if (!(vma->vm_flags & VM_GROWSUP))
70825 return -EFAULT;
70826
70827 + /* Also guard against wrapping around to address 0. */
70828 + if (address < PAGE_ALIGN(address+1))
70829 + address = PAGE_ALIGN(address+1);
70830 + else
70831 + return -ENOMEM;
70832 +
70833 /*
70834 * We must make sure the anon_vma is allocated
70835 * so that the anon_vma locking is not a noop.
70836 */
70837 if (unlikely(anon_vma_prepare(vma)))
70838 return -ENOMEM;
70839 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
70840 + if (locknext && anon_vma_prepare(vma->vm_next))
70841 + return -ENOMEM;
70842 vma_lock_anon_vma(vma);
70843 + if (locknext)
70844 + vma_lock_anon_vma(vma->vm_next);
70845
70846 /*
70847 * vma->vm_start/vm_end cannot change under us because the caller
70848 * is required to hold the mmap_sem in read mode. We need the
70849 - * anon_vma lock to serialize against concurrent expand_stacks.
70850 - * Also guard against wrapping around to address 0.
70851 + * anon_vma locks to serialize against concurrent expand_stacks
70852 + * and expand_upwards.
70853 */
70854 - if (address < PAGE_ALIGN(address+4))
70855 - address = PAGE_ALIGN(address+4);
70856 - else {
70857 - vma_unlock_anon_vma(vma);
70858 - return -ENOMEM;
70859 - }
70860 error = 0;
70861
70862 /* Somebody else might have raced and expanded it already */
70863 - if (address > vma->vm_end) {
70864 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
70865 + error = -ENOMEM;
70866 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
70867 unsigned long size, grow;
70868
70869 size = address - vma->vm_start;
70870 @@ -1730,6 +1991,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
70871 }
70872 }
70873 }
70874 + if (locknext)
70875 + vma_unlock_anon_vma(vma->vm_next);
70876 vma_unlock_anon_vma(vma);
70877 khugepaged_enter_vma_merge(vma);
70878 return error;
70879 @@ -1743,6 +2006,8 @@ int expand_downwards(struct vm_area_struct *vma,
70880 unsigned long address)
70881 {
70882 int error;
70883 + bool lockprev = false;
70884 + struct vm_area_struct *prev;
70885
70886 /*
70887 * We must make sure the anon_vma is allocated
70888 @@ -1756,6 +2021,15 @@ int expand_downwards(struct vm_area_struct *vma,
70889 if (error)
70890 return error;
70891
70892 + prev = vma->vm_prev;
70893 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
70894 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
70895 +#endif
70896 + if (lockprev && anon_vma_prepare(prev))
70897 + return -ENOMEM;
70898 + if (lockprev)
70899 + vma_lock_anon_vma(prev);
70900 +
70901 vma_lock_anon_vma(vma);
70902
70903 /*
70904 @@ -1765,9 +2039,17 @@ int expand_downwards(struct vm_area_struct *vma,
70905 */
70906
70907 /* Somebody else might have raced and expanded it already */
70908 - if (address < vma->vm_start) {
70909 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
70910 + error = -ENOMEM;
70911 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
70912 unsigned long size, grow;
70913
70914 +#ifdef CONFIG_PAX_SEGMEXEC
70915 + struct vm_area_struct *vma_m;
70916 +
70917 + vma_m = pax_find_mirror_vma(vma);
70918 +#endif
70919 +
70920 size = vma->vm_end - address;
70921 grow = (vma->vm_start - address) >> PAGE_SHIFT;
70922
70923 @@ -1777,11 +2059,22 @@ int expand_downwards(struct vm_area_struct *vma,
70924 if (!error) {
70925 vma->vm_start = address;
70926 vma->vm_pgoff -= grow;
70927 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
70928 +
70929 +#ifdef CONFIG_PAX_SEGMEXEC
70930 + if (vma_m) {
70931 + vma_m->vm_start -= grow << PAGE_SHIFT;
70932 + vma_m->vm_pgoff -= grow;
70933 + }
70934 +#endif
70935 +
70936 perf_event_mmap(vma);
70937 }
70938 }
70939 }
70940 vma_unlock_anon_vma(vma);
70941 + if (lockprev)
70942 + vma_unlock_anon_vma(prev);
70943 khugepaged_enter_vma_merge(vma);
70944 return error;
70945 }
70946 @@ -1851,6 +2144,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
70947 do {
70948 long nrpages = vma_pages(vma);
70949
70950 +#ifdef CONFIG_PAX_SEGMEXEC
70951 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
70952 + vma = remove_vma(vma);
70953 + continue;
70954 + }
70955 +#endif
70956 +
70957 mm->total_vm -= nrpages;
70958 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
70959 vma = remove_vma(vma);
70960 @@ -1896,6 +2196,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
70961 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
70962 vma->vm_prev = NULL;
70963 do {
70964 +
70965 +#ifdef CONFIG_PAX_SEGMEXEC
70966 + if (vma->vm_mirror) {
70967 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
70968 + vma->vm_mirror->vm_mirror = NULL;
70969 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
70970 + vma->vm_mirror = NULL;
70971 + }
70972 +#endif
70973 +
70974 rb_erase(&vma->vm_rb, &mm->mm_rb);
70975 mm->map_count--;
70976 tail_vma = vma;
70977 @@ -1924,14 +2234,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
70978 struct vm_area_struct *new;
70979 int err = -ENOMEM;
70980
70981 +#ifdef CONFIG_PAX_SEGMEXEC
70982 + struct vm_area_struct *vma_m, *new_m = NULL;
70983 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
70984 +#endif
70985 +
70986 if (is_vm_hugetlb_page(vma) && (addr &
70987 ~(huge_page_mask(hstate_vma(vma)))))
70988 return -EINVAL;
70989
70990 +#ifdef CONFIG_PAX_SEGMEXEC
70991 + vma_m = pax_find_mirror_vma(vma);
70992 +#endif
70993 +
70994 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
70995 if (!new)
70996 goto out_err;
70997
70998 +#ifdef CONFIG_PAX_SEGMEXEC
70999 + if (vma_m) {
71000 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
71001 + if (!new_m) {
71002 + kmem_cache_free(vm_area_cachep, new);
71003 + goto out_err;
71004 + }
71005 + }
71006 +#endif
71007 +
71008 /* most fields are the same, copy all, and then fixup */
71009 *new = *vma;
71010
71011 @@ -1944,6 +2273,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
71012 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
71013 }
71014
71015 +#ifdef CONFIG_PAX_SEGMEXEC
71016 + if (vma_m) {
71017 + *new_m = *vma_m;
71018 + INIT_LIST_HEAD(&new_m->anon_vma_chain);
71019 + new_m->vm_mirror = new;
71020 + new->vm_mirror = new_m;
71021 +
71022 + if (new_below)
71023 + new_m->vm_end = addr_m;
71024 + else {
71025 + new_m->vm_start = addr_m;
71026 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
71027 + }
71028 + }
71029 +#endif
71030 +
71031 pol = mpol_dup(vma_policy(vma));
71032 if (IS_ERR(pol)) {
71033 err = PTR_ERR(pol);
71034 @@ -1969,6 +2314,42 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
71035 else
71036 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
71037
71038 +#ifdef CONFIG_PAX_SEGMEXEC
71039 + if (!err && vma_m) {
71040 + if (anon_vma_clone(new_m, vma_m))
71041 + goto out_free_mpol;
71042 +
71043 + mpol_get(pol);
71044 + vma_set_policy(new_m, pol);
71045 +
71046 + if (new_m->vm_file) {
71047 + get_file(new_m->vm_file);
71048 + if (vma_m->vm_flags & VM_EXECUTABLE)
71049 + added_exe_file_vma(mm);
71050 + }
71051 +
71052 + if (new_m->vm_ops && new_m->vm_ops->open)
71053 + new_m->vm_ops->open(new_m);
71054 +
71055 + if (new_below)
71056 + err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
71057 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
71058 + else
71059 + err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
71060 +
71061 + if (err) {
71062 + if (new_m->vm_ops && new_m->vm_ops->close)
71063 + new_m->vm_ops->close(new_m);
71064 + if (new_m->vm_file) {
71065 + if (vma_m->vm_flags & VM_EXECUTABLE)
71066 + removed_exe_file_vma(mm);
71067 + fput(new_m->vm_file);
71068 + }
71069 + mpol_put(pol);
71070 + }
71071 + }
71072 +#endif
71073 +
71074 /* Success. */
71075 if (!err)
71076 return 0;
71077 @@ -1981,10 +2362,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
71078 removed_exe_file_vma(mm);
71079 fput(new->vm_file);
71080 }
71081 - unlink_anon_vmas(new);
71082 out_free_mpol:
71083 mpol_put(pol);
71084 out_free_vma:
71085 +
71086 +#ifdef CONFIG_PAX_SEGMEXEC
71087 + if (new_m) {
71088 + unlink_anon_vmas(new_m);
71089 + kmem_cache_free(vm_area_cachep, new_m);
71090 + }
71091 +#endif
71092 +
71093 + unlink_anon_vmas(new);
71094 kmem_cache_free(vm_area_cachep, new);
71095 out_err:
71096 return err;
71097 @@ -1997,6 +2386,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
71098 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
71099 unsigned long addr, int new_below)
71100 {
71101 +
71102 +#ifdef CONFIG_PAX_SEGMEXEC
71103 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
71104 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
71105 + if (mm->map_count >= sysctl_max_map_count-1)
71106 + return -ENOMEM;
71107 + } else
71108 +#endif
71109 +
71110 if (mm->map_count >= sysctl_max_map_count)
71111 return -ENOMEM;
71112
71113 @@ -2008,11 +2406,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
71114 * work. This now handles partial unmappings.
71115 * Jeremy Fitzhardinge <jeremy@goop.org>
71116 */
71117 +#ifdef CONFIG_PAX_SEGMEXEC
71118 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
71119 {
71120 + int ret = __do_munmap(mm, start, len);
71121 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
71122 + return ret;
71123 +
71124 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
71125 +}
71126 +
71127 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
71128 +#else
71129 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
71130 +#endif
71131 +{
71132 unsigned long end;
71133 struct vm_area_struct *vma, *prev, *last;
71134
71135 + /*
71136 + * mm->mmap_sem is required to protect against another thread
71137 + * changing the mappings in case we sleep.
71138 + */
71139 + verify_mm_writelocked(mm);
71140 +
71141 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
71142 return -EINVAL;
71143
71144 @@ -2087,6 +2504,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
71145 /* Fix up all other VM information */
71146 remove_vma_list(mm, vma);
71147
71148 + track_exec_limit(mm, start, end, 0UL);
71149 +
71150 return 0;
71151 }
71152
71153 @@ -2099,22 +2518,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
71154
71155 profile_munmap(addr);
71156
71157 +#ifdef CONFIG_PAX_SEGMEXEC
71158 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
71159 + (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
71160 + return -EINVAL;
71161 +#endif
71162 +
71163 down_write(&mm->mmap_sem);
71164 ret = do_munmap(mm, addr, len);
71165 up_write(&mm->mmap_sem);
71166 return ret;
71167 }
71168
71169 -static inline void verify_mm_writelocked(struct mm_struct *mm)
71170 -{
71171 -#ifdef CONFIG_DEBUG_VM
71172 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
71173 - WARN_ON(1);
71174 - up_read(&mm->mmap_sem);
71175 - }
71176 -#endif
71177 -}
71178 -
71179 /*
71180 * this is really a simplified "do_mmap". it only handles
71181 * anonymous maps. eventually we may be able to do some
71182 @@ -2128,6 +2543,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
71183 struct rb_node ** rb_link, * rb_parent;
71184 pgoff_t pgoff = addr >> PAGE_SHIFT;
71185 int error;
71186 + unsigned long charged;
71187
71188 len = PAGE_ALIGN(len);
71189 if (!len)
71190 @@ -2139,16 +2555,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
71191
71192 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
71193
71194 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
71195 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
71196 + flags &= ~VM_EXEC;
71197 +
71198 +#ifdef CONFIG_PAX_MPROTECT
71199 + if (mm->pax_flags & MF_PAX_MPROTECT)
71200 + flags &= ~VM_MAYEXEC;
71201 +#endif
71202 +
71203 + }
71204 +#endif
71205 +
71206 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
71207 if (error & ~PAGE_MASK)
71208 return error;
71209
71210 + charged = len >> PAGE_SHIFT;
71211 +
71212 /*
71213 * mlock MCL_FUTURE?
71214 */
71215 if (mm->def_flags & VM_LOCKED) {
71216 unsigned long locked, lock_limit;
71217 - locked = len >> PAGE_SHIFT;
71218 + locked = charged;
71219 locked += mm->locked_vm;
71220 lock_limit = rlimit(RLIMIT_MEMLOCK);
71221 lock_limit >>= PAGE_SHIFT;
71222 @@ -2165,22 +2595,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
71223 /*
71224 * Clear old maps. this also does some error checking for us
71225 */
71226 - munmap_back:
71227 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
71228 if (vma && vma->vm_start < addr + len) {
71229 if (do_munmap(mm, addr, len))
71230 return -ENOMEM;
71231 - goto munmap_back;
71232 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
71233 + BUG_ON(vma && vma->vm_start < addr + len);
71234 }
71235
71236 /* Check against address space limits *after* clearing old maps... */
71237 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
71238 + if (!may_expand_vm(mm, charged))
71239 return -ENOMEM;
71240
71241 if (mm->map_count > sysctl_max_map_count)
71242 return -ENOMEM;
71243
71244 - if (security_vm_enough_memory(len >> PAGE_SHIFT))
71245 + if (security_vm_enough_memory(charged))
71246 return -ENOMEM;
71247
71248 /* Can we just expand an old private anonymous mapping? */
71249 @@ -2194,7 +2624,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
71250 */
71251 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
71252 if (!vma) {
71253 - vm_unacct_memory(len >> PAGE_SHIFT);
71254 + vm_unacct_memory(charged);
71255 return -ENOMEM;
71256 }
71257
71258 @@ -2208,11 +2638,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
71259 vma_link(mm, vma, prev, rb_link, rb_parent);
71260 out:
71261 perf_event_mmap(vma);
71262 - mm->total_vm += len >> PAGE_SHIFT;
71263 + mm->total_vm += charged;
71264 if (flags & VM_LOCKED) {
71265 if (!mlock_vma_pages_range(vma, addr, addr + len))
71266 - mm->locked_vm += (len >> PAGE_SHIFT);
71267 + mm->locked_vm += charged;
71268 }
71269 + track_exec_limit(mm, addr, addr + len, flags);
71270 return addr;
71271 }
71272
71273 @@ -2259,8 +2690,10 @@ void exit_mmap(struct mm_struct *mm)
71274 * Walk the list again, actually closing and freeing it,
71275 * with preemption enabled, without holding any MM locks.
71276 */
71277 - while (vma)
71278 + while (vma) {
71279 + vma->vm_mirror = NULL;
71280 vma = remove_vma(vma);
71281 + }
71282
71283 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
71284 }
71285 @@ -2274,6 +2707,13 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
71286 struct vm_area_struct * __vma, * prev;
71287 struct rb_node ** rb_link, * rb_parent;
71288
71289 +#ifdef CONFIG_PAX_SEGMEXEC
71290 + struct vm_area_struct *vma_m = NULL;
71291 +#endif
71292 +
71293 + if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
71294 + return -EPERM;
71295 +
71296 /*
71297 * The vm_pgoff of a purely anonymous vma should be irrelevant
71298 * until its first write fault, when page's anon_vma and index
71299 @@ -2296,7 +2736,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
71300 if ((vma->vm_flags & VM_ACCOUNT) &&
71301 security_vm_enough_memory_mm(mm, vma_pages(vma)))
71302 return -ENOMEM;
71303 +
71304 +#ifdef CONFIG_PAX_SEGMEXEC
71305 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
71306 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
71307 + if (!vma_m)
71308 + return -ENOMEM;
71309 + }
71310 +#endif
71311 +
71312 vma_link(mm, vma, prev, rb_link, rb_parent);
71313 +
71314 +#ifdef CONFIG_PAX_SEGMEXEC
71315 + if (vma_m)
71316 + BUG_ON(pax_mirror_vma(vma_m, vma));
71317 +#endif
71318 +
71319 return 0;
71320 }
71321
71322 @@ -2315,6 +2770,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
71323 struct mempolicy *pol;
71324 bool faulted_in_anon_vma = true;
71325
71326 + BUG_ON(vma->vm_mirror);
71327 +
71328 /*
71329 * If anonymous vma has not yet been faulted, update new pgoff
71330 * to match new location, to increase its chance of merging.
71331 @@ -2382,6 +2839,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
71332 return NULL;
71333 }
71334
71335 +#ifdef CONFIG_PAX_SEGMEXEC
71336 +long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
71337 +{
71338 + struct vm_area_struct *prev_m;
71339 + struct rb_node **rb_link_m, *rb_parent_m;
71340 + struct mempolicy *pol_m;
71341 +
71342 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
71343 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
71344 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
71345 + *vma_m = *vma;
71346 + INIT_LIST_HEAD(&vma_m->anon_vma_chain);
71347 + if (anon_vma_clone(vma_m, vma))
71348 + return -ENOMEM;
71349 + pol_m = vma_policy(vma_m);
71350 + mpol_get(pol_m);
71351 + vma_set_policy(vma_m, pol_m);
71352 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
71353 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
71354 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
71355 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
71356 + if (vma_m->vm_file)
71357 + get_file(vma_m->vm_file);
71358 + if (vma_m->vm_ops && vma_m->vm_ops->open)
71359 + vma_m->vm_ops->open(vma_m);
71360 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
71361 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
71362 + vma_m->vm_mirror = vma;
71363 + vma->vm_mirror = vma_m;
71364 + return 0;
71365 +}
71366 +#endif
71367 +
71368 /*
71369 * Return true if the calling process may expand its vm space by the passed
71370 * number of pages
71371 @@ -2392,7 +2882,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
71372 unsigned long lim;
71373
71374 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
71375 -
71376 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
71377 if (cur + npages > lim)
71378 return 0;
71379 return 1;
71380 @@ -2463,6 +2953,22 @@ int install_special_mapping(struct mm_struct *mm,
71381 vma->vm_start = addr;
71382 vma->vm_end = addr + len;
71383
71384 +#ifdef CONFIG_PAX_MPROTECT
71385 + if (mm->pax_flags & MF_PAX_MPROTECT) {
71386 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
71387 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
71388 + return -EPERM;
71389 + if (!(vm_flags & VM_EXEC))
71390 + vm_flags &= ~VM_MAYEXEC;
71391 +#else
71392 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
71393 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
71394 +#endif
71395 + else
71396 + vm_flags &= ~VM_MAYWRITE;
71397 + }
71398 +#endif
71399 +
71400 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
71401 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
71402
71403 diff --git a/mm/mprotect.c b/mm/mprotect.c
71404 index f437d05..e3763f6 100644
71405 --- a/mm/mprotect.c
71406 +++ b/mm/mprotect.c
71407 @@ -23,10 +23,16 @@
71408 #include <linux/mmu_notifier.h>
71409 #include <linux/migrate.h>
71410 #include <linux/perf_event.h>
71411 +
71412 +#ifdef CONFIG_PAX_MPROTECT
71413 +#include <linux/elf.h>
71414 +#endif
71415 +
71416 #include <asm/uaccess.h>
71417 #include <asm/pgtable.h>
71418 #include <asm/cacheflush.h>
71419 #include <asm/tlbflush.h>
71420 +#include <asm/mmu_context.h>
71421
71422 #ifndef pgprot_modify
71423 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
71424 @@ -141,6 +147,48 @@ static void change_protection(struct vm_area_struct *vma,
71425 flush_tlb_range(vma, start, end);
71426 }
71427
71428 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
71429 +/* called while holding the mmap semaphor for writing except stack expansion */
71430 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
71431 +{
71432 + unsigned long oldlimit, newlimit = 0UL;
71433 +
71434 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
71435 + return;
71436 +
71437 + spin_lock(&mm->page_table_lock);
71438 + oldlimit = mm->context.user_cs_limit;
71439 + if ((prot & VM_EXEC) && oldlimit < end)
71440 + /* USER_CS limit moved up */
71441 + newlimit = end;
71442 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
71443 + /* USER_CS limit moved down */
71444 + newlimit = start;
71445 +
71446 + if (newlimit) {
71447 + mm->context.user_cs_limit = newlimit;
71448 +
71449 +#ifdef CONFIG_SMP
71450 + wmb();
71451 + cpus_clear(mm->context.cpu_user_cs_mask);
71452 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
71453 +#endif
71454 +
71455 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
71456 + }
71457 + spin_unlock(&mm->page_table_lock);
71458 + if (newlimit == end) {
71459 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
71460 +
71461 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
71462 + if (is_vm_hugetlb_page(vma))
71463 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
71464 + else
71465 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
71466 + }
71467 +}
71468 +#endif
71469 +
71470 int
71471 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
71472 unsigned long start, unsigned long end, unsigned long newflags)
71473 @@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
71474 int error;
71475 int dirty_accountable = 0;
71476
71477 +#ifdef CONFIG_PAX_SEGMEXEC
71478 + struct vm_area_struct *vma_m = NULL;
71479 + unsigned long start_m, end_m;
71480 +
71481 + start_m = start + SEGMEXEC_TASK_SIZE;
71482 + end_m = end + SEGMEXEC_TASK_SIZE;
71483 +#endif
71484 +
71485 if (newflags == oldflags) {
71486 *pprev = vma;
71487 return 0;
71488 }
71489
71490 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
71491 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
71492 +
71493 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
71494 + return -ENOMEM;
71495 +
71496 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
71497 + return -ENOMEM;
71498 + }
71499 +
71500 /*
71501 * If we make a private mapping writable we increase our commit;
71502 * but (without finer accounting) cannot reduce our commit if we
71503 @@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
71504 }
71505 }
71506
71507 +#ifdef CONFIG_PAX_SEGMEXEC
71508 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
71509 + if (start != vma->vm_start) {
71510 + error = split_vma(mm, vma, start, 1);
71511 + if (error)
71512 + goto fail;
71513 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
71514 + *pprev = (*pprev)->vm_next;
71515 + }
71516 +
71517 + if (end != vma->vm_end) {
71518 + error = split_vma(mm, vma, end, 0);
71519 + if (error)
71520 + goto fail;
71521 + }
71522 +
71523 + if (pax_find_mirror_vma(vma)) {
71524 + error = __do_munmap(mm, start_m, end_m - start_m);
71525 + if (error)
71526 + goto fail;
71527 + } else {
71528 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
71529 + if (!vma_m) {
71530 + error = -ENOMEM;
71531 + goto fail;
71532 + }
71533 + vma->vm_flags = newflags;
71534 + error = pax_mirror_vma(vma_m, vma);
71535 + if (error) {
71536 + vma->vm_flags = oldflags;
71537 + goto fail;
71538 + }
71539 + }
71540 + }
71541 +#endif
71542 +
71543 /*
71544 * First try to merge with previous and/or next vma.
71545 */
71546 @@ -204,9 +306,21 @@ success:
71547 * vm_flags and vm_page_prot are protected by the mmap_sem
71548 * held in write mode.
71549 */
71550 +
71551 +#ifdef CONFIG_PAX_SEGMEXEC
71552 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
71553 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
71554 +#endif
71555 +
71556 vma->vm_flags = newflags;
71557 +
71558 +#ifdef CONFIG_PAX_MPROTECT
71559 + if (mm->binfmt && mm->binfmt->handle_mprotect)
71560 + mm->binfmt->handle_mprotect(vma, newflags);
71561 +#endif
71562 +
71563 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
71564 - vm_get_page_prot(newflags));
71565 + vm_get_page_prot(vma->vm_flags));
71566
71567 if (vma_wants_writenotify(vma)) {
71568 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
71569 @@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
71570 end = start + len;
71571 if (end <= start)
71572 return -ENOMEM;
71573 +
71574 +#ifdef CONFIG_PAX_SEGMEXEC
71575 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
71576 + if (end > SEGMEXEC_TASK_SIZE)
71577 + return -EINVAL;
71578 + } else
71579 +#endif
71580 +
71581 + if (end > TASK_SIZE)
71582 + return -EINVAL;
71583 +
71584 if (!arch_validate_prot(prot))
71585 return -EINVAL;
71586
71587 @@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
71588 /*
71589 * Does the application expect PROT_READ to imply PROT_EXEC:
71590 */
71591 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
71592 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
71593 prot |= PROT_EXEC;
71594
71595 vm_flags = calc_vm_prot_bits(prot);
71596 @@ -288,6 +413,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
71597 if (start > vma->vm_start)
71598 prev = vma;
71599
71600 +#ifdef CONFIG_PAX_MPROTECT
71601 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
71602 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
71603 +#endif
71604 +
71605 for (nstart = start ; ; ) {
71606 unsigned long newflags;
71607
71608 @@ -297,6 +427,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
71609
71610 /* newflags >> 4 shift VM_MAY% in place of VM_% */
71611 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
71612 + if (prot & (PROT_WRITE | PROT_EXEC))
71613 + gr_log_rwxmprotect(vma->vm_file);
71614 +
71615 + error = -EACCES;
71616 + goto out;
71617 + }
71618 +
71619 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
71620 error = -EACCES;
71621 goto out;
71622 }
71623 @@ -311,6 +449,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
71624 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
71625 if (error)
71626 goto out;
71627 +
71628 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
71629 +
71630 nstart = tmp;
71631
71632 if (nstart < prev->vm_end)
71633 diff --git a/mm/mremap.c b/mm/mremap.c
71634 index 87bb839..c3bfadb 100644
71635 --- a/mm/mremap.c
71636 +++ b/mm/mremap.c
71637 @@ -106,6 +106,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
71638 continue;
71639 pte = ptep_get_and_clear(mm, old_addr, old_pte);
71640 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
71641 +
71642 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
71643 + if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
71644 + pte = pte_exprotect(pte);
71645 +#endif
71646 +
71647 set_pte_at(mm, new_addr, new_pte, pte);
71648 }
71649
71650 @@ -299,6 +305,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
71651 if (is_vm_hugetlb_page(vma))
71652 goto Einval;
71653
71654 +#ifdef CONFIG_PAX_SEGMEXEC
71655 + if (pax_find_mirror_vma(vma))
71656 + goto Einval;
71657 +#endif
71658 +
71659 /* We can't remap across vm area boundaries */
71660 if (old_len > vma->vm_end - addr)
71661 goto Efault;
71662 @@ -355,20 +366,25 @@ static unsigned long mremap_to(unsigned long addr,
71663 unsigned long ret = -EINVAL;
71664 unsigned long charged = 0;
71665 unsigned long map_flags;
71666 + unsigned long pax_task_size = TASK_SIZE;
71667
71668 if (new_addr & ~PAGE_MASK)
71669 goto out;
71670
71671 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
71672 +#ifdef CONFIG_PAX_SEGMEXEC
71673 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
71674 + pax_task_size = SEGMEXEC_TASK_SIZE;
71675 +#endif
71676 +
71677 + pax_task_size -= PAGE_SIZE;
71678 +
71679 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
71680 goto out;
71681
71682 /* Check if the location we're moving into overlaps the
71683 * old location at all, and fail if it does.
71684 */
71685 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
71686 - goto out;
71687 -
71688 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
71689 + if (addr + old_len > new_addr && new_addr + new_len > addr)
71690 goto out;
71691
71692 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
71693 @@ -440,6 +456,7 @@ unsigned long do_mremap(unsigned long addr,
71694 struct vm_area_struct *vma;
71695 unsigned long ret = -EINVAL;
71696 unsigned long charged = 0;
71697 + unsigned long pax_task_size = TASK_SIZE;
71698
71699 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
71700 goto out;
71701 @@ -458,6 +475,17 @@ unsigned long do_mremap(unsigned long addr,
71702 if (!new_len)
71703 goto out;
71704
71705 +#ifdef CONFIG_PAX_SEGMEXEC
71706 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
71707 + pax_task_size = SEGMEXEC_TASK_SIZE;
71708 +#endif
71709 +
71710 + pax_task_size -= PAGE_SIZE;
71711 +
71712 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
71713 + old_len > pax_task_size || addr > pax_task_size-old_len)
71714 + goto out;
71715 +
71716 if (flags & MREMAP_FIXED) {
71717 if (flags & MREMAP_MAYMOVE)
71718 ret = mremap_to(addr, old_len, new_addr, new_len);
71719 @@ -507,6 +535,7 @@ unsigned long do_mremap(unsigned long addr,
71720 addr + new_len);
71721 }
71722 ret = addr;
71723 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
71724 goto out;
71725 }
71726 }
71727 @@ -533,7 +562,13 @@ unsigned long do_mremap(unsigned long addr,
71728 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
71729 if (ret)
71730 goto out;
71731 +
71732 + map_flags = vma->vm_flags;
71733 ret = move_vma(vma, addr, old_len, new_len, new_addr);
71734 + if (!(ret & ~PAGE_MASK)) {
71735 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
71736 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
71737 + }
71738 }
71739 out:
71740 if (ret & ~PAGE_MASK)
71741 diff --git a/mm/nommu.c b/mm/nommu.c
71742 index f59e170..34e2a2b 100644
71743 --- a/mm/nommu.c
71744 +++ b/mm/nommu.c
71745 @@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
71746 int sysctl_overcommit_ratio = 50; /* default is 50% */
71747 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
71748 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
71749 -int heap_stack_gap = 0;
71750
71751 atomic_long_t mmap_pages_allocated;
71752
71753 @@ -827,15 +826,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
71754 EXPORT_SYMBOL(find_vma);
71755
71756 /*
71757 - * find a VMA
71758 - * - we don't extend stack VMAs under NOMMU conditions
71759 - */
71760 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
71761 -{
71762 - return find_vma(mm, addr);
71763 -}
71764 -
71765 -/*
71766 * expand a stack to a given address
71767 * - not supported under NOMMU conditions
71768 */
71769 @@ -1555,6 +1545,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
71770
71771 /* most fields are the same, copy all, and then fixup */
71772 *new = *vma;
71773 + INIT_LIST_HEAD(&new->anon_vma_chain);
71774 *region = *vma->vm_region;
71775 new->vm_region = region;
71776
71777 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
71778 index a13ded1..b949d15 100644
71779 --- a/mm/page_alloc.c
71780 +++ b/mm/page_alloc.c
71781 @@ -335,7 +335,7 @@ out:
71782 * This usage means that zero-order pages may not be compound.
71783 */
71784
71785 -static void free_compound_page(struct page *page)
71786 +void free_compound_page(struct page *page)
71787 {
71788 __free_pages_ok(page, compound_order(page));
71789 }
71790 @@ -692,6 +692,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
71791 int i;
71792 int bad = 0;
71793
71794 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
71795 + unsigned long index = 1UL << order;
71796 +#endif
71797 +
71798 trace_mm_page_free(page, order);
71799 kmemcheck_free_shadow(page, order);
71800
71801 @@ -707,6 +711,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
71802 debug_check_no_obj_freed(page_address(page),
71803 PAGE_SIZE << order);
71804 }
71805 +
71806 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
71807 + for (; index; --index)
71808 + sanitize_highpage(page + index - 1);
71809 +#endif
71810 +
71811 arch_free_page(page, order);
71812 kernel_map_pages(page, 1 << order, 0);
71813
71814 @@ -830,8 +840,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
71815 arch_alloc_page(page, order);
71816 kernel_map_pages(page, 1 << order, 1);
71817
71818 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
71819 if (gfp_flags & __GFP_ZERO)
71820 prep_zero_page(page, order, gfp_flags);
71821 +#endif
71822
71823 if (order && (gfp_flags & __GFP_COMP))
71824 prep_compound_page(page, order);
71825 @@ -3468,7 +3480,13 @@ static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
71826 unsigned long pfn;
71827
71828 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
71829 +#ifdef CONFIG_X86_32
71830 + /* boot failures in VMware 8 on 32bit vanilla since
71831 + this change */
71832 + if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
71833 +#else
71834 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
71835 +#endif
71836 return 1;
71837 }
71838 return 0;
71839 diff --git a/mm/percpu.c b/mm/percpu.c
71840 index f47af91..7eeef99 100644
71841 --- a/mm/percpu.c
71842 +++ b/mm/percpu.c
71843 @@ -122,7 +122,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
71844 static unsigned int pcpu_high_unit_cpu __read_mostly;
71845
71846 /* the address of the first chunk which starts with the kernel static area */
71847 -void *pcpu_base_addr __read_mostly;
71848 +void *pcpu_base_addr __read_only;
71849 EXPORT_SYMBOL_GPL(pcpu_base_addr);
71850
71851 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
71852 diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
71853 index c20ff48..137702a 100644
71854 --- a/mm/process_vm_access.c
71855 +++ b/mm/process_vm_access.c
71856 @@ -13,6 +13,7 @@
71857 #include <linux/uio.h>
71858 #include <linux/sched.h>
71859 #include <linux/highmem.h>
71860 +#include <linux/security.h>
71861 #include <linux/ptrace.h>
71862 #include <linux/slab.h>
71863 #include <linux/syscalls.h>
71864 @@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
71865 size_t iov_l_curr_offset = 0;
71866 ssize_t iov_len;
71867
71868 + return -ENOSYS; // PaX: until properly audited
71869 +
71870 /*
71871 * Work out how many pages of struct pages we're going to need
71872 * when eventually calling get_user_pages
71873 */
71874 for (i = 0; i < riovcnt; i++) {
71875 iov_len = rvec[i].iov_len;
71876 - if (iov_len > 0) {
71877 - nr_pages_iov = ((unsigned long)rvec[i].iov_base
71878 - + iov_len)
71879 - / PAGE_SIZE - (unsigned long)rvec[i].iov_base
71880 - / PAGE_SIZE + 1;
71881 - nr_pages = max(nr_pages, nr_pages_iov);
71882 - }
71883 + if (iov_len <= 0)
71884 + continue;
71885 + nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
71886 + (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
71887 + nr_pages = max(nr_pages, nr_pages_iov);
71888 }
71889
71890 if (nr_pages == 0)
71891 @@ -298,6 +299,11 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
71892 goto free_proc_pages;
71893 }
71894
71895 + if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
71896 + rc = -EPERM;
71897 + goto put_task_struct;
71898 + }
71899 +
71900 mm = mm_access(task, PTRACE_MODE_ATTACH);
71901 if (!mm || IS_ERR(mm)) {
71902 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
71903 diff --git a/mm/rmap.c b/mm/rmap.c
71904 index c8454e0..b04f3a2 100644
71905 --- a/mm/rmap.c
71906 +++ b/mm/rmap.c
71907 @@ -152,6 +152,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
71908 struct anon_vma *anon_vma = vma->anon_vma;
71909 struct anon_vma_chain *avc;
71910
71911 +#ifdef CONFIG_PAX_SEGMEXEC
71912 + struct anon_vma_chain *avc_m = NULL;
71913 +#endif
71914 +
71915 might_sleep();
71916 if (unlikely(!anon_vma)) {
71917 struct mm_struct *mm = vma->vm_mm;
71918 @@ -161,6 +165,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
71919 if (!avc)
71920 goto out_enomem;
71921
71922 +#ifdef CONFIG_PAX_SEGMEXEC
71923 + avc_m = anon_vma_chain_alloc(GFP_KERNEL);
71924 + if (!avc_m)
71925 + goto out_enomem_free_avc;
71926 +#endif
71927 +
71928 anon_vma = find_mergeable_anon_vma(vma);
71929 allocated = NULL;
71930 if (!anon_vma) {
71931 @@ -174,6 +184,21 @@ int anon_vma_prepare(struct vm_area_struct *vma)
71932 /* page_table_lock to protect against threads */
71933 spin_lock(&mm->page_table_lock);
71934 if (likely(!vma->anon_vma)) {
71935 +
71936 +#ifdef CONFIG_PAX_SEGMEXEC
71937 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
71938 +
71939 + if (vma_m) {
71940 + BUG_ON(vma_m->anon_vma);
71941 + vma_m->anon_vma = anon_vma;
71942 + avc_m->anon_vma = anon_vma;
71943 + avc_m->vma = vma;
71944 + list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
71945 + list_add(&avc_m->same_anon_vma, &anon_vma->head);
71946 + avc_m = NULL;
71947 + }
71948 +#endif
71949 +
71950 vma->anon_vma = anon_vma;
71951 avc->anon_vma = anon_vma;
71952 avc->vma = vma;
71953 @@ -187,12 +212,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
71954
71955 if (unlikely(allocated))
71956 put_anon_vma(allocated);
71957 +
71958 +#ifdef CONFIG_PAX_SEGMEXEC
71959 + if (unlikely(avc_m))
71960 + anon_vma_chain_free(avc_m);
71961 +#endif
71962 +
71963 if (unlikely(avc))
71964 anon_vma_chain_free(avc);
71965 }
71966 return 0;
71967
71968 out_enomem_free_avc:
71969 +
71970 +#ifdef CONFIG_PAX_SEGMEXEC
71971 + if (avc_m)
71972 + anon_vma_chain_free(avc_m);
71973 +#endif
71974 +
71975 anon_vma_chain_free(avc);
71976 out_enomem:
71977 return -ENOMEM;
71978 @@ -243,7 +280,7 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
71979 * Attach the anon_vmas from src to dst.
71980 * Returns 0 on success, -ENOMEM on failure.
71981 */
71982 -int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
71983 +int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
71984 {
71985 struct anon_vma_chain *avc, *pavc;
71986 struct anon_vma *root = NULL;
71987 @@ -321,7 +358,7 @@ void anon_vma_moveto_tail(struct vm_area_struct *dst)
71988 * the corresponding VMA in the parent process is attached to.
71989 * Returns 0 on success, non-zero on failure.
71990 */
71991 -int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
71992 +int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
71993 {
71994 struct anon_vma_chain *avc;
71995 struct anon_vma *anon_vma;
71996 diff --git a/mm/shmem.c b/mm/shmem.c
71997 index 269d049..a9d2b50 100644
71998 --- a/mm/shmem.c
71999 +++ b/mm/shmem.c
72000 @@ -31,7 +31,7 @@
72001 #include <linux/export.h>
72002 #include <linux/swap.h>
72003
72004 -static struct vfsmount *shm_mnt;
72005 +struct vfsmount *shm_mnt;
72006
72007 #ifdef CONFIG_SHMEM
72008 /*
72009 @@ -74,7 +74,7 @@ static struct vfsmount *shm_mnt;
72010 #define BOGO_DIRENT_SIZE 20
72011
72012 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
72013 -#define SHORT_SYMLINK_LEN 128
72014 +#define SHORT_SYMLINK_LEN 64
72015
72016 struct shmem_xattr {
72017 struct list_head list; /* anchored by shmem_inode_info->xattr_list */
72018 @@ -2180,8 +2180,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
72019 int err = -ENOMEM;
72020
72021 /* Round up to L1_CACHE_BYTES to resist false sharing */
72022 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
72023 - L1_CACHE_BYTES), GFP_KERNEL);
72024 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
72025 if (!sbinfo)
72026 return -ENOMEM;
72027
72028 diff --git a/mm/slab.c b/mm/slab.c
72029 index f0bd785..348b96a 100644
72030 --- a/mm/slab.c
72031 +++ b/mm/slab.c
72032 @@ -153,7 +153,7 @@
72033
72034 /* Legal flag mask for kmem_cache_create(). */
72035 #if DEBUG
72036 -# define CREATE_MASK (SLAB_RED_ZONE | \
72037 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
72038 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
72039 SLAB_CACHE_DMA | \
72040 SLAB_STORE_USER | \
72041 @@ -161,7 +161,7 @@
72042 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
72043 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
72044 #else
72045 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
72046 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
72047 SLAB_CACHE_DMA | \
72048 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
72049 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
72050 @@ -290,7 +290,7 @@ struct kmem_list3 {
72051 * Need this for bootstrapping a per node allocator.
72052 */
72053 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
72054 -static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
72055 +static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
72056 #define CACHE_CACHE 0
72057 #define SIZE_AC MAX_NUMNODES
72058 #define SIZE_L3 (2 * MAX_NUMNODES)
72059 @@ -391,10 +391,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
72060 if ((x)->max_freeable < i) \
72061 (x)->max_freeable = i; \
72062 } while (0)
72063 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
72064 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
72065 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
72066 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
72067 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
72068 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
72069 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
72070 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
72071 #else
72072 #define STATS_INC_ACTIVE(x) do { } while (0)
72073 #define STATS_DEC_ACTIVE(x) do { } while (0)
72074 @@ -542,7 +542,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
72075 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
72076 */
72077 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
72078 - const struct slab *slab, void *obj)
72079 + const struct slab *slab, const void *obj)
72080 {
72081 u32 offset = (obj - slab->s_mem);
72082 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
72083 @@ -568,7 +568,7 @@ struct cache_names {
72084 static struct cache_names __initdata cache_names[] = {
72085 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
72086 #include <linux/kmalloc_sizes.h>
72087 - {NULL,}
72088 + {NULL}
72089 #undef CACHE
72090 };
72091
72092 @@ -1588,7 +1588,7 @@ void __init kmem_cache_init(void)
72093 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
72094 sizes[INDEX_AC].cs_size,
72095 ARCH_KMALLOC_MINALIGN,
72096 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
72097 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
72098 NULL);
72099
72100 if (INDEX_AC != INDEX_L3) {
72101 @@ -1596,7 +1596,7 @@ void __init kmem_cache_init(void)
72102 kmem_cache_create(names[INDEX_L3].name,
72103 sizes[INDEX_L3].cs_size,
72104 ARCH_KMALLOC_MINALIGN,
72105 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
72106 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
72107 NULL);
72108 }
72109
72110 @@ -1614,7 +1614,7 @@ void __init kmem_cache_init(void)
72111 sizes->cs_cachep = kmem_cache_create(names->name,
72112 sizes->cs_size,
72113 ARCH_KMALLOC_MINALIGN,
72114 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
72115 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
72116 NULL);
72117 }
72118 #ifdef CONFIG_ZONE_DMA
72119 @@ -4339,10 +4339,10 @@ static int s_show(struct seq_file *m, void *p)
72120 }
72121 /* cpu stats */
72122 {
72123 - unsigned long allochit = atomic_read(&cachep->allochit);
72124 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
72125 - unsigned long freehit = atomic_read(&cachep->freehit);
72126 - unsigned long freemiss = atomic_read(&cachep->freemiss);
72127 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
72128 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
72129 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
72130 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
72131
72132 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
72133 allochit, allocmiss, freehit, freemiss);
72134 @@ -4601,13 +4601,62 @@ static int __init slab_proc_init(void)
72135 {
72136 proc_create("slabinfo",S_IWUSR|S_IRUSR,NULL,&proc_slabinfo_operations);
72137 #ifdef CONFIG_DEBUG_SLAB_LEAK
72138 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
72139 + proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
72140 #endif
72141 return 0;
72142 }
72143 module_init(slab_proc_init);
72144 #endif
72145
72146 +void check_object_size(const void *ptr, unsigned long n, bool to)
72147 +{
72148 +
72149 +#ifdef CONFIG_PAX_USERCOPY
72150 + struct page *page;
72151 + struct kmem_cache *cachep = NULL;
72152 + struct slab *slabp;
72153 + unsigned int objnr;
72154 + unsigned long offset;
72155 + const char *type;
72156 +
72157 + if (!n)
72158 + return;
72159 +
72160 + type = "<null>";
72161 + if (ZERO_OR_NULL_PTR(ptr))
72162 + goto report;
72163 +
72164 + if (!virt_addr_valid(ptr))
72165 + return;
72166 +
72167 + page = virt_to_head_page(ptr);
72168 +
72169 + type = "<process stack>";
72170 + if (!PageSlab(page)) {
72171 + if (object_is_on_stack(ptr, n) == -1)
72172 + goto report;
72173 + return;
72174 + }
72175 +
72176 + cachep = page_get_cache(page);
72177 + type = cachep->name;
72178 + if (!(cachep->flags & SLAB_USERCOPY))
72179 + goto report;
72180 +
72181 + slabp = page_get_slab(page);
72182 + objnr = obj_to_index(cachep, slabp, ptr);
72183 + BUG_ON(objnr >= cachep->num);
72184 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
72185 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
72186 + return;
72187 +
72188 +report:
72189 + pax_report_usercopy(ptr, n, to, type);
72190 +#endif
72191 +
72192 +}
72193 +EXPORT_SYMBOL(check_object_size);
72194 +
72195 /**
72196 * ksize - get the actual amount of memory allocated for a given object
72197 * @objp: Pointer to the object
72198 diff --git a/mm/slob.c b/mm/slob.c
72199 index 8105be4..e045f96 100644
72200 --- a/mm/slob.c
72201 +++ b/mm/slob.c
72202 @@ -29,7 +29,7 @@
72203 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
72204 * alloc_pages() directly, allocating compound pages so the page order
72205 * does not have to be separately tracked, and also stores the exact
72206 - * allocation size in page->private so that it can be used to accurately
72207 + * allocation size in slob_page->size so that it can be used to accurately
72208 * provide ksize(). These objects are detected in kfree() because slob_page()
72209 * is false for them.
72210 *
72211 @@ -58,6 +58,7 @@
72212 */
72213
72214 #include <linux/kernel.h>
72215 +#include <linux/sched.h>
72216 #include <linux/slab.h>
72217 #include <linux/mm.h>
72218 #include <linux/swap.h> /* struct reclaim_state */
72219 @@ -102,7 +103,8 @@ struct slob_page {
72220 unsigned long flags; /* mandatory */
72221 atomic_t _count; /* mandatory */
72222 slobidx_t units; /* free units left in page */
72223 - unsigned long pad[2];
72224 + unsigned long pad[1];
72225 + unsigned long size; /* size when >=PAGE_SIZE */
72226 slob_t *free; /* first free slob_t in page */
72227 struct list_head list; /* linked list of free pages */
72228 };
72229 @@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
72230 */
72231 static inline int is_slob_page(struct slob_page *sp)
72232 {
72233 - return PageSlab((struct page *)sp);
72234 + return PageSlab((struct page *)sp) && !sp->size;
72235 }
72236
72237 static inline void set_slob_page(struct slob_page *sp)
72238 @@ -150,7 +152,7 @@ static inline void clear_slob_page(struct slob_page *sp)
72239
72240 static inline struct slob_page *slob_page(const void *addr)
72241 {
72242 - return (struct slob_page *)virt_to_page(addr);
72243 + return (struct slob_page *)virt_to_head_page(addr);
72244 }
72245
72246 /*
72247 @@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
72248 /*
72249 * Return the size of a slob block.
72250 */
72251 -static slobidx_t slob_units(slob_t *s)
72252 +static slobidx_t slob_units(const slob_t *s)
72253 {
72254 if (s->units > 0)
72255 return s->units;
72256 @@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
72257 /*
72258 * Return the next free slob block pointer after this one.
72259 */
72260 -static slob_t *slob_next(slob_t *s)
72261 +static slob_t *slob_next(const slob_t *s)
72262 {
72263 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
72264 slobidx_t next;
72265 @@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
72266 /*
72267 * Returns true if s is the last free block in its page.
72268 */
72269 -static int slob_last(slob_t *s)
72270 +static int slob_last(const slob_t *s)
72271 {
72272 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
72273 }
72274 @@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
72275 if (!page)
72276 return NULL;
72277
72278 + set_slob_page(page);
72279 return page_address(page);
72280 }
72281
72282 @@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
72283 if (!b)
72284 return NULL;
72285 sp = slob_page(b);
72286 - set_slob_page(sp);
72287
72288 spin_lock_irqsave(&slob_lock, flags);
72289 sp->units = SLOB_UNITS(PAGE_SIZE);
72290 sp->free = b;
72291 + sp->size = 0;
72292 INIT_LIST_HEAD(&sp->list);
72293 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
72294 set_slob_page_free(sp, slob_list);
72295 @@ -476,10 +479,9 @@ out:
72296 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
72297 */
72298
72299 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
72300 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
72301 {
72302 - unsigned int *m;
72303 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
72304 + slob_t *m;
72305 void *ret;
72306
72307 gfp &= gfp_allowed_mask;
72308 @@ -494,7 +496,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
72309
72310 if (!m)
72311 return NULL;
72312 - *m = size;
72313 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
72314 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
72315 + m[0].units = size;
72316 + m[1].units = align;
72317 ret = (void *)m + align;
72318
72319 trace_kmalloc_node(_RET_IP_, ret,
72320 @@ -506,16 +511,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
72321 gfp |= __GFP_COMP;
72322 ret = slob_new_pages(gfp, order, node);
72323 if (ret) {
72324 - struct page *page;
72325 - page = virt_to_page(ret);
72326 - page->private = size;
72327 + struct slob_page *sp;
72328 + sp = slob_page(ret);
72329 + sp->size = size;
72330 }
72331
72332 trace_kmalloc_node(_RET_IP_, ret,
72333 size, PAGE_SIZE << order, gfp, node);
72334 }
72335
72336 - kmemleak_alloc(ret, size, 1, gfp);
72337 + return ret;
72338 +}
72339 +
72340 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
72341 +{
72342 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
72343 + void *ret = __kmalloc_node_align(size, gfp, node, align);
72344 +
72345 + if (!ZERO_OR_NULL_PTR(ret))
72346 + kmemleak_alloc(ret, size, 1, gfp);
72347 return ret;
72348 }
72349 EXPORT_SYMBOL(__kmalloc_node);
72350 @@ -533,13 +547,92 @@ void kfree(const void *block)
72351 sp = slob_page(block);
72352 if (is_slob_page(sp)) {
72353 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
72354 - unsigned int *m = (unsigned int *)(block - align);
72355 - slob_free(m, *m + align);
72356 - } else
72357 + slob_t *m = (slob_t *)(block - align);
72358 + slob_free(m, m[0].units + align);
72359 + } else {
72360 + clear_slob_page(sp);
72361 + free_slob_page(sp);
72362 + sp->size = 0;
72363 put_page(&sp->page);
72364 + }
72365 }
72366 EXPORT_SYMBOL(kfree);
72367
72368 +void check_object_size(const void *ptr, unsigned long n, bool to)
72369 +{
72370 +
72371 +#ifdef CONFIG_PAX_USERCOPY
72372 + struct slob_page *sp;
72373 + const slob_t *free;
72374 + const void *base;
72375 + unsigned long flags;
72376 + const char *type;
72377 +
72378 + if (!n)
72379 + return;
72380 +
72381 + type = "<null>";
72382 + if (ZERO_OR_NULL_PTR(ptr))
72383 + goto report;
72384 +
72385 + if (!virt_addr_valid(ptr))
72386 + return;
72387 +
72388 + type = "<process stack>";
72389 + sp = slob_page(ptr);
72390 + if (!PageSlab((struct page *)sp)) {
72391 + if (object_is_on_stack(ptr, n) == -1)
72392 + goto report;
72393 + return;
72394 + }
72395 +
72396 + type = "<slob>";
72397 + if (sp->size) {
72398 + base = page_address(&sp->page);
72399 + if (base <= ptr && n <= sp->size - (ptr - base))
72400 + return;
72401 + goto report;
72402 + }
72403 +
72404 + /* some tricky double walking to find the chunk */
72405 + spin_lock_irqsave(&slob_lock, flags);
72406 + base = (void *)((unsigned long)ptr & PAGE_MASK);
72407 + free = sp->free;
72408 +
72409 + while (!slob_last(free) && (void *)free <= ptr) {
72410 + base = free + slob_units(free);
72411 + free = slob_next(free);
72412 + }
72413 +
72414 + while (base < (void *)free) {
72415 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
72416 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
72417 + int offset;
72418 +
72419 + if (ptr < base + align)
72420 + break;
72421 +
72422 + offset = ptr - base - align;
72423 + if (offset >= m) {
72424 + base += size;
72425 + continue;
72426 + }
72427 +
72428 + if (n > m - offset)
72429 + break;
72430 +
72431 + spin_unlock_irqrestore(&slob_lock, flags);
72432 + return;
72433 + }
72434 +
72435 + spin_unlock_irqrestore(&slob_lock, flags);
72436 +report:
72437 + pax_report_usercopy(ptr, n, to, type);
72438 +#endif
72439 +
72440 +}
72441 +EXPORT_SYMBOL(check_object_size);
72442 +
72443 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
72444 size_t ksize(const void *block)
72445 {
72446 @@ -552,10 +645,10 @@ size_t ksize(const void *block)
72447 sp = slob_page(block);
72448 if (is_slob_page(sp)) {
72449 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
72450 - unsigned int *m = (unsigned int *)(block - align);
72451 - return SLOB_UNITS(*m) * SLOB_UNIT;
72452 + slob_t *m = (slob_t *)(block - align);
72453 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
72454 } else
72455 - return sp->page.private;
72456 + return sp->size;
72457 }
72458 EXPORT_SYMBOL(ksize);
72459
72460 @@ -571,8 +664,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
72461 {
72462 struct kmem_cache *c;
72463
72464 +#ifdef CONFIG_PAX_USERCOPY
72465 + c = __kmalloc_node_align(sizeof(struct kmem_cache),
72466 + GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
72467 +#else
72468 c = slob_alloc(sizeof(struct kmem_cache),
72469 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
72470 +#endif
72471
72472 if (c) {
72473 c->name = name;
72474 @@ -614,17 +712,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
72475
72476 lockdep_trace_alloc(flags);
72477
72478 +#ifdef CONFIG_PAX_USERCOPY
72479 + b = __kmalloc_node_align(c->size, flags, node, c->align);
72480 +#else
72481 if (c->size < PAGE_SIZE) {
72482 b = slob_alloc(c->size, flags, c->align, node);
72483 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
72484 SLOB_UNITS(c->size) * SLOB_UNIT,
72485 flags, node);
72486 } else {
72487 + struct slob_page *sp;
72488 +
72489 b = slob_new_pages(flags, get_order(c->size), node);
72490 + sp = slob_page(b);
72491 + sp->size = c->size;
72492 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
72493 PAGE_SIZE << get_order(c->size),
72494 flags, node);
72495 }
72496 +#endif
72497
72498 if (c->ctor)
72499 c->ctor(b);
72500 @@ -636,10 +742,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
72501
72502 static void __kmem_cache_free(void *b, int size)
72503 {
72504 - if (size < PAGE_SIZE)
72505 + struct slob_page *sp = slob_page(b);
72506 +
72507 + if (is_slob_page(sp))
72508 slob_free(b, size);
72509 - else
72510 + else {
72511 + clear_slob_page(sp);
72512 + free_slob_page(sp);
72513 + sp->size = 0;
72514 slob_free_pages(b, get_order(size));
72515 + }
72516 }
72517
72518 static void kmem_rcu_free(struct rcu_head *head)
72519 @@ -652,17 +764,31 @@ static void kmem_rcu_free(struct rcu_head *head)
72520
72521 void kmem_cache_free(struct kmem_cache *c, void *b)
72522 {
72523 + int size = c->size;
72524 +
72525 +#ifdef CONFIG_PAX_USERCOPY
72526 + if (size + c->align < PAGE_SIZE) {
72527 + size += c->align;
72528 + b -= c->align;
72529 + }
72530 +#endif
72531 +
72532 kmemleak_free_recursive(b, c->flags);
72533 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
72534 struct slob_rcu *slob_rcu;
72535 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
72536 - slob_rcu->size = c->size;
72537 + slob_rcu = b + (size - sizeof(struct slob_rcu));
72538 + slob_rcu->size = size;
72539 call_rcu(&slob_rcu->head, kmem_rcu_free);
72540 } else {
72541 - __kmem_cache_free(b, c->size);
72542 + __kmem_cache_free(b, size);
72543 }
72544
72545 +#ifdef CONFIG_PAX_USERCOPY
72546 + trace_kfree(_RET_IP_, b);
72547 +#else
72548 trace_kmem_cache_free(_RET_IP_, b);
72549 +#endif
72550 +
72551 }
72552 EXPORT_SYMBOL(kmem_cache_free);
72553
72554 diff --git a/mm/slub.c b/mm/slub.c
72555 index 4907563..e3d7905 100644
72556 --- a/mm/slub.c
72557 +++ b/mm/slub.c
72558 @@ -208,7 +208,7 @@ struct track {
72559
72560 enum track_item { TRACK_ALLOC, TRACK_FREE };
72561
72562 -#ifdef CONFIG_SYSFS
72563 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
72564 static int sysfs_slab_add(struct kmem_cache *);
72565 static int sysfs_slab_alias(struct kmem_cache *, const char *);
72566 static void sysfs_slab_remove(struct kmem_cache *);
72567 @@ -532,7 +532,7 @@ static void print_track(const char *s, struct track *t)
72568 if (!t->addr)
72569 return;
72570
72571 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
72572 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
72573 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
72574 #ifdef CONFIG_STACKTRACE
72575 {
72576 @@ -2571,6 +2571,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
72577
72578 page = virt_to_head_page(x);
72579
72580 + BUG_ON(!PageSlab(page));
72581 +
72582 slab_free(s, page, x, _RET_IP_);
72583
72584 trace_kmem_cache_free(_RET_IP_, x);
72585 @@ -2604,7 +2606,7 @@ static int slub_min_objects;
72586 * Merge control. If this is set then no merging of slab caches will occur.
72587 * (Could be removed. This was introduced to pacify the merge skeptics.)
72588 */
72589 -static int slub_nomerge;
72590 +static int slub_nomerge = 1;
72591
72592 /*
72593 * Calculate the order of allocation given an slab object size.
72594 @@ -3057,7 +3059,7 @@ static int kmem_cache_open(struct kmem_cache *s,
72595 else
72596 s->cpu_partial = 30;
72597
72598 - s->refcount = 1;
72599 + atomic_set(&s->refcount, 1);
72600 #ifdef CONFIG_NUMA
72601 s->remote_node_defrag_ratio = 1000;
72602 #endif
72603 @@ -3161,8 +3163,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
72604 void kmem_cache_destroy(struct kmem_cache *s)
72605 {
72606 down_write(&slub_lock);
72607 - s->refcount--;
72608 - if (!s->refcount) {
72609 + if (atomic_dec_and_test(&s->refcount)) {
72610 list_del(&s->list);
72611 up_write(&slub_lock);
72612 if (kmem_cache_close(s)) {
72613 @@ -3373,6 +3374,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
72614 EXPORT_SYMBOL(__kmalloc_node);
72615 #endif
72616
72617 +void check_object_size(const void *ptr, unsigned long n, bool to)
72618 +{
72619 +
72620 +#ifdef CONFIG_PAX_USERCOPY
72621 + struct page *page;
72622 + struct kmem_cache *s = NULL;
72623 + unsigned long offset;
72624 + const char *type;
72625 +
72626 + if (!n)
72627 + return;
72628 +
72629 + type = "<null>";
72630 + if (ZERO_OR_NULL_PTR(ptr))
72631 + goto report;
72632 +
72633 + if (!virt_addr_valid(ptr))
72634 + return;
72635 +
72636 + page = virt_to_head_page(ptr);
72637 +
72638 + type = "<process stack>";
72639 + if (!PageSlab(page)) {
72640 + if (object_is_on_stack(ptr, n) == -1)
72641 + goto report;
72642 + return;
72643 + }
72644 +
72645 + s = page->slab;
72646 + type = s->name;
72647 + if (!(s->flags & SLAB_USERCOPY))
72648 + goto report;
72649 +
72650 + offset = (ptr - page_address(page)) % s->size;
72651 + if (offset <= s->objsize && n <= s->objsize - offset)
72652 + return;
72653 +
72654 +report:
72655 + pax_report_usercopy(ptr, n, to, type);
72656 +#endif
72657 +
72658 +}
72659 +EXPORT_SYMBOL(check_object_size);
72660 +
72661 size_t ksize(const void *object)
72662 {
72663 struct page *page;
72664 @@ -3647,7 +3692,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
72665 int node;
72666
72667 list_add(&s->list, &slab_caches);
72668 - s->refcount = -1;
72669 + atomic_set(&s->refcount, -1);
72670
72671 for_each_node_state(node, N_NORMAL_MEMORY) {
72672 struct kmem_cache_node *n = get_node(s, node);
72673 @@ -3767,17 +3812,17 @@ void __init kmem_cache_init(void)
72674
72675 /* Caches that are not of the two-to-the-power-of size */
72676 if (KMALLOC_MIN_SIZE <= 32) {
72677 - kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
72678 + kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
72679 caches++;
72680 }
72681
72682 if (KMALLOC_MIN_SIZE <= 64) {
72683 - kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
72684 + kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
72685 caches++;
72686 }
72687
72688 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
72689 - kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
72690 + kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
72691 caches++;
72692 }
72693
72694 @@ -3845,7 +3890,7 @@ static int slab_unmergeable(struct kmem_cache *s)
72695 /*
72696 * We may have set a slab to be unmergeable during bootstrap.
72697 */
72698 - if (s->refcount < 0)
72699 + if (atomic_read(&s->refcount) < 0)
72700 return 1;
72701
72702 return 0;
72703 @@ -3904,7 +3949,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
72704 down_write(&slub_lock);
72705 s = find_mergeable(size, align, flags, name, ctor);
72706 if (s) {
72707 - s->refcount++;
72708 + atomic_inc(&s->refcount);
72709 /*
72710 * Adjust the object sizes so that we clear
72711 * the complete object on kzalloc.
72712 @@ -3913,7 +3958,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
72713 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
72714
72715 if (sysfs_slab_alias(s, name)) {
72716 - s->refcount--;
72717 + atomic_dec(&s->refcount);
72718 goto err;
72719 }
72720 up_write(&slub_lock);
72721 @@ -4041,7 +4086,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
72722 }
72723 #endif
72724
72725 -#ifdef CONFIG_SYSFS
72726 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
72727 static int count_inuse(struct page *page)
72728 {
72729 return page->inuse;
72730 @@ -4428,12 +4473,12 @@ static void resiliency_test(void)
72731 validate_slab_cache(kmalloc_caches[9]);
72732 }
72733 #else
72734 -#ifdef CONFIG_SYSFS
72735 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
72736 static void resiliency_test(void) {};
72737 #endif
72738 #endif
72739
72740 -#ifdef CONFIG_SYSFS
72741 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
72742 enum slab_stat_type {
72743 SL_ALL, /* All slabs */
72744 SL_PARTIAL, /* Only partially allocated slabs */
72745 @@ -4676,7 +4721,7 @@ SLAB_ATTR_RO(ctor);
72746
72747 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
72748 {
72749 - return sprintf(buf, "%d\n", s->refcount - 1);
72750 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
72751 }
72752 SLAB_ATTR_RO(aliases);
72753
72754 @@ -5243,6 +5288,7 @@ static char *create_unique_id(struct kmem_cache *s)
72755 return name;
72756 }
72757
72758 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
72759 static int sysfs_slab_add(struct kmem_cache *s)
72760 {
72761 int err;
72762 @@ -5305,6 +5351,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
72763 kobject_del(&s->kobj);
72764 kobject_put(&s->kobj);
72765 }
72766 +#endif
72767
72768 /*
72769 * Need to buffer aliases during bootup until sysfs becomes
72770 @@ -5318,6 +5365,7 @@ struct saved_alias {
72771
72772 static struct saved_alias *alias_list;
72773
72774 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
72775 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
72776 {
72777 struct saved_alias *al;
72778 @@ -5340,6 +5388,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
72779 alias_list = al;
72780 return 0;
72781 }
72782 +#endif
72783
72784 static int __init slab_sysfs_init(void)
72785 {
72786 diff --git a/mm/swap.c b/mm/swap.c
72787 index 14380e9..e244704 100644
72788 --- a/mm/swap.c
72789 +++ b/mm/swap.c
72790 @@ -30,6 +30,7 @@
72791 #include <linux/backing-dev.h>
72792 #include <linux/memcontrol.h>
72793 #include <linux/gfp.h>
72794 +#include <linux/hugetlb.h>
72795
72796 #include "internal.h"
72797
72798 @@ -70,6 +71,8 @@ static void __put_compound_page(struct page *page)
72799
72800 __page_cache_release(page);
72801 dtor = get_compound_page_dtor(page);
72802 + if (!PageHuge(page))
72803 + BUG_ON(dtor != free_compound_page);
72804 (*dtor)(page);
72805 }
72806
72807 diff --git a/mm/swapfile.c b/mm/swapfile.c
72808 index d999f09..e00270a 100644
72809 --- a/mm/swapfile.c
72810 +++ b/mm/swapfile.c
72811 @@ -61,7 +61,7 @@ static DEFINE_MUTEX(swapon_mutex);
72812
72813 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
72814 /* Activity counter to indicate that a swapon or swapoff has occurred */
72815 -static atomic_t proc_poll_event = ATOMIC_INIT(0);
72816 +static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
72817
72818 static inline unsigned char swap_count(unsigned char ent)
72819 {
72820 @@ -1671,7 +1671,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
72821 }
72822 filp_close(swap_file, NULL);
72823 err = 0;
72824 - atomic_inc(&proc_poll_event);
72825 + atomic_inc_unchecked(&proc_poll_event);
72826 wake_up_interruptible(&proc_poll_wait);
72827
72828 out_dput:
72829 @@ -1687,8 +1687,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
72830
72831 poll_wait(file, &proc_poll_wait, wait);
72832
72833 - if (seq->poll_event != atomic_read(&proc_poll_event)) {
72834 - seq->poll_event = atomic_read(&proc_poll_event);
72835 + if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
72836 + seq->poll_event = atomic_read_unchecked(&proc_poll_event);
72837 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
72838 }
72839
72840 @@ -1786,7 +1786,7 @@ static int swaps_open(struct inode *inode, struct file *file)
72841 return ret;
72842
72843 seq = file->private_data;
72844 - seq->poll_event = atomic_read(&proc_poll_event);
72845 + seq->poll_event = atomic_read_unchecked(&proc_poll_event);
72846 return 0;
72847 }
72848
72849 @@ -2124,7 +2124,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
72850 (p->flags & SWP_DISCARDABLE) ? "D" : "");
72851
72852 mutex_unlock(&swapon_mutex);
72853 - atomic_inc(&proc_poll_event);
72854 + atomic_inc_unchecked(&proc_poll_event);
72855 wake_up_interruptible(&proc_poll_wait);
72856
72857 if (S_ISREG(inode->i_mode))
72858 diff --git a/mm/util.c b/mm/util.c
72859 index 136ac4f..5117eef 100644
72860 --- a/mm/util.c
72861 +++ b/mm/util.c
72862 @@ -114,6 +114,7 @@ EXPORT_SYMBOL(memdup_user);
72863 * allocated buffer. Use this if you don't want to free the buffer immediately
72864 * like, for example, with RCU.
72865 */
72866 +#undef __krealloc
72867 void *__krealloc(const void *p, size_t new_size, gfp_t flags)
72868 {
72869 void *ret;
72870 @@ -147,6 +148,7 @@ EXPORT_SYMBOL(__krealloc);
72871 * behaves exactly like kmalloc(). If @size is 0 and @p is not a
72872 * %NULL pointer, the object pointed to is freed.
72873 */
72874 +#undef krealloc
72875 void *krealloc(const void *p, size_t new_size, gfp_t flags)
72876 {
72877 void *ret;
72878 @@ -243,6 +245,12 @@ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
72879 void arch_pick_mmap_layout(struct mm_struct *mm)
72880 {
72881 mm->mmap_base = TASK_UNMAPPED_BASE;
72882 +
72883 +#ifdef CONFIG_PAX_RANDMMAP
72884 + if (mm->pax_flags & MF_PAX_RANDMMAP)
72885 + mm->mmap_base += mm->delta_mmap;
72886 +#endif
72887 +
72888 mm->get_unmapped_area = arch_get_unmapped_area;
72889 mm->unmap_area = arch_unmap_area;
72890 }
72891 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
72892 index 86ce9a5..0fa4d89 100644
72893 --- a/mm/vmalloc.c
72894 +++ b/mm/vmalloc.c
72895 @@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
72896
72897 pte = pte_offset_kernel(pmd, addr);
72898 do {
72899 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
72900 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
72901 +
72902 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
72903 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
72904 + BUG_ON(!pte_exec(*pte));
72905 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
72906 + continue;
72907 + }
72908 +#endif
72909 +
72910 + {
72911 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
72912 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
72913 + }
72914 } while (pte++, addr += PAGE_SIZE, addr != end);
72915 }
72916
72917 @@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
72918 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
72919 {
72920 pte_t *pte;
72921 + int ret = -ENOMEM;
72922
72923 /*
72924 * nr is a running index into the array which helps higher level
72925 @@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
72926 pte = pte_alloc_kernel(pmd, addr);
72927 if (!pte)
72928 return -ENOMEM;
72929 +
72930 + pax_open_kernel();
72931 do {
72932 struct page *page = pages[*nr];
72933
72934 - if (WARN_ON(!pte_none(*pte)))
72935 - return -EBUSY;
72936 - if (WARN_ON(!page))
72937 - return -ENOMEM;
72938 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
72939 + if (pgprot_val(prot) & _PAGE_NX)
72940 +#endif
72941 +
72942 + if (WARN_ON(!pte_none(*pte))) {
72943 + ret = -EBUSY;
72944 + goto out;
72945 + }
72946 + if (WARN_ON(!page)) {
72947 + ret = -ENOMEM;
72948 + goto out;
72949 + }
72950 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
72951 (*nr)++;
72952 } while (pte++, addr += PAGE_SIZE, addr != end);
72953 - return 0;
72954 + ret = 0;
72955 +out:
72956 + pax_close_kernel();
72957 + return ret;
72958 }
72959
72960 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
72961 @@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void *x)
72962 * and fall back on vmalloc() if that fails. Others
72963 * just put it in the vmalloc space.
72964 */
72965 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
72966 +#ifdef CONFIG_MODULES
72967 +#ifdef MODULES_VADDR
72968 unsigned long addr = (unsigned long)x;
72969 if (addr >= MODULES_VADDR && addr < MODULES_END)
72970 return 1;
72971 #endif
72972 +
72973 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
72974 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
72975 + return 1;
72976 +#endif
72977 +
72978 +#endif
72979 +
72980 return is_vmalloc_addr(x);
72981 }
72982
72983 @@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
72984
72985 if (!pgd_none(*pgd)) {
72986 pud_t *pud = pud_offset(pgd, addr);
72987 +#ifdef CONFIG_X86
72988 + if (!pud_large(*pud))
72989 +#endif
72990 if (!pud_none(*pud)) {
72991 pmd_t *pmd = pmd_offset(pud, addr);
72992 +#ifdef CONFIG_X86
72993 + if (!pmd_large(*pmd))
72994 +#endif
72995 if (!pmd_none(*pmd)) {
72996 pte_t *ptep, pte;
72997
72998 @@ -1319,6 +1359,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
72999 struct vm_struct *area;
73000
73001 BUG_ON(in_interrupt());
73002 +
73003 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
73004 + if (flags & VM_KERNEXEC) {
73005 + if (start != VMALLOC_START || end != VMALLOC_END)
73006 + return NULL;
73007 + start = (unsigned long)MODULES_EXEC_VADDR;
73008 + end = (unsigned long)MODULES_EXEC_END;
73009 + }
73010 +#endif
73011 +
73012 if (flags & VM_IOREMAP) {
73013 int bit = fls(size);
73014
73015 @@ -1551,6 +1601,11 @@ void *vmap(struct page **pages, unsigned int count,
73016 if (count > totalram_pages)
73017 return NULL;
73018
73019 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
73020 + if (!(pgprot_val(prot) & _PAGE_NX))
73021 + flags |= VM_KERNEXEC;
73022 +#endif
73023 +
73024 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
73025 __builtin_return_address(0));
73026 if (!area)
73027 @@ -1652,6 +1707,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
73028 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
73029 goto fail;
73030
73031 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
73032 + if (!(pgprot_val(prot) & _PAGE_NX))
73033 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
73034 + VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
73035 + else
73036 +#endif
73037 +
73038 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
73039 start, end, node, gfp_mask, caller);
73040 if (!area)
73041 @@ -1704,6 +1766,7 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
73042 gfp_mask, prot, node, caller);
73043 }
73044
73045 +#undef __vmalloc
73046 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
73047 {
73048 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
73049 @@ -1727,6 +1790,7 @@ static inline void *__vmalloc_node_flags(unsigned long size,
73050 * For tight control over page level allocator and protection flags
73051 * use __vmalloc() instead.
73052 */
73053 +#undef vmalloc
73054 void *vmalloc(unsigned long size)
73055 {
73056 return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
73057 @@ -1743,6 +1807,7 @@ EXPORT_SYMBOL(vmalloc);
73058 * For tight control over page level allocator and protection flags
73059 * use __vmalloc() instead.
73060 */
73061 +#undef vzalloc
73062 void *vzalloc(unsigned long size)
73063 {
73064 return __vmalloc_node_flags(size, -1,
73065 @@ -1757,6 +1822,7 @@ EXPORT_SYMBOL(vzalloc);
73066 * The resulting memory area is zeroed so it can be mapped to userspace
73067 * without leaking data.
73068 */
73069 +#undef vmalloc_user
73070 void *vmalloc_user(unsigned long size)
73071 {
73072 struct vm_struct *area;
73073 @@ -1784,6 +1850,7 @@ EXPORT_SYMBOL(vmalloc_user);
73074 * For tight control over page level allocator and protection flags
73075 * use __vmalloc() instead.
73076 */
73077 +#undef vmalloc_node
73078 void *vmalloc_node(unsigned long size, int node)
73079 {
73080 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
73081 @@ -1803,6 +1870,7 @@ EXPORT_SYMBOL(vmalloc_node);
73082 * For tight control over page level allocator and protection flags
73083 * use __vmalloc_node() instead.
73084 */
73085 +#undef vzalloc_node
73086 void *vzalloc_node(unsigned long size, int node)
73087 {
73088 return __vmalloc_node_flags(size, node,
73089 @@ -1825,10 +1893,10 @@ EXPORT_SYMBOL(vzalloc_node);
73090 * For tight control over page level allocator and protection flags
73091 * use __vmalloc() instead.
73092 */
73093 -
73094 +#undef vmalloc_exec
73095 void *vmalloc_exec(unsigned long size)
73096 {
73097 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
73098 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
73099 -1, __builtin_return_address(0));
73100 }
73101
73102 @@ -1847,6 +1915,7 @@ void *vmalloc_exec(unsigned long size)
73103 * Allocate enough 32bit PA addressable pages to cover @size from the
73104 * page level allocator and map them into contiguous kernel virtual space.
73105 */
73106 +#undef vmalloc_32
73107 void *vmalloc_32(unsigned long size)
73108 {
73109 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
73110 @@ -1861,6 +1930,7 @@ EXPORT_SYMBOL(vmalloc_32);
73111 * The resulting memory area is 32bit addressable and zeroed so it can be
73112 * mapped to userspace without leaking data.
73113 */
73114 +#undef vmalloc_32_user
73115 void *vmalloc_32_user(unsigned long size)
73116 {
73117 struct vm_struct *area;
73118 @@ -2123,6 +2193,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
73119 unsigned long uaddr = vma->vm_start;
73120 unsigned long usize = vma->vm_end - vma->vm_start;
73121
73122 + BUG_ON(vma->vm_mirror);
73123 +
73124 if ((PAGE_SIZE-1) & (unsigned long)addr)
73125 return -EINVAL;
73126
73127 diff --git a/mm/vmstat.c b/mm/vmstat.c
73128 index f600557..1459fc8 100644
73129 --- a/mm/vmstat.c
73130 +++ b/mm/vmstat.c
73131 @@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
73132 *
73133 * vm_stat contains the global counters
73134 */
73135 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
73136 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
73137 EXPORT_SYMBOL(vm_stat);
73138
73139 #ifdef CONFIG_SMP
73140 @@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
73141 v = p->vm_stat_diff[i];
73142 p->vm_stat_diff[i] = 0;
73143 local_irq_restore(flags);
73144 - atomic_long_add(v, &zone->vm_stat[i]);
73145 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
73146 global_diff[i] += v;
73147 #ifdef CONFIG_NUMA
73148 /* 3 seconds idle till flush */
73149 @@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
73150
73151 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
73152 if (global_diff[i])
73153 - atomic_long_add(global_diff[i], &vm_stat[i]);
73154 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
73155 }
73156
73157 #endif
73158 @@ -1208,10 +1208,20 @@ static int __init setup_vmstat(void)
73159 start_cpu_timer(cpu);
73160 #endif
73161 #ifdef CONFIG_PROC_FS
73162 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
73163 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
73164 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
73165 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
73166 + {
73167 + mode_t gr_mode = S_IRUGO;
73168 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
73169 + gr_mode = S_IRUSR;
73170 +#endif
73171 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
73172 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
73173 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
73174 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
73175 +#else
73176 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
73177 +#endif
73178 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
73179 + }
73180 #endif
73181 return 0;
73182 }
73183 diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
73184 index efea35b..9c8dd0b 100644
73185 --- a/net/8021q/vlan.c
73186 +++ b/net/8021q/vlan.c
73187 @@ -554,8 +554,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
73188 err = -EPERM;
73189 if (!capable(CAP_NET_ADMIN))
73190 break;
73191 - if ((args.u.name_type >= 0) &&
73192 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
73193 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
73194 struct vlan_net *vn;
73195
73196 vn = net_generic(net, vlan_net_id);
73197 diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
73198 index fccae26..e7ece2f 100644
73199 --- a/net/9p/trans_fd.c
73200 +++ b/net/9p/trans_fd.c
73201 @@ -425,7 +425,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
73202 oldfs = get_fs();
73203 set_fs(get_ds());
73204 /* The cast to a user pointer is valid due to the set_fs() */
73205 - ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
73206 + ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
73207 set_fs(oldfs);
73208
73209 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
73210 diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
73211 index 876fbe8..8bbea9f 100644
73212 --- a/net/atm/atm_misc.c
73213 +++ b/net/atm/atm_misc.c
73214 @@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
73215 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
73216 return 1;
73217 atm_return(vcc, truesize);
73218 - atomic_inc(&vcc->stats->rx_drop);
73219 + atomic_inc_unchecked(&vcc->stats->rx_drop);
73220 return 0;
73221 }
73222 EXPORT_SYMBOL(atm_charge);
73223 @@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
73224 }
73225 }
73226 atm_return(vcc, guess);
73227 - atomic_inc(&vcc->stats->rx_drop);
73228 + atomic_inc_unchecked(&vcc->stats->rx_drop);
73229 return NULL;
73230 }
73231 EXPORT_SYMBOL(atm_alloc_charge);
73232 @@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
73233
73234 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
73235 {
73236 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
73237 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
73238 __SONET_ITEMS
73239 #undef __HANDLE_ITEM
73240 }
73241 @@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
73242
73243 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
73244 {
73245 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
73246 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
73247 __SONET_ITEMS
73248 #undef __HANDLE_ITEM
73249 }
73250 diff --git a/net/atm/lec.h b/net/atm/lec.h
73251 index dfc0719..47c5322 100644
73252 --- a/net/atm/lec.h
73253 +++ b/net/atm/lec.h
73254 @@ -48,7 +48,7 @@ struct lane2_ops {
73255 const u8 *tlvs, u32 sizeoftlvs);
73256 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
73257 const u8 *tlvs, u32 sizeoftlvs);
73258 -};
73259 +} __no_const;
73260
73261 /*
73262 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
73263 diff --git a/net/atm/mpc.h b/net/atm/mpc.h
73264 index 0919a88..a23d54e 100644
73265 --- a/net/atm/mpc.h
73266 +++ b/net/atm/mpc.h
73267 @@ -33,7 +33,7 @@ struct mpoa_client {
73268 struct mpc_parameters parameters; /* parameters for this client */
73269
73270 const struct net_device_ops *old_ops;
73271 - struct net_device_ops new_ops;
73272 + net_device_ops_no_const new_ops;
73273 };
73274
73275
73276 diff --git a/net/atm/proc.c b/net/atm/proc.c
73277 index 0d020de..011c7bb 100644
73278 --- a/net/atm/proc.c
73279 +++ b/net/atm/proc.c
73280 @@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
73281 const struct k_atm_aal_stats *stats)
73282 {
73283 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
73284 - atomic_read(&stats->tx), atomic_read(&stats->tx_err),
73285 - atomic_read(&stats->rx), atomic_read(&stats->rx_err),
73286 - atomic_read(&stats->rx_drop));
73287 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
73288 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
73289 + atomic_read_unchecked(&stats->rx_drop));
73290 }
73291
73292 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
73293 diff --git a/net/atm/resources.c b/net/atm/resources.c
73294 index 23f45ce..c748f1a 100644
73295 --- a/net/atm/resources.c
73296 +++ b/net/atm/resources.c
73297 @@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
73298 static void copy_aal_stats(struct k_atm_aal_stats *from,
73299 struct atm_aal_stats *to)
73300 {
73301 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
73302 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
73303 __AAL_STAT_ITEMS
73304 #undef __HANDLE_ITEM
73305 }
73306 @@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
73307 static void subtract_aal_stats(struct k_atm_aal_stats *from,
73308 struct atm_aal_stats *to)
73309 {
73310 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
73311 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
73312 __AAL_STAT_ITEMS
73313 #undef __HANDLE_ITEM
73314 }
73315 diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
73316 index 3512e25..2b33401 100644
73317 --- a/net/batman-adv/bat_iv_ogm.c
73318 +++ b/net/batman-adv/bat_iv_ogm.c
73319 @@ -541,7 +541,7 @@ void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
73320
73321 /* change sequence number to network order */
73322 batman_ogm_packet->seqno =
73323 - htonl((uint32_t)atomic_read(&hard_iface->seqno));
73324 + htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
73325
73326 batman_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
73327 batman_ogm_packet->tt_crc = htons((uint16_t)
73328 @@ -561,7 +561,7 @@ void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
73329 else
73330 batman_ogm_packet->gw_flags = NO_FLAGS;
73331
73332 - atomic_inc(&hard_iface->seqno);
73333 + atomic_inc_unchecked(&hard_iface->seqno);
73334
73335 slide_own_bcast_window(hard_iface);
73336 bat_ogm_queue_add(bat_priv, hard_iface->packet_buff,
73337 @@ -922,7 +922,7 @@ static void bat_ogm_process(const struct ethhdr *ethhdr,
73338 return;
73339
73340 /* could be changed by schedule_own_packet() */
73341 - if_incoming_seqno = atomic_read(&if_incoming->seqno);
73342 + if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
73343
73344 has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);
73345
73346 diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
73347 index 7704df4..beb4e16 100644
73348 --- a/net/batman-adv/hard-interface.c
73349 +++ b/net/batman-adv/hard-interface.c
73350 @@ -326,8 +326,8 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
73351 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
73352 dev_add_pack(&hard_iface->batman_adv_ptype);
73353
73354 - atomic_set(&hard_iface->seqno, 1);
73355 - atomic_set(&hard_iface->frag_seqno, 1);
73356 + atomic_set_unchecked(&hard_iface->seqno, 1);
73357 + atomic_set_unchecked(&hard_iface->frag_seqno, 1);
73358 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
73359 hard_iface->net_dev->name);
73360
73361 diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
73362 index 987c75a..20d6f36 100644
73363 --- a/net/batman-adv/soft-interface.c
73364 +++ b/net/batman-adv/soft-interface.c
73365 @@ -645,7 +645,7 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
73366
73367 /* set broadcast sequence number */
73368 bcast_packet->seqno =
73369 - htonl(atomic_inc_return(&bat_priv->bcast_seqno));
73370 + htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
73371
73372 add_bcast_packet_to_list(bat_priv, skb, 1);
73373
73374 @@ -843,7 +843,7 @@ struct net_device *softif_create(const char *name)
73375 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
73376
73377 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
73378 - atomic_set(&bat_priv->bcast_seqno, 1);
73379 + atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
73380 atomic_set(&bat_priv->ttvn, 0);
73381 atomic_set(&bat_priv->tt_local_changes, 0);
73382 atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
73383 diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
73384 index e9eb043..d174eeb 100644
73385 --- a/net/batman-adv/types.h
73386 +++ b/net/batman-adv/types.h
73387 @@ -38,8 +38,8 @@ struct hard_iface {
73388 int16_t if_num;
73389 char if_status;
73390 struct net_device *net_dev;
73391 - atomic_t seqno;
73392 - atomic_t frag_seqno;
73393 + atomic_unchecked_t seqno;
73394 + atomic_unchecked_t frag_seqno;
73395 unsigned char *packet_buff;
73396 int packet_len;
73397 struct kobject *hardif_obj;
73398 @@ -154,7 +154,7 @@ struct bat_priv {
73399 atomic_t orig_interval; /* uint */
73400 atomic_t hop_penalty; /* uint */
73401 atomic_t log_level; /* uint */
73402 - atomic_t bcast_seqno;
73403 + atomic_unchecked_t bcast_seqno;
73404 atomic_t bcast_queue_left;
73405 atomic_t batman_queue_left;
73406 atomic_t ttvn; /* translation table version number */
73407 diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
73408 index 07d1c1d..7e9bea9 100644
73409 --- a/net/batman-adv/unicast.c
73410 +++ b/net/batman-adv/unicast.c
73411 @@ -264,7 +264,7 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
73412 frag1->flags = UNI_FRAG_HEAD | large_tail;
73413 frag2->flags = large_tail;
73414
73415 - seqno = atomic_add_return(2, &hard_iface->frag_seqno);
73416 + seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
73417 frag1->seqno = htons(seqno - 1);
73418 frag2->seqno = htons(seqno);
73419
73420 diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
73421 index 07bc69e..21e76b1 100644
73422 --- a/net/bluetooth/hci_conn.c
73423 +++ b/net/bluetooth/hci_conn.c
73424 @@ -234,7 +234,7 @@ void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
73425 memset(&cp, 0, sizeof(cp));
73426
73427 cp.handle = cpu_to_le16(conn->handle);
73428 - memcpy(cp.ltk, ltk, sizeof(ltk));
73429 + memcpy(cp.ltk, ltk, sizeof(cp.ltk));
73430
73431 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
73432 }
73433 diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
73434 index 32d338c..d24bcdb 100644
73435 --- a/net/bluetooth/l2cap_core.c
73436 +++ b/net/bluetooth/l2cap_core.c
73437 @@ -2418,8 +2418,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
73438 break;
73439
73440 case L2CAP_CONF_RFC:
73441 - if (olen == sizeof(rfc))
73442 - memcpy(&rfc, (void *)val, olen);
73443 + if (olen != sizeof(rfc))
73444 + break;
73445 +
73446 + memcpy(&rfc, (void *)val, olen);
73447
73448 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
73449 rfc.mode != chan->mode)
73450 @@ -2537,8 +2539,10 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
73451
73452 switch (type) {
73453 case L2CAP_CONF_RFC:
73454 - if (olen == sizeof(rfc))
73455 - memcpy(&rfc, (void *)val, olen);
73456 + if (olen != sizeof(rfc))
73457 + break;
73458 +
73459 + memcpy(&rfc, (void *)val, olen);
73460 goto done;
73461 }
73462 }
73463 diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
73464 index 5fe2ff3..10968b5 100644
73465 --- a/net/bridge/netfilter/ebtables.c
73466 +++ b/net/bridge/netfilter/ebtables.c
73467 @@ -1523,7 +1523,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
73468 tmp.valid_hooks = t->table->valid_hooks;
73469 }
73470 mutex_unlock(&ebt_mutex);
73471 - if (copy_to_user(user, &tmp, *len) != 0){
73472 + if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
73473 BUGPRINT("c2u Didn't work\n");
73474 ret = -EFAULT;
73475 break;
73476 diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
73477 index a97d97a..6f679ed 100644
73478 --- a/net/caif/caif_socket.c
73479 +++ b/net/caif/caif_socket.c
73480 @@ -48,19 +48,20 @@ static struct dentry *debugfsdir;
73481 #ifdef CONFIG_DEBUG_FS
73482 struct debug_fs_counter {
73483 atomic_t caif_nr_socks;
73484 - atomic_t caif_sock_create;
73485 - atomic_t num_connect_req;
73486 - atomic_t num_connect_resp;
73487 - atomic_t num_connect_fail_resp;
73488 - atomic_t num_disconnect;
73489 - atomic_t num_remote_shutdown_ind;
73490 - atomic_t num_tx_flow_off_ind;
73491 - atomic_t num_tx_flow_on_ind;
73492 - atomic_t num_rx_flow_off;
73493 - atomic_t num_rx_flow_on;
73494 + atomic_unchecked_t caif_sock_create;
73495 + atomic_unchecked_t num_connect_req;
73496 + atomic_unchecked_t num_connect_resp;
73497 + atomic_unchecked_t num_connect_fail_resp;
73498 + atomic_unchecked_t num_disconnect;
73499 + atomic_unchecked_t num_remote_shutdown_ind;
73500 + atomic_unchecked_t num_tx_flow_off_ind;
73501 + atomic_unchecked_t num_tx_flow_on_ind;
73502 + atomic_unchecked_t num_rx_flow_off;
73503 + atomic_unchecked_t num_rx_flow_on;
73504 };
73505 static struct debug_fs_counter cnt;
73506 #define dbfs_atomic_inc(v) atomic_inc_return(v)
73507 +#define dbfs_atomic_inc_unchecked(v) atomic_inc_return_unchecked(v)
73508 #define dbfs_atomic_dec(v) atomic_dec_return(v)
73509 #else
73510 #define dbfs_atomic_inc(v) 0
73511 @@ -161,7 +162,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
73512 atomic_read(&cf_sk->sk.sk_rmem_alloc),
73513 sk_rcvbuf_lowwater(cf_sk));
73514 set_rx_flow_off(cf_sk);
73515 - dbfs_atomic_inc(&cnt.num_rx_flow_off);
73516 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
73517 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
73518 }
73519
73520 @@ -172,7 +173,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
73521 set_rx_flow_off(cf_sk);
73522 if (net_ratelimit())
73523 pr_debug("sending flow OFF due to rmem_schedule\n");
73524 - dbfs_atomic_inc(&cnt.num_rx_flow_off);
73525 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
73526 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
73527 }
73528 skb->dev = NULL;
73529 @@ -233,14 +234,14 @@ static void caif_ctrl_cb(struct cflayer *layr,
73530 switch (flow) {
73531 case CAIF_CTRLCMD_FLOW_ON_IND:
73532 /* OK from modem to start sending again */
73533 - dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
73534 + dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
73535 set_tx_flow_on(cf_sk);
73536 cf_sk->sk.sk_state_change(&cf_sk->sk);
73537 break;
73538
73539 case CAIF_CTRLCMD_FLOW_OFF_IND:
73540 /* Modem asks us to shut up */
73541 - dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
73542 + dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
73543 set_tx_flow_off(cf_sk);
73544 cf_sk->sk.sk_state_change(&cf_sk->sk);
73545 break;
73546 @@ -249,7 +250,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
73547 /* We're now connected */
73548 caif_client_register_refcnt(&cf_sk->layer,
73549 cfsk_hold, cfsk_put);
73550 - dbfs_atomic_inc(&cnt.num_connect_resp);
73551 + dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
73552 cf_sk->sk.sk_state = CAIF_CONNECTED;
73553 set_tx_flow_on(cf_sk);
73554 cf_sk->sk.sk_state_change(&cf_sk->sk);
73555 @@ -263,7 +264,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
73556
73557 case CAIF_CTRLCMD_INIT_FAIL_RSP:
73558 /* Connect request failed */
73559 - dbfs_atomic_inc(&cnt.num_connect_fail_resp);
73560 + dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
73561 cf_sk->sk.sk_err = ECONNREFUSED;
73562 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
73563 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
73564 @@ -277,7 +278,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
73565
73566 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
73567 /* Modem has closed this connection, or device is down. */
73568 - dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
73569 + dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
73570 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
73571 cf_sk->sk.sk_err = ECONNRESET;
73572 set_rx_flow_on(cf_sk);
73573 @@ -297,7 +298,7 @@ static void caif_check_flow_release(struct sock *sk)
73574 return;
73575
73576 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
73577 - dbfs_atomic_inc(&cnt.num_rx_flow_on);
73578 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
73579 set_rx_flow_on(cf_sk);
73580 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
73581 }
73582 @@ -856,7 +857,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
73583 /*ifindex = id of the interface.*/
73584 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
73585
73586 - dbfs_atomic_inc(&cnt.num_connect_req);
73587 + dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
73588 cf_sk->layer.receive = caif_sktrecv_cb;
73589
73590 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
73591 @@ -945,7 +946,7 @@ static int caif_release(struct socket *sock)
73592 spin_unlock_bh(&sk->sk_receive_queue.lock);
73593 sock->sk = NULL;
73594
73595 - dbfs_atomic_inc(&cnt.num_disconnect);
73596 + dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
73597
73598 WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
73599 if (cf_sk->debugfs_socket_dir != NULL)
73600 @@ -1124,7 +1125,7 @@ static int caif_create(struct net *net, struct socket *sock, int protocol,
73601 cf_sk->conn_req.protocol = protocol;
73602 /* Increase the number of sockets created. */
73603 dbfs_atomic_inc(&cnt.caif_nr_socks);
73604 - num = dbfs_atomic_inc(&cnt.caif_sock_create);
73605 + num = dbfs_atomic_inc_unchecked(&cnt.caif_sock_create);
73606 #ifdef CONFIG_DEBUG_FS
73607 if (!IS_ERR(debugfsdir)) {
73608
73609 diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
73610 index 5cf5222..6f704ad 100644
73611 --- a/net/caif/cfctrl.c
73612 +++ b/net/caif/cfctrl.c
73613 @@ -9,6 +9,7 @@
73614 #include <linux/stddef.h>
73615 #include <linux/spinlock.h>
73616 #include <linux/slab.h>
73617 +#include <linux/sched.h>
73618 #include <net/caif/caif_layer.h>
73619 #include <net/caif/cfpkt.h>
73620 #include <net/caif/cfctrl.h>
73621 @@ -42,8 +43,8 @@ struct cflayer *cfctrl_create(void)
73622 memset(&dev_info, 0, sizeof(dev_info));
73623 dev_info.id = 0xff;
73624 cfsrvl_init(&this->serv, 0, &dev_info, false);
73625 - atomic_set(&this->req_seq_no, 1);
73626 - atomic_set(&this->rsp_seq_no, 1);
73627 + atomic_set_unchecked(&this->req_seq_no, 1);
73628 + atomic_set_unchecked(&this->rsp_seq_no, 1);
73629 this->serv.layer.receive = cfctrl_recv;
73630 sprintf(this->serv.layer.name, "ctrl");
73631 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
73632 @@ -129,8 +130,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
73633 struct cfctrl_request_info *req)
73634 {
73635 spin_lock_bh(&ctrl->info_list_lock);
73636 - atomic_inc(&ctrl->req_seq_no);
73637 - req->sequence_no = atomic_read(&ctrl->req_seq_no);
73638 + atomic_inc_unchecked(&ctrl->req_seq_no);
73639 + req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
73640 list_add_tail(&req->list, &ctrl->list);
73641 spin_unlock_bh(&ctrl->info_list_lock);
73642 }
73643 @@ -148,7 +149,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
73644 if (p != first)
73645 pr_warn("Requests are not received in order\n");
73646
73647 - atomic_set(&ctrl->rsp_seq_no,
73648 + atomic_set_unchecked(&ctrl->rsp_seq_no,
73649 p->sequence_no);
73650 list_del(&p->list);
73651 goto out;
73652 diff --git a/net/can/gw.c b/net/can/gw.c
73653 index 3d79b12..8de85fa 100644
73654 --- a/net/can/gw.c
73655 +++ b/net/can/gw.c
73656 @@ -96,7 +96,7 @@ struct cf_mod {
73657 struct {
73658 void (*xor)(struct can_frame *cf, struct cgw_csum_xor *xor);
73659 void (*crc8)(struct can_frame *cf, struct cgw_csum_crc8 *crc8);
73660 - } csumfunc;
73661 + } __no_const csumfunc;
73662 };
73663
73664
73665 diff --git a/net/compat.c b/net/compat.c
73666 index 6def90e..c6992fa 100644
73667 --- a/net/compat.c
73668 +++ b/net/compat.c
73669 @@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
73670 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
73671 __get_user(kmsg->msg_flags, &umsg->msg_flags))
73672 return -EFAULT;
73673 - kmsg->msg_name = compat_ptr(tmp1);
73674 - kmsg->msg_iov = compat_ptr(tmp2);
73675 - kmsg->msg_control = compat_ptr(tmp3);
73676 + kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
73677 + kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
73678 + kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
73679 return 0;
73680 }
73681
73682 @@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
73683
73684 if (kern_msg->msg_namelen) {
73685 if (mode == VERIFY_READ) {
73686 - int err = move_addr_to_kernel(kern_msg->msg_name,
73687 + int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
73688 kern_msg->msg_namelen,
73689 kern_address);
73690 if (err < 0)
73691 @@ -96,7 +96,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
73692 kern_msg->msg_name = NULL;
73693
73694 tot_len = iov_from_user_compat_to_kern(kern_iov,
73695 - (struct compat_iovec __user *)kern_msg->msg_iov,
73696 + (struct compat_iovec __force_user *)kern_msg->msg_iov,
73697 kern_msg->msg_iovlen);
73698 if (tot_len >= 0)
73699 kern_msg->msg_iov = kern_iov;
73700 @@ -116,20 +116,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
73701
73702 #define CMSG_COMPAT_FIRSTHDR(msg) \
73703 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
73704 - (struct compat_cmsghdr __user *)((msg)->msg_control) : \
73705 + (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
73706 (struct compat_cmsghdr __user *)NULL)
73707
73708 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
73709 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
73710 (ucmlen) <= (unsigned long) \
73711 ((mhdr)->msg_controllen - \
73712 - ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
73713 + ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
73714
73715 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
73716 struct compat_cmsghdr __user *cmsg, int cmsg_len)
73717 {
73718 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
73719 - if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
73720 + if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
73721 msg->msg_controllen)
73722 return NULL;
73723 return (struct compat_cmsghdr __user *)ptr;
73724 @@ -221,7 +221,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
73725 {
73726 struct compat_timeval ctv;
73727 struct compat_timespec cts[3];
73728 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
73729 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
73730 struct compat_cmsghdr cmhdr;
73731 int cmlen;
73732
73733 @@ -273,7 +273,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
73734
73735 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
73736 {
73737 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
73738 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
73739 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
73740 int fdnum = scm->fp->count;
73741 struct file **fp = scm->fp->fp;
73742 @@ -370,7 +370,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
73743 return -EFAULT;
73744 old_fs = get_fs();
73745 set_fs(KERNEL_DS);
73746 - err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
73747 + err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
73748 set_fs(old_fs);
73749
73750 return err;
73751 @@ -431,7 +431,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
73752 len = sizeof(ktime);
73753 old_fs = get_fs();
73754 set_fs(KERNEL_DS);
73755 - err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
73756 + err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
73757 set_fs(old_fs);
73758
73759 if (!err) {
73760 @@ -566,7 +566,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
73761 case MCAST_JOIN_GROUP:
73762 case MCAST_LEAVE_GROUP:
73763 {
73764 - struct compat_group_req __user *gr32 = (void *)optval;
73765 + struct compat_group_req __user *gr32 = (void __user *)optval;
73766 struct group_req __user *kgr =
73767 compat_alloc_user_space(sizeof(struct group_req));
73768 u32 interface;
73769 @@ -587,7 +587,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
73770 case MCAST_BLOCK_SOURCE:
73771 case MCAST_UNBLOCK_SOURCE:
73772 {
73773 - struct compat_group_source_req __user *gsr32 = (void *)optval;
73774 + struct compat_group_source_req __user *gsr32 = (void __user *)optval;
73775 struct group_source_req __user *kgsr = compat_alloc_user_space(
73776 sizeof(struct group_source_req));
73777 u32 interface;
73778 @@ -608,7 +608,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
73779 }
73780 case MCAST_MSFILTER:
73781 {
73782 - struct compat_group_filter __user *gf32 = (void *)optval;
73783 + struct compat_group_filter __user *gf32 = (void __user *)optval;
73784 struct group_filter __user *kgf;
73785 u32 interface, fmode, numsrc;
73786
73787 @@ -646,7 +646,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
73788 char __user *optval, int __user *optlen,
73789 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
73790 {
73791 - struct compat_group_filter __user *gf32 = (void *)optval;
73792 + struct compat_group_filter __user *gf32 = (void __user *)optval;
73793 struct group_filter __user *kgf;
73794 int __user *koptlen;
73795 u32 interface, fmode, numsrc;
73796 diff --git a/net/core/datagram.c b/net/core/datagram.c
73797 index 68bbf9f..5ef0d12 100644
73798 --- a/net/core/datagram.c
73799 +++ b/net/core/datagram.c
73800 @@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
73801 }
73802
73803 kfree_skb(skb);
73804 - atomic_inc(&sk->sk_drops);
73805 + atomic_inc_unchecked(&sk->sk_drops);
73806 sk_mem_reclaim_partial(sk);
73807
73808 return err;
73809 diff --git a/net/core/dev.c b/net/core/dev.c
73810 index 6ca32f6..c7e9bbd 100644
73811 --- a/net/core/dev.c
73812 +++ b/net/core/dev.c
73813 @@ -1138,10 +1138,14 @@ void dev_load(struct net *net, const char *name)
73814 if (no_module && capable(CAP_NET_ADMIN))
73815 no_module = request_module("netdev-%s", name);
73816 if (no_module && capable(CAP_SYS_MODULE)) {
73817 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
73818 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
73819 +#else
73820 if (!request_module("%s", name))
73821 pr_err("Loading kernel module for a network device "
73822 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
73823 "instead\n", name);
73824 +#endif
73825 }
73826 }
73827 EXPORT_SYMBOL(dev_load);
73828 @@ -1585,7 +1589,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
73829 {
73830 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
73831 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
73832 - atomic_long_inc(&dev->rx_dropped);
73833 + atomic_long_inc_unchecked(&dev->rx_dropped);
73834 kfree_skb(skb);
73835 return NET_RX_DROP;
73836 }
73837 @@ -1595,7 +1599,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
73838 nf_reset(skb);
73839
73840 if (unlikely(!is_skb_forwardable(dev, skb))) {
73841 - atomic_long_inc(&dev->rx_dropped);
73842 + atomic_long_inc_unchecked(&dev->rx_dropped);
73843 kfree_skb(skb);
73844 return NET_RX_DROP;
73845 }
73846 @@ -2057,7 +2061,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
73847
73848 struct dev_gso_cb {
73849 void (*destructor)(struct sk_buff *skb);
73850 -};
73851 +} __no_const;
73852
73853 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
73854
73855 @@ -2913,7 +2917,7 @@ enqueue:
73856
73857 local_irq_restore(flags);
73858
73859 - atomic_long_inc(&skb->dev->rx_dropped);
73860 + atomic_long_inc_unchecked(&skb->dev->rx_dropped);
73861 kfree_skb(skb);
73862 return NET_RX_DROP;
73863 }
73864 @@ -2985,7 +2989,7 @@ int netif_rx_ni(struct sk_buff *skb)
73865 }
73866 EXPORT_SYMBOL(netif_rx_ni);
73867
73868 -static void net_tx_action(struct softirq_action *h)
73869 +static void net_tx_action(void)
73870 {
73871 struct softnet_data *sd = &__get_cpu_var(softnet_data);
73872
73873 @@ -3273,7 +3277,7 @@ ncls:
73874 if (pt_prev) {
73875 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
73876 } else {
73877 - atomic_long_inc(&skb->dev->rx_dropped);
73878 + atomic_long_inc_unchecked(&skb->dev->rx_dropped);
73879 kfree_skb(skb);
73880 /* Jamal, now you will not able to escape explaining
73881 * me how you were going to use this. :-)
73882 @@ -3832,7 +3836,7 @@ void netif_napi_del(struct napi_struct *napi)
73883 }
73884 EXPORT_SYMBOL(netif_napi_del);
73885
73886 -static void net_rx_action(struct softirq_action *h)
73887 +static void net_rx_action(void)
73888 {
73889 struct softnet_data *sd = &__get_cpu_var(softnet_data);
73890 unsigned long time_limit = jiffies + 2;
73891 @@ -5889,7 +5893,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
73892 } else {
73893 netdev_stats_to_stats64(storage, &dev->stats);
73894 }
73895 - storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
73896 + storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
73897 return storage;
73898 }
73899 EXPORT_SYMBOL(dev_get_stats);
73900 diff --git a/net/core/flow.c b/net/core/flow.c
73901 index e318c7e..168b1d0 100644
73902 --- a/net/core/flow.c
73903 +++ b/net/core/flow.c
73904 @@ -61,7 +61,7 @@ struct flow_cache {
73905 struct timer_list rnd_timer;
73906 };
73907
73908 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
73909 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
73910 EXPORT_SYMBOL(flow_cache_genid);
73911 static struct flow_cache flow_cache_global;
73912 static struct kmem_cache *flow_cachep __read_mostly;
73913 @@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
73914
73915 static int flow_entry_valid(struct flow_cache_entry *fle)
73916 {
73917 - if (atomic_read(&flow_cache_genid) != fle->genid)
73918 + if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
73919 return 0;
73920 if (fle->object && !fle->object->ops->check(fle->object))
73921 return 0;
73922 @@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
73923 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
73924 fcp->hash_count++;
73925 }
73926 - } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
73927 + } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
73928 flo = fle->object;
73929 if (!flo)
73930 goto ret_object;
73931 @@ -280,7 +280,7 @@ nocache:
73932 }
73933 flo = resolver(net, key, family, dir, flo, ctx);
73934 if (fle) {
73935 - fle->genid = atomic_read(&flow_cache_genid);
73936 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
73937 if (!IS_ERR(flo))
73938 fle->object = flo;
73939 else
73940 diff --git a/net/core/iovec.c b/net/core/iovec.c
73941 index c40f27e..7f49254 100644
73942 --- a/net/core/iovec.c
73943 +++ b/net/core/iovec.c
73944 @@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
73945 if (m->msg_namelen) {
73946 if (mode == VERIFY_READ) {
73947 void __user *namep;
73948 - namep = (void __user __force *) m->msg_name;
73949 + namep = (void __force_user *) m->msg_name;
73950 err = move_addr_to_kernel(namep, m->msg_namelen,
73951 address);
73952 if (err < 0)
73953 @@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
73954 }
73955
73956 size = m->msg_iovlen * sizeof(struct iovec);
73957 - if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
73958 + if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
73959 return -EFAULT;
73960
73961 m->msg_iov = iov;
73962 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
73963 index f965dce..92c792a 100644
73964 --- a/net/core/rtnetlink.c
73965 +++ b/net/core/rtnetlink.c
73966 @@ -57,7 +57,7 @@ struct rtnl_link {
73967 rtnl_doit_func doit;
73968 rtnl_dumpit_func dumpit;
73969 rtnl_calcit_func calcit;
73970 -};
73971 +} __no_const;
73972
73973 static DEFINE_MUTEX(rtnl_mutex);
73974
73975 diff --git a/net/core/scm.c b/net/core/scm.c
73976 index ff52ad0..aff1c0f 100644
73977 --- a/net/core/scm.c
73978 +++ b/net/core/scm.c
73979 @@ -220,7 +220,7 @@ EXPORT_SYMBOL(__scm_send);
73980 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
73981 {
73982 struct cmsghdr __user *cm
73983 - = (__force struct cmsghdr __user *)msg->msg_control;
73984 + = (struct cmsghdr __force_user *)msg->msg_control;
73985 struct cmsghdr cmhdr;
73986 int cmlen = CMSG_LEN(len);
73987 int err;
73988 @@ -243,7 +243,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
73989 err = -EFAULT;
73990 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
73991 goto out;
73992 - if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
73993 + if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
73994 goto out;
73995 cmlen = CMSG_SPACE(len);
73996 if (msg->msg_controllen < cmlen)
73997 @@ -259,7 +259,7 @@ EXPORT_SYMBOL(put_cmsg);
73998 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
73999 {
74000 struct cmsghdr __user *cm
74001 - = (__force struct cmsghdr __user*)msg->msg_control;
74002 + = (struct cmsghdr __force_user *)msg->msg_control;
74003
74004 int fdmax = 0;
74005 int fdnum = scm->fp->count;
74006 @@ -279,7 +279,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
74007 if (fdnum < fdmax)
74008 fdmax = fdnum;
74009
74010 - for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
74011 + for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
74012 i++, cmfptr++)
74013 {
74014 int new_fd;
74015 diff --git a/net/core/sock.c b/net/core/sock.c
74016 index 02f8dfe..86dfd4a 100644
74017 --- a/net/core/sock.c
74018 +++ b/net/core/sock.c
74019 @@ -341,7 +341,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
74020 struct sk_buff_head *list = &sk->sk_receive_queue;
74021
74022 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
74023 - atomic_inc(&sk->sk_drops);
74024 + atomic_inc_unchecked(&sk->sk_drops);
74025 trace_sock_rcvqueue_full(sk, skb);
74026 return -ENOMEM;
74027 }
74028 @@ -351,7 +351,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
74029 return err;
74030
74031 if (!sk_rmem_schedule(sk, skb->truesize)) {
74032 - atomic_inc(&sk->sk_drops);
74033 + atomic_inc_unchecked(&sk->sk_drops);
74034 return -ENOBUFS;
74035 }
74036
74037 @@ -371,7 +371,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
74038 skb_dst_force(skb);
74039
74040 spin_lock_irqsave(&list->lock, flags);
74041 - skb->dropcount = atomic_read(&sk->sk_drops);
74042 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
74043 __skb_queue_tail(list, skb);
74044 spin_unlock_irqrestore(&list->lock, flags);
74045
74046 @@ -391,7 +391,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
74047 skb->dev = NULL;
74048
74049 if (sk_rcvqueues_full(sk, skb)) {
74050 - atomic_inc(&sk->sk_drops);
74051 + atomic_inc_unchecked(&sk->sk_drops);
74052 goto discard_and_relse;
74053 }
74054 if (nested)
74055 @@ -409,7 +409,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
74056 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
74057 } else if (sk_add_backlog(sk, skb)) {
74058 bh_unlock_sock(sk);
74059 - atomic_inc(&sk->sk_drops);
74060 + atomic_inc_unchecked(&sk->sk_drops);
74061 goto discard_and_relse;
74062 }
74063
74064 @@ -974,7 +974,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
74065 if (len > sizeof(peercred))
74066 len = sizeof(peercred);
74067 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
74068 - if (copy_to_user(optval, &peercred, len))
74069 + if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
74070 return -EFAULT;
74071 goto lenout;
74072 }
74073 @@ -987,7 +987,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
74074 return -ENOTCONN;
74075 if (lv < len)
74076 return -EINVAL;
74077 - if (copy_to_user(optval, address, len))
74078 + if (len > sizeof(address) || copy_to_user(optval, address, len))
74079 return -EFAULT;
74080 goto lenout;
74081 }
74082 @@ -1024,7 +1024,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
74083
74084 if (len > lv)
74085 len = lv;
74086 - if (copy_to_user(optval, &v, len))
74087 + if (len > sizeof(v) || copy_to_user(optval, &v, len))
74088 return -EFAULT;
74089 lenout:
74090 if (put_user(len, optlen))
74091 @@ -2108,7 +2108,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
74092 */
74093 smp_wmb();
74094 atomic_set(&sk->sk_refcnt, 1);
74095 - atomic_set(&sk->sk_drops, 0);
74096 + atomic_set_unchecked(&sk->sk_drops, 0);
74097 }
74098 EXPORT_SYMBOL(sock_init_data);
74099
74100 diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
74101 index b9868e1..849f809 100644
74102 --- a/net/core/sock_diag.c
74103 +++ b/net/core/sock_diag.c
74104 @@ -16,20 +16,27 @@ static DEFINE_MUTEX(sock_diag_table_mutex);
74105
74106 int sock_diag_check_cookie(void *sk, __u32 *cookie)
74107 {
74108 +#ifndef CONFIG_GRKERNSEC_HIDESYM
74109 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
74110 cookie[1] != INET_DIAG_NOCOOKIE) &&
74111 ((u32)(unsigned long)sk != cookie[0] ||
74112 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
74113 return -ESTALE;
74114 else
74115 +#endif
74116 return 0;
74117 }
74118 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
74119
74120 void sock_diag_save_cookie(void *sk, __u32 *cookie)
74121 {
74122 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74123 + cookie[0] = 0;
74124 + cookie[1] = 0;
74125 +#else
74126 cookie[0] = (u32)(unsigned long)sk;
74127 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
74128 +#endif
74129 }
74130 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
74131
74132 diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
74133 index 02e75d1..9a57a7c 100644
74134 --- a/net/decnet/sysctl_net_decnet.c
74135 +++ b/net/decnet/sysctl_net_decnet.c
74136 @@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
74137
74138 if (len > *lenp) len = *lenp;
74139
74140 - if (copy_to_user(buffer, addr, len))
74141 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
74142 return -EFAULT;
74143
74144 *lenp = len;
74145 @@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
74146
74147 if (len > *lenp) len = *lenp;
74148
74149 - if (copy_to_user(buffer, devname, len))
74150 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
74151 return -EFAULT;
74152
74153 *lenp = len;
74154 diff --git a/net/econet/Kconfig b/net/econet/Kconfig
74155 index 39a2d29..f39c0fe 100644
74156 --- a/net/econet/Kconfig
74157 +++ b/net/econet/Kconfig
74158 @@ -4,7 +4,7 @@
74159
74160 config ECONET
74161 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
74162 - depends on EXPERIMENTAL && INET
74163 + depends on EXPERIMENTAL && INET && BROKEN
74164 ---help---
74165 Econet is a fairly old and slow networking protocol mainly used by
74166 Acorn computers to access file and print servers. It uses native
74167 diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
74168 index 92fc5f6..b790d91 100644
74169 --- a/net/ipv4/fib_frontend.c
74170 +++ b/net/ipv4/fib_frontend.c
74171 @@ -970,12 +970,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
74172 #ifdef CONFIG_IP_ROUTE_MULTIPATH
74173 fib_sync_up(dev);
74174 #endif
74175 - atomic_inc(&net->ipv4.dev_addr_genid);
74176 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
74177 rt_cache_flush(dev_net(dev), -1);
74178 break;
74179 case NETDEV_DOWN:
74180 fib_del_ifaddr(ifa, NULL);
74181 - atomic_inc(&net->ipv4.dev_addr_genid);
74182 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
74183 if (ifa->ifa_dev->ifa_list == NULL) {
74184 /* Last address was deleted from this interface.
74185 * Disable IP.
74186 @@ -1011,7 +1011,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
74187 #ifdef CONFIG_IP_ROUTE_MULTIPATH
74188 fib_sync_up(dev);
74189 #endif
74190 - atomic_inc(&net->ipv4.dev_addr_genid);
74191 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
74192 rt_cache_flush(dev_net(dev), -1);
74193 break;
74194 case NETDEV_DOWN:
74195 diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
74196 index 80106d8..232e898 100644
74197 --- a/net/ipv4/fib_semantics.c
74198 +++ b/net/ipv4/fib_semantics.c
74199 @@ -699,7 +699,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
74200 nh->nh_saddr = inet_select_addr(nh->nh_dev,
74201 nh->nh_gw,
74202 nh->nh_parent->fib_scope);
74203 - nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
74204 + nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
74205
74206 return nh->nh_saddr;
74207 }
74208 diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
74209 index 984ec65..97ac518 100644
74210 --- a/net/ipv4/inet_hashtables.c
74211 +++ b/net/ipv4/inet_hashtables.c
74212 @@ -18,12 +18,15 @@
74213 #include <linux/sched.h>
74214 #include <linux/slab.h>
74215 #include <linux/wait.h>
74216 +#include <linux/security.h>
74217
74218 #include <net/inet_connection_sock.h>
74219 #include <net/inet_hashtables.h>
74220 #include <net/secure_seq.h>
74221 #include <net/ip.h>
74222
74223 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
74224 +
74225 /*
74226 * Allocate and initialize a new local port bind bucket.
74227 * The bindhash mutex for snum's hash chain must be held here.
74228 @@ -530,6 +533,8 @@ ok:
74229 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
74230 spin_unlock(&head->lock);
74231
74232 + gr_update_task_in_ip_table(current, inet_sk(sk));
74233 +
74234 if (tw) {
74235 inet_twsk_deschedule(tw, death_row);
74236 while (twrefcnt) {
74237 diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
74238 index d4d61b6..b81aec8 100644
74239 --- a/net/ipv4/inetpeer.c
74240 +++ b/net/ipv4/inetpeer.c
74241 @@ -487,8 +487,8 @@ relookup:
74242 if (p) {
74243 p->daddr = *daddr;
74244 atomic_set(&p->refcnt, 1);
74245 - atomic_set(&p->rid, 0);
74246 - atomic_set(&p->ip_id_count,
74247 + atomic_set_unchecked(&p->rid, 0);
74248 + atomic_set_unchecked(&p->ip_id_count,
74249 (daddr->family == AF_INET) ?
74250 secure_ip_id(daddr->addr.a4) :
74251 secure_ipv6_id(daddr->addr.a6));
74252 diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
74253 index 1f23a57..7180dfe 100644
74254 --- a/net/ipv4/ip_fragment.c
74255 +++ b/net/ipv4/ip_fragment.c
74256 @@ -316,7 +316,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
74257 return 0;
74258
74259 start = qp->rid;
74260 - end = atomic_inc_return(&peer->rid);
74261 + end = atomic_inc_return_unchecked(&peer->rid);
74262 qp->rid = end;
74263
74264 rc = qp->q.fragments && (end - start) > max;
74265 diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
74266 index 8aa87c1..35c3248 100644
74267 --- a/net/ipv4/ip_sockglue.c
74268 +++ b/net/ipv4/ip_sockglue.c
74269 @@ -1112,7 +1112,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
74270 len = min_t(unsigned int, len, opt->optlen);
74271 if (put_user(len, optlen))
74272 return -EFAULT;
74273 - if (copy_to_user(optval, opt->__data, len))
74274 + if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
74275 + copy_to_user(optval, opt->__data, len))
74276 return -EFAULT;
74277 return 0;
74278 }
74279 @@ -1240,7 +1241,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
74280 if (sk->sk_type != SOCK_STREAM)
74281 return -ENOPROTOOPT;
74282
74283 - msg.msg_control = optval;
74284 + msg.msg_control = (void __force_kernel *)optval;
74285 msg.msg_controllen = len;
74286 msg.msg_flags = flags;
74287
74288 diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
74289 index 6e412a6..6640538 100644
74290 --- a/net/ipv4/ipconfig.c
74291 +++ b/net/ipv4/ipconfig.c
74292 @@ -318,7 +318,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
74293
74294 mm_segment_t oldfs = get_fs();
74295 set_fs(get_ds());
74296 - res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
74297 + res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
74298 set_fs(oldfs);
74299 return res;
74300 }
74301 @@ -329,7 +329,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
74302
74303 mm_segment_t oldfs = get_fs();
74304 set_fs(get_ds());
74305 - res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
74306 + res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
74307 set_fs(oldfs);
74308 return res;
74309 }
74310 @@ -340,7 +340,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
74311
74312 mm_segment_t oldfs = get_fs();
74313 set_fs(get_ds());
74314 - res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
74315 + res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
74316 set_fs(oldfs);
74317 return res;
74318 }
74319 diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
74320 index 2133c30..5c4b40b 100644
74321 --- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
74322 +++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
74323 @@ -399,7 +399,7 @@ static unsigned char asn1_octets_decode(struct asn1_ctx *ctx,
74324
74325 *len = 0;
74326
74327 - *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
74328 + *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
74329 if (*octets == NULL)
74330 return 0;
74331
74332 diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
74333 index b072386..abdebcf 100644
74334 --- a/net/ipv4/ping.c
74335 +++ b/net/ipv4/ping.c
74336 @@ -838,7 +838,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
74337 sk_rmem_alloc_get(sp),
74338 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
74339 atomic_read(&sp->sk_refcnt), sp,
74340 - atomic_read(&sp->sk_drops), len);
74341 + atomic_read_unchecked(&sp->sk_drops), len);
74342 }
74343
74344 static int ping_seq_show(struct seq_file *seq, void *v)
74345 diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
74346 index 3ccda5a..3c1e61d 100644
74347 --- a/net/ipv4/raw.c
74348 +++ b/net/ipv4/raw.c
74349 @@ -304,7 +304,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
74350 int raw_rcv(struct sock *sk, struct sk_buff *skb)
74351 {
74352 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
74353 - atomic_inc(&sk->sk_drops);
74354 + atomic_inc_unchecked(&sk->sk_drops);
74355 kfree_skb(skb);
74356 return NET_RX_DROP;
74357 }
74358 @@ -742,16 +742,20 @@ static int raw_init(struct sock *sk)
74359
74360 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
74361 {
74362 + struct icmp_filter filter;
74363 +
74364 if (optlen > sizeof(struct icmp_filter))
74365 optlen = sizeof(struct icmp_filter);
74366 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
74367 + if (copy_from_user(&filter, optval, optlen))
74368 return -EFAULT;
74369 + raw_sk(sk)->filter = filter;
74370 return 0;
74371 }
74372
74373 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
74374 {
74375 int len, ret = -EFAULT;
74376 + struct icmp_filter filter;
74377
74378 if (get_user(len, optlen))
74379 goto out;
74380 @@ -761,8 +765,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
74381 if (len > sizeof(struct icmp_filter))
74382 len = sizeof(struct icmp_filter);
74383 ret = -EFAULT;
74384 - if (put_user(len, optlen) ||
74385 - copy_to_user(optval, &raw_sk(sk)->filter, len))
74386 + filter = raw_sk(sk)->filter;
74387 + if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
74388 goto out;
74389 ret = 0;
74390 out: return ret;
74391 @@ -990,7 +994,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
74392 sk_wmem_alloc_get(sp),
74393 sk_rmem_alloc_get(sp),
74394 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
74395 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
74396 + atomic_read(&sp->sk_refcnt),
74397 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74398 + NULL,
74399 +#else
74400 + sp,
74401 +#endif
74402 + atomic_read_unchecked(&sp->sk_drops));
74403 }
74404
74405 static int raw_seq_show(struct seq_file *seq, void *v)
74406 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
74407 index 0197747..7adb0dc 100644
74408 --- a/net/ipv4/route.c
74409 +++ b/net/ipv4/route.c
74410 @@ -311,7 +311,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
74411
74412 static inline int rt_genid(struct net *net)
74413 {
74414 - return atomic_read(&net->ipv4.rt_genid);
74415 + return atomic_read_unchecked(&net->ipv4.rt_genid);
74416 }
74417
74418 #ifdef CONFIG_PROC_FS
74419 @@ -935,7 +935,7 @@ static void rt_cache_invalidate(struct net *net)
74420 unsigned char shuffle;
74421
74422 get_random_bytes(&shuffle, sizeof(shuffle));
74423 - atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
74424 + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
74425 inetpeer_invalidate_tree(AF_INET);
74426 }
74427
74428 @@ -3010,7 +3010,7 @@ static int rt_fill_info(struct net *net,
74429 error = rt->dst.error;
74430 if (peer) {
74431 inet_peer_refcheck(rt->peer);
74432 - id = atomic_read(&peer->ip_id_count) & 0xffff;
74433 + id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
74434 if (peer->tcp_ts_stamp) {
74435 ts = peer->tcp_ts;
74436 tsage = get_seconds() - peer->tcp_ts_stamp;
74437 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
74438 index fd54c5f..96d6407 100644
74439 --- a/net/ipv4/tcp_ipv4.c
74440 +++ b/net/ipv4/tcp_ipv4.c
74441 @@ -88,6 +88,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
74442 int sysctl_tcp_low_latency __read_mostly;
74443 EXPORT_SYMBOL(sysctl_tcp_low_latency);
74444
74445 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74446 +extern int grsec_enable_blackhole;
74447 +#endif
74448
74449 #ifdef CONFIG_TCP_MD5SIG
74450 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
74451 @@ -1638,6 +1641,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
74452 return 0;
74453
74454 reset:
74455 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74456 + if (!grsec_enable_blackhole)
74457 +#endif
74458 tcp_v4_send_reset(rsk, skb);
74459 discard:
74460 kfree_skb(skb);
74461 @@ -1700,12 +1706,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
74462 TCP_SKB_CB(skb)->sacked = 0;
74463
74464 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
74465 - if (!sk)
74466 + if (!sk) {
74467 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74468 + ret = 1;
74469 +#endif
74470 goto no_tcp_socket;
74471 -
74472 + }
74473 process:
74474 - if (sk->sk_state == TCP_TIME_WAIT)
74475 + if (sk->sk_state == TCP_TIME_WAIT) {
74476 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74477 + ret = 2;
74478 +#endif
74479 goto do_time_wait;
74480 + }
74481
74482 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
74483 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
74484 @@ -1755,6 +1768,10 @@ no_tcp_socket:
74485 bad_packet:
74486 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
74487 } else {
74488 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74489 + if (!grsec_enable_blackhole || (ret == 1 &&
74490 + (skb->dev->flags & IFF_LOOPBACK)))
74491 +#endif
74492 tcp_v4_send_reset(NULL, skb);
74493 }
74494
74495 @@ -2417,7 +2434,11 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
74496 0, /* non standard timer */
74497 0, /* open_requests have no inode */
74498 atomic_read(&sk->sk_refcnt),
74499 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74500 + NULL,
74501 +#else
74502 req,
74503 +#endif
74504 len);
74505 }
74506
74507 @@ -2467,7 +2488,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
74508 sock_i_uid(sk),
74509 icsk->icsk_probes_out,
74510 sock_i_ino(sk),
74511 - atomic_read(&sk->sk_refcnt), sk,
74512 + atomic_read(&sk->sk_refcnt),
74513 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74514 + NULL,
74515 +#else
74516 + sk,
74517 +#endif
74518 jiffies_to_clock_t(icsk->icsk_rto),
74519 jiffies_to_clock_t(icsk->icsk_ack.ato),
74520 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
74521 @@ -2495,7 +2521,13 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
74522 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
74523 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
74524 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
74525 - atomic_read(&tw->tw_refcnt), tw, len);
74526 + atomic_read(&tw->tw_refcnt),
74527 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74528 + NULL,
74529 +#else
74530 + tw,
74531 +#endif
74532 + len);
74533 }
74534
74535 #define TMPSZ 150
74536 diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
74537 index 550e755..25721b3 100644
74538 --- a/net/ipv4/tcp_minisocks.c
74539 +++ b/net/ipv4/tcp_minisocks.c
74540 @@ -27,6 +27,10 @@
74541 #include <net/inet_common.h>
74542 #include <net/xfrm.h>
74543
74544 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74545 +extern int grsec_enable_blackhole;
74546 +#endif
74547 +
74548 int sysctl_tcp_syncookies __read_mostly = 1;
74549 EXPORT_SYMBOL(sysctl_tcp_syncookies);
74550
74551 @@ -753,6 +757,10 @@ listen_overflow:
74552
74553 embryonic_reset:
74554 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
74555 +
74556 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74557 + if (!grsec_enable_blackhole)
74558 +#endif
74559 if (!(flg & TCP_FLAG_RST))
74560 req->rsk_ops->send_reset(sk, skb);
74561
74562 diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
74563 index 85ee7eb..53277ab 100644
74564 --- a/net/ipv4/tcp_probe.c
74565 +++ b/net/ipv4/tcp_probe.c
74566 @@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
74567 if (cnt + width >= len)
74568 break;
74569
74570 - if (copy_to_user(buf + cnt, tbuf, width))
74571 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
74572 return -EFAULT;
74573 cnt += width;
74574 }
74575 diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
74576 index cd2e072..1fffee2 100644
74577 --- a/net/ipv4/tcp_timer.c
74578 +++ b/net/ipv4/tcp_timer.c
74579 @@ -22,6 +22,10 @@
74580 #include <linux/gfp.h>
74581 #include <net/tcp.h>
74582
74583 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74584 +extern int grsec_lastack_retries;
74585 +#endif
74586 +
74587 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
74588 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
74589 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
74590 @@ -196,6 +200,13 @@ static int tcp_write_timeout(struct sock *sk)
74591 }
74592 }
74593
74594 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74595 + if ((sk->sk_state == TCP_LAST_ACK) &&
74596 + (grsec_lastack_retries > 0) &&
74597 + (grsec_lastack_retries < retry_until))
74598 + retry_until = grsec_lastack_retries;
74599 +#endif
74600 +
74601 if (retransmits_timed_out(sk, retry_until,
74602 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
74603 /* Has it gone just too far? */
74604 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
74605 index 5d075b5..d907d5f 100644
74606 --- a/net/ipv4/udp.c
74607 +++ b/net/ipv4/udp.c
74608 @@ -86,6 +86,7 @@
74609 #include <linux/types.h>
74610 #include <linux/fcntl.h>
74611 #include <linux/module.h>
74612 +#include <linux/security.h>
74613 #include <linux/socket.h>
74614 #include <linux/sockios.h>
74615 #include <linux/igmp.h>
74616 @@ -108,6 +109,10 @@
74617 #include <trace/events/udp.h>
74618 #include "udp_impl.h"
74619
74620 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74621 +extern int grsec_enable_blackhole;
74622 +#endif
74623 +
74624 struct udp_table udp_table __read_mostly;
74625 EXPORT_SYMBOL(udp_table);
74626
74627 @@ -566,6 +571,9 @@ found:
74628 return s;
74629 }
74630
74631 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
74632 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
74633 +
74634 /*
74635 * This routine is called by the ICMP module when it gets some
74636 * sort of error condition. If err < 0 then the socket should
74637 @@ -857,9 +865,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
74638 dport = usin->sin_port;
74639 if (dport == 0)
74640 return -EINVAL;
74641 +
74642 + err = gr_search_udp_sendmsg(sk, usin);
74643 + if (err)
74644 + return err;
74645 } else {
74646 if (sk->sk_state != TCP_ESTABLISHED)
74647 return -EDESTADDRREQ;
74648 +
74649 + err = gr_search_udp_sendmsg(sk, NULL);
74650 + if (err)
74651 + return err;
74652 +
74653 daddr = inet->inet_daddr;
74654 dport = inet->inet_dport;
74655 /* Open fast path for connected socket.
74656 @@ -1100,7 +1117,7 @@ static unsigned int first_packet_length(struct sock *sk)
74657 udp_lib_checksum_complete(skb)) {
74658 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
74659 IS_UDPLITE(sk));
74660 - atomic_inc(&sk->sk_drops);
74661 + atomic_inc_unchecked(&sk->sk_drops);
74662 __skb_unlink(skb, rcvq);
74663 __skb_queue_tail(&list_kill, skb);
74664 }
74665 @@ -1186,6 +1203,10 @@ try_again:
74666 if (!skb)
74667 goto out;
74668
74669 + err = gr_search_udp_recvmsg(sk, skb);
74670 + if (err)
74671 + goto out_free;
74672 +
74673 ulen = skb->len - sizeof(struct udphdr);
74674 copied = len;
74675 if (copied > ulen)
74676 @@ -1489,7 +1510,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
74677
74678 drop:
74679 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
74680 - atomic_inc(&sk->sk_drops);
74681 + atomic_inc_unchecked(&sk->sk_drops);
74682 kfree_skb(skb);
74683 return -1;
74684 }
74685 @@ -1508,7 +1529,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
74686 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
74687
74688 if (!skb1) {
74689 - atomic_inc(&sk->sk_drops);
74690 + atomic_inc_unchecked(&sk->sk_drops);
74691 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
74692 IS_UDPLITE(sk));
74693 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
74694 @@ -1677,6 +1698,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
74695 goto csum_error;
74696
74697 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
74698 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74699 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
74700 +#endif
74701 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
74702
74703 /*
74704 @@ -2100,8 +2124,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
74705 sk_wmem_alloc_get(sp),
74706 sk_rmem_alloc_get(sp),
74707 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
74708 - atomic_read(&sp->sk_refcnt), sp,
74709 - atomic_read(&sp->sk_drops), len);
74710 + atomic_read(&sp->sk_refcnt),
74711 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74712 + NULL,
74713 +#else
74714 + sp,
74715 +#endif
74716 + atomic_read_unchecked(&sp->sk_drops), len);
74717 }
74718
74719 int udp4_seq_show(struct seq_file *seq, void *v)
74720 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
74721 index 6b8ebc5..1d624f4 100644
74722 --- a/net/ipv6/addrconf.c
74723 +++ b/net/ipv6/addrconf.c
74724 @@ -2145,7 +2145,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
74725 p.iph.ihl = 5;
74726 p.iph.protocol = IPPROTO_IPV6;
74727 p.iph.ttl = 64;
74728 - ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
74729 + ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
74730
74731 if (ops->ndo_do_ioctl) {
74732 mm_segment_t oldfs = get_fs();
74733 diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
74734 index 02dd203..e03fcc9 100644
74735 --- a/net/ipv6/inet6_connection_sock.c
74736 +++ b/net/ipv6/inet6_connection_sock.c
74737 @@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
74738 #ifdef CONFIG_XFRM
74739 {
74740 struct rt6_info *rt = (struct rt6_info *)dst;
74741 - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
74742 + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
74743 }
74744 #endif
74745 }
74746 @@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
74747 #ifdef CONFIG_XFRM
74748 if (dst) {
74749 struct rt6_info *rt = (struct rt6_info *)dst;
74750 - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
74751 + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
74752 __sk_dst_reset(sk);
74753 dst = NULL;
74754 }
74755 diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
74756 index 18a2719..779f36a 100644
74757 --- a/net/ipv6/ipv6_sockglue.c
74758 +++ b/net/ipv6/ipv6_sockglue.c
74759 @@ -960,7 +960,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
74760 if (sk->sk_type != SOCK_STREAM)
74761 return -ENOPROTOOPT;
74762
74763 - msg.msg_control = optval;
74764 + msg.msg_control = (void __force_kernel *)optval;
74765 msg.msg_controllen = len;
74766 msg.msg_flags = flags;
74767
74768 diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
74769 index d02f7e4..2d2a0f1 100644
74770 --- a/net/ipv6/raw.c
74771 +++ b/net/ipv6/raw.c
74772 @@ -377,7 +377,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
74773 {
74774 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
74775 skb_checksum_complete(skb)) {
74776 - atomic_inc(&sk->sk_drops);
74777 + atomic_inc_unchecked(&sk->sk_drops);
74778 kfree_skb(skb);
74779 return NET_RX_DROP;
74780 }
74781 @@ -405,7 +405,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
74782 struct raw6_sock *rp = raw6_sk(sk);
74783
74784 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
74785 - atomic_inc(&sk->sk_drops);
74786 + atomic_inc_unchecked(&sk->sk_drops);
74787 kfree_skb(skb);
74788 return NET_RX_DROP;
74789 }
74790 @@ -429,7 +429,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
74791
74792 if (inet->hdrincl) {
74793 if (skb_checksum_complete(skb)) {
74794 - atomic_inc(&sk->sk_drops);
74795 + atomic_inc_unchecked(&sk->sk_drops);
74796 kfree_skb(skb);
74797 return NET_RX_DROP;
74798 }
74799 @@ -602,7 +602,7 @@ out:
74800 return err;
74801 }
74802
74803 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
74804 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
74805 struct flowi6 *fl6, struct dst_entry **dstp,
74806 unsigned int flags)
74807 {
74808 @@ -912,12 +912,15 @@ do_confirm:
74809 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
74810 char __user *optval, int optlen)
74811 {
74812 + struct icmp6_filter filter;
74813 +
74814 switch (optname) {
74815 case ICMPV6_FILTER:
74816 if (optlen > sizeof(struct icmp6_filter))
74817 optlen = sizeof(struct icmp6_filter);
74818 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
74819 + if (copy_from_user(&filter, optval, optlen))
74820 return -EFAULT;
74821 + raw6_sk(sk)->filter = filter;
74822 return 0;
74823 default:
74824 return -ENOPROTOOPT;
74825 @@ -930,6 +933,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
74826 char __user *optval, int __user *optlen)
74827 {
74828 int len;
74829 + struct icmp6_filter filter;
74830
74831 switch (optname) {
74832 case ICMPV6_FILTER:
74833 @@ -941,7 +945,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
74834 len = sizeof(struct icmp6_filter);
74835 if (put_user(len, optlen))
74836 return -EFAULT;
74837 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
74838 + filter = raw6_sk(sk)->filter;
74839 + if (len > sizeof filter || copy_to_user(optval, &filter, len))
74840 return -EFAULT;
74841 return 0;
74842 default:
74843 @@ -1248,7 +1253,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
74844 0, 0L, 0,
74845 sock_i_uid(sp), 0,
74846 sock_i_ino(sp),
74847 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
74848 + atomic_read(&sp->sk_refcnt),
74849 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74850 + NULL,
74851 +#else
74852 + sp,
74853 +#endif
74854 + atomic_read_unchecked(&sp->sk_drops));
74855 }
74856
74857 static int raw6_seq_show(struct seq_file *seq, void *v)
74858 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
74859 index 3edd05a..63aad01 100644
74860 --- a/net/ipv6/tcp_ipv6.c
74861 +++ b/net/ipv6/tcp_ipv6.c
74862 @@ -94,6 +94,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
74863 }
74864 #endif
74865
74866 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74867 +extern int grsec_enable_blackhole;
74868 +#endif
74869 +
74870 static void tcp_v6_hash(struct sock *sk)
74871 {
74872 if (sk->sk_state != TCP_CLOSE) {
74873 @@ -1650,6 +1654,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
74874 return 0;
74875
74876 reset:
74877 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74878 + if (!grsec_enable_blackhole)
74879 +#endif
74880 tcp_v6_send_reset(sk, skb);
74881 discard:
74882 if (opt_skb)
74883 @@ -1729,12 +1736,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
74884 TCP_SKB_CB(skb)->sacked = 0;
74885
74886 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
74887 - if (!sk)
74888 + if (!sk) {
74889 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74890 + ret = 1;
74891 +#endif
74892 goto no_tcp_socket;
74893 + }
74894
74895 process:
74896 - if (sk->sk_state == TCP_TIME_WAIT)
74897 + if (sk->sk_state == TCP_TIME_WAIT) {
74898 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74899 + ret = 2;
74900 +#endif
74901 goto do_time_wait;
74902 + }
74903
74904 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
74905 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
74906 @@ -1782,6 +1797,10 @@ no_tcp_socket:
74907 bad_packet:
74908 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
74909 } else {
74910 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74911 + if (!grsec_enable_blackhole || (ret == 1 &&
74912 + (skb->dev->flags & IFF_LOOPBACK)))
74913 +#endif
74914 tcp_v6_send_reset(NULL, skb);
74915 }
74916
74917 @@ -2043,7 +2062,13 @@ static void get_openreq6(struct seq_file *seq,
74918 uid,
74919 0, /* non standard timer */
74920 0, /* open_requests have no inode */
74921 - 0, req);
74922 + 0,
74923 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74924 + NULL
74925 +#else
74926 + req
74927 +#endif
74928 + );
74929 }
74930
74931 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
74932 @@ -2093,7 +2118,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
74933 sock_i_uid(sp),
74934 icsk->icsk_probes_out,
74935 sock_i_ino(sp),
74936 - atomic_read(&sp->sk_refcnt), sp,
74937 + atomic_read(&sp->sk_refcnt),
74938 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74939 + NULL,
74940 +#else
74941 + sp,
74942 +#endif
74943 jiffies_to_clock_t(icsk->icsk_rto),
74944 jiffies_to_clock_t(icsk->icsk_ack.ato),
74945 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
74946 @@ -2128,7 +2158,13 @@ static void get_timewait6_sock(struct seq_file *seq,
74947 dest->s6_addr32[2], dest->s6_addr32[3], destp,
74948 tw->tw_substate, 0, 0,
74949 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
74950 - atomic_read(&tw->tw_refcnt), tw);
74951 + atomic_read(&tw->tw_refcnt),
74952 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74953 + NULL
74954 +#else
74955 + tw
74956 +#endif
74957 + );
74958 }
74959
74960 static int tcp6_seq_show(struct seq_file *seq, void *v)
74961 diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
74962 index 4f96b5c..75543d7 100644
74963 --- a/net/ipv6/udp.c
74964 +++ b/net/ipv6/udp.c
74965 @@ -50,6 +50,10 @@
74966 #include <linux/seq_file.h>
74967 #include "udp_impl.h"
74968
74969 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74970 +extern int grsec_enable_blackhole;
74971 +#endif
74972 +
74973 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
74974 {
74975 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
74976 @@ -551,7 +555,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
74977
74978 return 0;
74979 drop:
74980 - atomic_inc(&sk->sk_drops);
74981 + atomic_inc_unchecked(&sk->sk_drops);
74982 drop_no_sk_drops_inc:
74983 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
74984 kfree_skb(skb);
74985 @@ -627,7 +631,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
74986 continue;
74987 }
74988 drop:
74989 - atomic_inc(&sk->sk_drops);
74990 + atomic_inc_unchecked(&sk->sk_drops);
74991 UDP6_INC_STATS_BH(sock_net(sk),
74992 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
74993 UDP6_INC_STATS_BH(sock_net(sk),
74994 @@ -782,6 +786,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
74995 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
74996 proto == IPPROTO_UDPLITE);
74997
74998 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74999 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
75000 +#endif
75001 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
75002
75003 kfree_skb(skb);
75004 @@ -798,7 +805,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
75005 if (!sock_owned_by_user(sk))
75006 udpv6_queue_rcv_skb(sk, skb);
75007 else if (sk_add_backlog(sk, skb)) {
75008 - atomic_inc(&sk->sk_drops);
75009 + atomic_inc_unchecked(&sk->sk_drops);
75010 bh_unlock_sock(sk);
75011 sock_put(sk);
75012 goto discard;
75013 @@ -1410,8 +1417,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
75014 0, 0L, 0,
75015 sock_i_uid(sp), 0,
75016 sock_i_ino(sp),
75017 - atomic_read(&sp->sk_refcnt), sp,
75018 - atomic_read(&sp->sk_drops));
75019 + atomic_read(&sp->sk_refcnt),
75020 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75021 + NULL,
75022 +#else
75023 + sp,
75024 +#endif
75025 + atomic_read_unchecked(&sp->sk_drops));
75026 }
75027
75028 int udp6_seq_show(struct seq_file *seq, void *v)
75029 diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
75030 index 253695d..9481ce8 100644
75031 --- a/net/irda/ircomm/ircomm_tty.c
75032 +++ b/net/irda/ircomm/ircomm_tty.c
75033 @@ -282,16 +282,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
75034 add_wait_queue(&self->open_wait, &wait);
75035
75036 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
75037 - __FILE__,__LINE__, tty->driver->name, self->open_count );
75038 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
75039
75040 /* As far as I can see, we protect open_count - Jean II */
75041 spin_lock_irqsave(&self->spinlock, flags);
75042 if (!tty_hung_up_p(filp)) {
75043 extra_count = 1;
75044 - self->open_count--;
75045 + local_dec(&self->open_count);
75046 }
75047 spin_unlock_irqrestore(&self->spinlock, flags);
75048 - self->blocked_open++;
75049 + local_inc(&self->blocked_open);
75050
75051 while (1) {
75052 if (tty->termios->c_cflag & CBAUD) {
75053 @@ -331,7 +331,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
75054 }
75055
75056 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
75057 - __FILE__,__LINE__, tty->driver->name, self->open_count );
75058 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
75059
75060 schedule();
75061 }
75062 @@ -342,13 +342,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
75063 if (extra_count) {
75064 /* ++ is not atomic, so this should be protected - Jean II */
75065 spin_lock_irqsave(&self->spinlock, flags);
75066 - self->open_count++;
75067 + local_inc(&self->open_count);
75068 spin_unlock_irqrestore(&self->spinlock, flags);
75069 }
75070 - self->blocked_open--;
75071 + local_dec(&self->blocked_open);
75072
75073 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
75074 - __FILE__,__LINE__, tty->driver->name, self->open_count);
75075 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
75076
75077 if (!retval)
75078 self->flags |= ASYNC_NORMAL_ACTIVE;
75079 @@ -417,14 +417,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
75080 }
75081 /* ++ is not atomic, so this should be protected - Jean II */
75082 spin_lock_irqsave(&self->spinlock, flags);
75083 - self->open_count++;
75084 + local_inc(&self->open_count);
75085
75086 tty->driver_data = self;
75087 self->tty = tty;
75088 spin_unlock_irqrestore(&self->spinlock, flags);
75089
75090 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
75091 - self->line, self->open_count);
75092 + self->line, local_read(&self->open_count));
75093
75094 /* Not really used by us, but lets do it anyway */
75095 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
75096 @@ -510,7 +510,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
75097 return;
75098 }
75099
75100 - if ((tty->count == 1) && (self->open_count != 1)) {
75101 + if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
75102 /*
75103 * Uh, oh. tty->count is 1, which means that the tty
75104 * structure will be freed. state->count should always
75105 @@ -520,16 +520,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
75106 */
75107 IRDA_DEBUG(0, "%s(), bad serial port count; "
75108 "tty->count is 1, state->count is %d\n", __func__ ,
75109 - self->open_count);
75110 - self->open_count = 1;
75111 + local_read(&self->open_count));
75112 + local_set(&self->open_count, 1);
75113 }
75114
75115 - if (--self->open_count < 0) {
75116 + if (local_dec_return(&self->open_count) < 0) {
75117 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
75118 - __func__, self->line, self->open_count);
75119 - self->open_count = 0;
75120 + __func__, self->line, local_read(&self->open_count));
75121 + local_set(&self->open_count, 0);
75122 }
75123 - if (self->open_count) {
75124 + if (local_read(&self->open_count)) {
75125 spin_unlock_irqrestore(&self->spinlock, flags);
75126
75127 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
75128 @@ -561,7 +561,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
75129 tty->closing = 0;
75130 self->tty = NULL;
75131
75132 - if (self->blocked_open) {
75133 + if (local_read(&self->blocked_open)) {
75134 if (self->close_delay)
75135 schedule_timeout_interruptible(self->close_delay);
75136 wake_up_interruptible(&self->open_wait);
75137 @@ -1013,7 +1013,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
75138 spin_lock_irqsave(&self->spinlock, flags);
75139 self->flags &= ~ASYNC_NORMAL_ACTIVE;
75140 self->tty = NULL;
75141 - self->open_count = 0;
75142 + local_set(&self->open_count, 0);
75143 spin_unlock_irqrestore(&self->spinlock, flags);
75144
75145 wake_up_interruptible(&self->open_wait);
75146 @@ -1360,7 +1360,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
75147 seq_putc(m, '\n');
75148
75149 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
75150 - seq_printf(m, "Open count: %d\n", self->open_count);
75151 + seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
75152 seq_printf(m, "Max data size: %d\n", self->max_data_size);
75153 seq_printf(m, "Max header size: %d\n", self->max_header_size);
75154
75155 diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
75156 index d5c5b8f..33beff0 100644
75157 --- a/net/iucv/af_iucv.c
75158 +++ b/net/iucv/af_iucv.c
75159 @@ -764,10 +764,10 @@ static int iucv_sock_autobind(struct sock *sk)
75160
75161 write_lock_bh(&iucv_sk_list.lock);
75162
75163 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
75164 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
75165 while (__iucv_get_sock_by_name(name)) {
75166 sprintf(name, "%08x",
75167 - atomic_inc_return(&iucv_sk_list.autobind_name));
75168 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
75169 }
75170
75171 write_unlock_bh(&iucv_sk_list.lock);
75172 diff --git a/net/key/af_key.c b/net/key/af_key.c
75173 index 11dbb22..c20f667 100644
75174 --- a/net/key/af_key.c
75175 +++ b/net/key/af_key.c
75176 @@ -3016,10 +3016,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
75177 static u32 get_acqseq(void)
75178 {
75179 u32 res;
75180 - static atomic_t acqseq;
75181 + static atomic_unchecked_t acqseq;
75182
75183 do {
75184 - res = atomic_inc_return(&acqseq);
75185 + res = atomic_inc_return_unchecked(&acqseq);
75186 } while (!res);
75187 return res;
75188 }
75189 diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
75190 index 2f0642d..e5c6fba 100644
75191 --- a/net/mac80211/ieee80211_i.h
75192 +++ b/net/mac80211/ieee80211_i.h
75193 @@ -28,6 +28,7 @@
75194 #include <net/ieee80211_radiotap.h>
75195 #include <net/cfg80211.h>
75196 #include <net/mac80211.h>
75197 +#include <asm/local.h>
75198 #include "key.h"
75199 #include "sta_info.h"
75200
75201 @@ -781,7 +782,7 @@ struct ieee80211_local {
75202 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
75203 spinlock_t queue_stop_reason_lock;
75204
75205 - int open_count;
75206 + local_t open_count;
75207 int monitors, cooked_mntrs;
75208 /* number of interfaces with corresponding FIF_ flags */
75209 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
75210 diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
75211 index 8e2137b..2974283 100644
75212 --- a/net/mac80211/iface.c
75213 +++ b/net/mac80211/iface.c
75214 @@ -222,7 +222,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
75215 break;
75216 }
75217
75218 - if (local->open_count == 0) {
75219 + if (local_read(&local->open_count) == 0) {
75220 res = drv_start(local);
75221 if (res)
75222 goto err_del_bss;
75223 @@ -246,7 +246,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
75224 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
75225
75226 if (!is_valid_ether_addr(dev->dev_addr)) {
75227 - if (!local->open_count)
75228 + if (!local_read(&local->open_count))
75229 drv_stop(local);
75230 return -EADDRNOTAVAIL;
75231 }
75232 @@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
75233 mutex_unlock(&local->mtx);
75234
75235 if (coming_up)
75236 - local->open_count++;
75237 + local_inc(&local->open_count);
75238
75239 if (hw_reconf_flags)
75240 ieee80211_hw_config(local, hw_reconf_flags);
75241 @@ -360,7 +360,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
75242 err_del_interface:
75243 drv_remove_interface(local, sdata);
75244 err_stop:
75245 - if (!local->open_count)
75246 + if (!local_read(&local->open_count))
75247 drv_stop(local);
75248 err_del_bss:
75249 sdata->bss = NULL;
75250 @@ -489,7 +489,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
75251 }
75252
75253 if (going_down)
75254 - local->open_count--;
75255 + local_dec(&local->open_count);
75256
75257 switch (sdata->vif.type) {
75258 case NL80211_IFTYPE_AP_VLAN:
75259 @@ -548,7 +548,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
75260
75261 ieee80211_recalc_ps(local, -1);
75262
75263 - if (local->open_count == 0) {
75264 + if (local_read(&local->open_count) == 0) {
75265 if (local->ops->napi_poll)
75266 napi_disable(&local->napi);
75267 ieee80211_clear_tx_pending(local);
75268 diff --git a/net/mac80211/main.c b/net/mac80211/main.c
75269 index b142bd4..a651749 100644
75270 --- a/net/mac80211/main.c
75271 +++ b/net/mac80211/main.c
75272 @@ -166,7 +166,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
75273 local->hw.conf.power_level = power;
75274 }
75275
75276 - if (changed && local->open_count) {
75277 + if (changed && local_read(&local->open_count)) {
75278 ret = drv_config(local, changed);
75279 /*
75280 * Goal:
75281 diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
75282 index 596efaf..8f1911f 100644
75283 --- a/net/mac80211/pm.c
75284 +++ b/net/mac80211/pm.c
75285 @@ -34,7 +34,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
75286 struct ieee80211_sub_if_data *sdata;
75287 struct sta_info *sta;
75288
75289 - if (!local->open_count)
75290 + if (!local_read(&local->open_count))
75291 goto suspend;
75292
75293 ieee80211_scan_cancel(local);
75294 @@ -72,7 +72,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
75295 cancel_work_sync(&local->dynamic_ps_enable_work);
75296 del_timer_sync(&local->dynamic_ps_timer);
75297
75298 - local->wowlan = wowlan && local->open_count;
75299 + local->wowlan = wowlan && local_read(&local->open_count);
75300 if (local->wowlan) {
75301 int err = drv_suspend(local, wowlan);
75302 if (err < 0) {
75303 @@ -129,7 +129,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
75304 }
75305
75306 /* stop hardware - this must stop RX */
75307 - if (local->open_count)
75308 + if (local_read(&local->open_count))
75309 ieee80211_stop_device(local);
75310
75311 suspend:
75312 diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
75313 index f9b8e81..bb89b46 100644
75314 --- a/net/mac80211/rate.c
75315 +++ b/net/mac80211/rate.c
75316 @@ -401,7 +401,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
75317
75318 ASSERT_RTNL();
75319
75320 - if (local->open_count)
75321 + if (local_read(&local->open_count))
75322 return -EBUSY;
75323
75324 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
75325 diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
75326 index c97a065..ff61928 100644
75327 --- a/net/mac80211/rc80211_pid_debugfs.c
75328 +++ b/net/mac80211/rc80211_pid_debugfs.c
75329 @@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
75330
75331 spin_unlock_irqrestore(&events->lock, status);
75332
75333 - if (copy_to_user(buf, pb, p))
75334 + if (p > sizeof(pb) || copy_to_user(buf, pb, p))
75335 return -EFAULT;
75336
75337 return p;
75338 diff --git a/net/mac80211/util.c b/net/mac80211/util.c
75339 index 9919892..8c49803 100644
75340 --- a/net/mac80211/util.c
75341 +++ b/net/mac80211/util.c
75342 @@ -1143,7 +1143,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
75343 }
75344 #endif
75345 /* everything else happens only if HW was up & running */
75346 - if (!local->open_count)
75347 + if (!local_read(&local->open_count))
75348 goto wake_up;
75349
75350 /*
75351 diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
75352 index f8ac4ef..b02560b 100644
75353 --- a/net/netfilter/Kconfig
75354 +++ b/net/netfilter/Kconfig
75355 @@ -806,6 +806,16 @@ config NETFILTER_XT_MATCH_ESP
75356
75357 To compile it as a module, choose M here. If unsure, say N.
75358
75359 +config NETFILTER_XT_MATCH_GRADM
75360 + tristate '"gradm" match support'
75361 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
75362 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
75363 + ---help---
75364 + The gradm match allows to match on grsecurity RBAC being enabled.
75365 + It is useful when iptables rules are applied early on bootup to
75366 + prevent connections to the machine (except from a trusted host)
75367 + while the RBAC system is disabled.
75368 +
75369 config NETFILTER_XT_MATCH_HASHLIMIT
75370 tristate '"hashlimit" match support'
75371 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
75372 diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
75373 index 40f4c3d..0d5dd6b 100644
75374 --- a/net/netfilter/Makefile
75375 +++ b/net/netfilter/Makefile
75376 @@ -83,6 +83,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
75377 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
75378 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
75379 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
75380 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
75381 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
75382 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
75383 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
75384 diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
75385 index 29fa5ba..8debc79 100644
75386 --- a/net/netfilter/ipvs/ip_vs_conn.c
75387 +++ b/net/netfilter/ipvs/ip_vs_conn.c
75388 @@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
75389 /* Increase the refcnt counter of the dest */
75390 atomic_inc(&dest->refcnt);
75391
75392 - conn_flags = atomic_read(&dest->conn_flags);
75393 + conn_flags = atomic_read_unchecked(&dest->conn_flags);
75394 if (cp->protocol != IPPROTO_UDP)
75395 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
75396 /* Bind with the destination and its corresponding transmitter */
75397 @@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
75398 atomic_set(&cp->refcnt, 1);
75399
75400 atomic_set(&cp->n_control, 0);
75401 - atomic_set(&cp->in_pkts, 0);
75402 + atomic_set_unchecked(&cp->in_pkts, 0);
75403
75404 atomic_inc(&ipvs->conn_count);
75405 if (flags & IP_VS_CONN_F_NO_CPORT)
75406 @@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
75407
75408 /* Don't drop the entry if its number of incoming packets is not
75409 located in [0, 8] */
75410 - i = atomic_read(&cp->in_pkts);
75411 + i = atomic_read_unchecked(&cp->in_pkts);
75412 if (i > 8 || i < 0) return 0;
75413
75414 if (!todrop_rate[i]) return 0;
75415 diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
75416 index 2555816..31492d9 100644
75417 --- a/net/netfilter/ipvs/ip_vs_core.c
75418 +++ b/net/netfilter/ipvs/ip_vs_core.c
75419 @@ -562,7 +562,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
75420 ret = cp->packet_xmit(skb, cp, pd->pp);
75421 /* do not touch skb anymore */
75422
75423 - atomic_inc(&cp->in_pkts);
75424 + atomic_inc_unchecked(&cp->in_pkts);
75425 ip_vs_conn_put(cp);
75426 return ret;
75427 }
75428 @@ -1611,7 +1611,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
75429 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
75430 pkts = sysctl_sync_threshold(ipvs);
75431 else
75432 - pkts = atomic_add_return(1, &cp->in_pkts);
75433 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
75434
75435 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
75436 cp->protocol == IPPROTO_SCTP) {
75437 diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
75438 index b3afe18..08ec940 100644
75439 --- a/net/netfilter/ipvs/ip_vs_ctl.c
75440 +++ b/net/netfilter/ipvs/ip_vs_ctl.c
75441 @@ -788,7 +788,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
75442 ip_vs_rs_hash(ipvs, dest);
75443 write_unlock_bh(&ipvs->rs_lock);
75444 }
75445 - atomic_set(&dest->conn_flags, conn_flags);
75446 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
75447
75448 /* bind the service */
75449 if (!dest->svc) {
75450 @@ -2028,7 +2028,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
75451 " %-7s %-6d %-10d %-10d\n",
75452 &dest->addr.in6,
75453 ntohs(dest->port),
75454 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
75455 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
75456 atomic_read(&dest->weight),
75457 atomic_read(&dest->activeconns),
75458 atomic_read(&dest->inactconns));
75459 @@ -2039,7 +2039,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
75460 "%-7s %-6d %-10d %-10d\n",
75461 ntohl(dest->addr.ip),
75462 ntohs(dest->port),
75463 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
75464 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
75465 atomic_read(&dest->weight),
75466 atomic_read(&dest->activeconns),
75467 atomic_read(&dest->inactconns));
75468 @@ -2509,7 +2509,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
75469
75470 entry.addr = dest->addr.ip;
75471 entry.port = dest->port;
75472 - entry.conn_flags = atomic_read(&dest->conn_flags);
75473 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
75474 entry.weight = atomic_read(&dest->weight);
75475 entry.u_threshold = dest->u_threshold;
75476 entry.l_threshold = dest->l_threshold;
75477 @@ -3042,7 +3042,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
75478 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
75479
75480 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
75481 - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
75482 + atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
75483 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
75484 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
75485 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
75486 diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
75487 index 8a0d6d6..90ec197 100644
75488 --- a/net/netfilter/ipvs/ip_vs_sync.c
75489 +++ b/net/netfilter/ipvs/ip_vs_sync.c
75490 @@ -649,7 +649,7 @@ control:
75491 * i.e only increment in_pkts for Templates.
75492 */
75493 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
75494 - int pkts = atomic_add_return(1, &cp->in_pkts);
75495 + int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
75496
75497 if (pkts % sysctl_sync_period(ipvs) != 1)
75498 return;
75499 @@ -795,7 +795,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
75500
75501 if (opt)
75502 memcpy(&cp->in_seq, opt, sizeof(*opt));
75503 - atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
75504 + atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
75505 cp->state = state;
75506 cp->old_state = cp->state;
75507 /*
75508 diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
75509 index 7fd66de..e6fb361 100644
75510 --- a/net/netfilter/ipvs/ip_vs_xmit.c
75511 +++ b/net/netfilter/ipvs/ip_vs_xmit.c
75512 @@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
75513 else
75514 rc = NF_ACCEPT;
75515 /* do not touch skb anymore */
75516 - atomic_inc(&cp->in_pkts);
75517 + atomic_inc_unchecked(&cp->in_pkts);
75518 goto out;
75519 }
75520
75521 @@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
75522 else
75523 rc = NF_ACCEPT;
75524 /* do not touch skb anymore */
75525 - atomic_inc(&cp->in_pkts);
75526 + atomic_inc_unchecked(&cp->in_pkts);
75527 goto out;
75528 }
75529
75530 diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
75531 index 66b2c54..c7884e3 100644
75532 --- a/net/netfilter/nfnetlink_log.c
75533 +++ b/net/netfilter/nfnetlink_log.c
75534 @@ -70,7 +70,7 @@ struct nfulnl_instance {
75535 };
75536
75537 static DEFINE_SPINLOCK(instances_lock);
75538 -static atomic_t global_seq;
75539 +static atomic_unchecked_t global_seq;
75540
75541 #define INSTANCE_BUCKETS 16
75542 static struct hlist_head instance_table[INSTANCE_BUCKETS];
75543 @@ -502,7 +502,7 @@ __build_packet_message(struct nfulnl_instance *inst,
75544 /* global sequence number */
75545 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
75546 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
75547 - htonl(atomic_inc_return(&global_seq)));
75548 + htonl(atomic_inc_return_unchecked(&global_seq)));
75549
75550 if (data_len) {
75551 struct nlattr *nla;
75552 diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
75553 new file mode 100644
75554 index 0000000..6905327
75555 --- /dev/null
75556 +++ b/net/netfilter/xt_gradm.c
75557 @@ -0,0 +1,51 @@
75558 +/*
75559 + * gradm match for netfilter
75560 + * Copyright © Zbigniew Krzystolik, 2010
75561 + *
75562 + * This program is free software; you can redistribute it and/or modify
75563 + * it under the terms of the GNU General Public License; either version
75564 + * 2 or 3 as published by the Free Software Foundation.
75565 + */
75566 +#include <linux/module.h>
75567 +#include <linux/moduleparam.h>
75568 +#include <linux/skbuff.h>
75569 +#include <linux/netfilter/x_tables.h>
75570 +#include <linux/grsecurity.h>
75571 +#include <linux/netfilter/xt_gradm.h>
75572 +
75573 +static bool
75574 +gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
75575 +{
75576 + const struct xt_gradm_mtinfo *info = par->matchinfo;
75577 + bool retval = false;
75578 + if (gr_acl_is_enabled())
75579 + retval = true;
75580 + return retval ^ info->invflags;
75581 +}
75582 +
75583 +static struct xt_match gradm_mt_reg __read_mostly = {
75584 + .name = "gradm",
75585 + .revision = 0,
75586 + .family = NFPROTO_UNSPEC,
75587 + .match = gradm_mt,
75588 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
75589 + .me = THIS_MODULE,
75590 +};
75591 +
75592 +static int __init gradm_mt_init(void)
75593 +{
75594 + return xt_register_match(&gradm_mt_reg);
75595 +}
75596 +
75597 +static void __exit gradm_mt_exit(void)
75598 +{
75599 + xt_unregister_match(&gradm_mt_reg);
75600 +}
75601 +
75602 +module_init(gradm_mt_init);
75603 +module_exit(gradm_mt_exit);
75604 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
75605 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
75606 +MODULE_LICENSE("GPL");
75607 +MODULE_ALIAS("ipt_gradm");
75608 +MODULE_ALIAS("ip6t_gradm");
75609 diff --git a/net/netfilter/xt_statistic.c b/net/netfilter/xt_statistic.c
75610 index 4fe4fb4..87a89e5 100644
75611 --- a/net/netfilter/xt_statistic.c
75612 +++ b/net/netfilter/xt_statistic.c
75613 @@ -19,7 +19,7 @@
75614 #include <linux/module.h>
75615
75616 struct xt_statistic_priv {
75617 - atomic_t count;
75618 + atomic_unchecked_t count;
75619 } ____cacheline_aligned_in_smp;
75620
75621 MODULE_LICENSE("GPL");
75622 @@ -42,9 +42,9 @@ statistic_mt(const struct sk_buff *skb, struct xt_action_param *par)
75623 break;
75624 case XT_STATISTIC_MODE_NTH:
75625 do {
75626 - oval = atomic_read(&info->master->count);
75627 + oval = atomic_read_unchecked(&info->master->count);
75628 nval = (oval == info->u.nth.every) ? 0 : oval + 1;
75629 - } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
75630 + } while (atomic_cmpxchg_unchecked(&info->master->count, oval, nval) != oval);
75631 if (nval == 0)
75632 ret = !ret;
75633 break;
75634 @@ -64,7 +64,7 @@ static int statistic_mt_check(const struct xt_mtchk_param *par)
75635 info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
75636 if (info->master == NULL)
75637 return -ENOMEM;
75638 - atomic_set(&info->master->count, info->u.nth.count);
75639 + atomic_set_unchecked(&info->master->count, info->u.nth.count);
75640
75641 return 0;
75642 }
75643 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
75644 index 629b061..21cd04c 100644
75645 --- a/net/netlink/af_netlink.c
75646 +++ b/net/netlink/af_netlink.c
75647 @@ -741,7 +741,7 @@ static void netlink_overrun(struct sock *sk)
75648 sk->sk_error_report(sk);
75649 }
75650 }
75651 - atomic_inc(&sk->sk_drops);
75652 + atomic_inc_unchecked(&sk->sk_drops);
75653 }
75654
75655 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
75656 @@ -1995,7 +1995,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
75657 sk_wmem_alloc_get(s),
75658 nlk->cb,
75659 atomic_read(&s->sk_refcnt),
75660 - atomic_read(&s->sk_drops),
75661 + atomic_read_unchecked(&s->sk_drops),
75662 sock_i_ino(s)
75663 );
75664
75665 diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
75666 index 7dab229..212156f 100644
75667 --- a/net/netrom/af_netrom.c
75668 +++ b/net/netrom/af_netrom.c
75669 @@ -839,6 +839,7 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
75670 struct sock *sk = sock->sk;
75671 struct nr_sock *nr = nr_sk(sk);
75672
75673 + memset(sax, 0, sizeof(*sax));
75674 lock_sock(sk);
75675 if (peer != 0) {
75676 if (sk->sk_state != TCP_ESTABLISHED) {
75677 @@ -853,7 +854,6 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
75678 *uaddr_len = sizeof(struct full_sockaddr_ax25);
75679 } else {
75680 sax->fsa_ax25.sax25_family = AF_NETROM;
75681 - sax->fsa_ax25.sax25_ndigis = 0;
75682 sax->fsa_ax25.sax25_call = nr->source_addr;
75683 *uaddr_len = sizeof(struct sockaddr_ax25);
75684 }
75685 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
75686 index 2dbb32b..a1b4722 100644
75687 --- a/net/packet/af_packet.c
75688 +++ b/net/packet/af_packet.c
75689 @@ -1676,7 +1676,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
75690
75691 spin_lock(&sk->sk_receive_queue.lock);
75692 po->stats.tp_packets++;
75693 - skb->dropcount = atomic_read(&sk->sk_drops);
75694 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
75695 __skb_queue_tail(&sk->sk_receive_queue, skb);
75696 spin_unlock(&sk->sk_receive_queue.lock);
75697 sk->sk_data_ready(sk, skb->len);
75698 @@ -1685,7 +1685,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
75699 drop_n_acct:
75700 spin_lock(&sk->sk_receive_queue.lock);
75701 po->stats.tp_drops++;
75702 - atomic_inc(&sk->sk_drops);
75703 + atomic_inc_unchecked(&sk->sk_drops);
75704 spin_unlock(&sk->sk_receive_queue.lock);
75705
75706 drop_n_restore:
75707 @@ -3271,7 +3271,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
75708 case PACKET_HDRLEN:
75709 if (len > sizeof(int))
75710 len = sizeof(int);
75711 - if (copy_from_user(&val, optval, len))
75712 + if (len > sizeof(val) || copy_from_user(&val, optval, len))
75713 return -EFAULT;
75714 switch (val) {
75715 case TPACKET_V1:
75716 @@ -3321,7 +3321,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
75717
75718 if (put_user(len, optlen))
75719 return -EFAULT;
75720 - if (copy_to_user(optval, data, len))
75721 + if (len > sizeof(st) || copy_to_user(optval, data, len))
75722 return -EFAULT;
75723 return 0;
75724 }
75725 diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
75726 index d65f699..05aa6ce 100644
75727 --- a/net/phonet/af_phonet.c
75728 +++ b/net/phonet/af_phonet.c
75729 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_proto_get(unsigned int protocol)
75730 {
75731 struct phonet_protocol *pp;
75732
75733 - if (protocol >= PHONET_NPROTO)
75734 + if (protocol < 0 || protocol >= PHONET_NPROTO)
75735 return NULL;
75736
75737 rcu_read_lock();
75738 @@ -469,7 +469,7 @@ int __init_or_module phonet_proto_register(unsigned int protocol,
75739 {
75740 int err = 0;
75741
75742 - if (protocol >= PHONET_NPROTO)
75743 + if (protocol < 0 || protocol >= PHONET_NPROTO)
75744 return -EINVAL;
75745
75746 err = proto_register(pp->prot, 1);
75747 diff --git a/net/phonet/pep.c b/net/phonet/pep.c
75748 index 9f60008..ae96f04 100644
75749 --- a/net/phonet/pep.c
75750 +++ b/net/phonet/pep.c
75751 @@ -388,7 +388,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
75752
75753 case PNS_PEP_CTRL_REQ:
75754 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
75755 - atomic_inc(&sk->sk_drops);
75756 + atomic_inc_unchecked(&sk->sk_drops);
75757 break;
75758 }
75759 __skb_pull(skb, 4);
75760 @@ -409,7 +409,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
75761 }
75762
75763 if (pn->rx_credits == 0) {
75764 - atomic_inc(&sk->sk_drops);
75765 + atomic_inc_unchecked(&sk->sk_drops);
75766 err = -ENOBUFS;
75767 break;
75768 }
75769 @@ -580,7 +580,7 @@ static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
75770 }
75771
75772 if (pn->rx_credits == 0) {
75773 - atomic_inc(&sk->sk_drops);
75774 + atomic_inc_unchecked(&sk->sk_drops);
75775 err = NET_RX_DROP;
75776 break;
75777 }
75778 diff --git a/net/phonet/socket.c b/net/phonet/socket.c
75779 index 4c7eff3..59c727f 100644
75780 --- a/net/phonet/socket.c
75781 +++ b/net/phonet/socket.c
75782 @@ -613,8 +613,13 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
75783 pn->resource, sk->sk_state,
75784 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
75785 sock_i_uid(sk), sock_i_ino(sk),
75786 - atomic_read(&sk->sk_refcnt), sk,
75787 - atomic_read(&sk->sk_drops), &len);
75788 + atomic_read(&sk->sk_refcnt),
75789 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75790 + NULL,
75791 +#else
75792 + sk,
75793 +#endif
75794 + atomic_read_unchecked(&sk->sk_drops), &len);
75795 }
75796 seq_printf(seq, "%*s\n", 127 - len, "");
75797 return 0;
75798 diff --git a/net/rds/cong.c b/net/rds/cong.c
75799 index e5b65ac..f3b6fb7 100644
75800 --- a/net/rds/cong.c
75801 +++ b/net/rds/cong.c
75802 @@ -78,7 +78,7 @@
75803 * finds that the saved generation number is smaller than the global generation
75804 * number, it wakes up the process.
75805 */
75806 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
75807 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
75808
75809 /*
75810 * Congestion monitoring
75811 @@ -233,7 +233,7 @@ void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
75812 rdsdebug("waking map %p for %pI4\n",
75813 map, &map->m_addr);
75814 rds_stats_inc(s_cong_update_received);
75815 - atomic_inc(&rds_cong_generation);
75816 + atomic_inc_unchecked(&rds_cong_generation);
75817 if (waitqueue_active(&map->m_waitq))
75818 wake_up(&map->m_waitq);
75819 if (waitqueue_active(&rds_poll_waitq))
75820 @@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
75821
75822 int rds_cong_updated_since(unsigned long *recent)
75823 {
75824 - unsigned long gen = atomic_read(&rds_cong_generation);
75825 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
75826
75827 if (likely(*recent == gen))
75828 return 0;
75829 diff --git a/net/rds/ib.h b/net/rds/ib.h
75830 index edfaaaf..8c89879 100644
75831 --- a/net/rds/ib.h
75832 +++ b/net/rds/ib.h
75833 @@ -128,7 +128,7 @@ struct rds_ib_connection {
75834 /* sending acks */
75835 unsigned long i_ack_flags;
75836 #ifdef KERNEL_HAS_ATOMIC64
75837 - atomic64_t i_ack_next; /* next ACK to send */
75838 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
75839 #else
75840 spinlock_t i_ack_lock; /* protect i_ack_next */
75841 u64 i_ack_next; /* next ACK to send */
75842 diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
75843 index 51c8689..36c555f 100644
75844 --- a/net/rds/ib_cm.c
75845 +++ b/net/rds/ib_cm.c
75846 @@ -718,7 +718,7 @@ void rds_ib_conn_shutdown(struct rds_connection *conn)
75847 /* Clear the ACK state */
75848 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
75849 #ifdef KERNEL_HAS_ATOMIC64
75850 - atomic64_set(&ic->i_ack_next, 0);
75851 + atomic64_set_unchecked(&ic->i_ack_next, 0);
75852 #else
75853 ic->i_ack_next = 0;
75854 #endif
75855 diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
75856 index e29e0ca..fa3a6a3 100644
75857 --- a/net/rds/ib_recv.c
75858 +++ b/net/rds/ib_recv.c
75859 @@ -592,7 +592,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
75860 static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
75861 int ack_required)
75862 {
75863 - atomic64_set(&ic->i_ack_next, seq);
75864 + atomic64_set_unchecked(&ic->i_ack_next, seq);
75865 if (ack_required) {
75866 smp_mb__before_clear_bit();
75867 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
75868 @@ -604,7 +604,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
75869 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
75870 smp_mb__after_clear_bit();
75871
75872 - return atomic64_read(&ic->i_ack_next);
75873 + return atomic64_read_unchecked(&ic->i_ack_next);
75874 }
75875 #endif
75876
75877 diff --git a/net/rds/iw.h b/net/rds/iw.h
75878 index 04ce3b1..48119a6 100644
75879 --- a/net/rds/iw.h
75880 +++ b/net/rds/iw.h
75881 @@ -134,7 +134,7 @@ struct rds_iw_connection {
75882 /* sending acks */
75883 unsigned long i_ack_flags;
75884 #ifdef KERNEL_HAS_ATOMIC64
75885 - atomic64_t i_ack_next; /* next ACK to send */
75886 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
75887 #else
75888 spinlock_t i_ack_lock; /* protect i_ack_next */
75889 u64 i_ack_next; /* next ACK to send */
75890 diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
75891 index 9556d28..f046d0e 100644
75892 --- a/net/rds/iw_cm.c
75893 +++ b/net/rds/iw_cm.c
75894 @@ -663,7 +663,7 @@ void rds_iw_conn_shutdown(struct rds_connection *conn)
75895 /* Clear the ACK state */
75896 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
75897 #ifdef KERNEL_HAS_ATOMIC64
75898 - atomic64_set(&ic->i_ack_next, 0);
75899 + atomic64_set_unchecked(&ic->i_ack_next, 0);
75900 #else
75901 ic->i_ack_next = 0;
75902 #endif
75903 diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
75904 index 5e57347..3916042 100644
75905 --- a/net/rds/iw_recv.c
75906 +++ b/net/rds/iw_recv.c
75907 @@ -427,7 +427,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
75908 static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
75909 int ack_required)
75910 {
75911 - atomic64_set(&ic->i_ack_next, seq);
75912 + atomic64_set_unchecked(&ic->i_ack_next, seq);
75913 if (ack_required) {
75914 smp_mb__before_clear_bit();
75915 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
75916 @@ -439,7 +439,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
75917 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
75918 smp_mb__after_clear_bit();
75919
75920 - return atomic64_read(&ic->i_ack_next);
75921 + return atomic64_read_unchecked(&ic->i_ack_next);
75922 }
75923 #endif
75924
75925 diff --git a/net/rds/tcp.c b/net/rds/tcp.c
75926 index edac9ef..16bcb98 100644
75927 --- a/net/rds/tcp.c
75928 +++ b/net/rds/tcp.c
75929 @@ -59,7 +59,7 @@ void rds_tcp_nonagle(struct socket *sock)
75930 int val = 1;
75931
75932 set_fs(KERNEL_DS);
75933 - sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val,
75934 + sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __force_user *)&val,
75935 sizeof(val));
75936 set_fs(oldfs);
75937 }
75938 diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
75939 index 1b4fd68..2234175 100644
75940 --- a/net/rds/tcp_send.c
75941 +++ b/net/rds/tcp_send.c
75942 @@ -43,7 +43,7 @@ static void rds_tcp_cork(struct socket *sock, int val)
75943
75944 oldfs = get_fs();
75945 set_fs(KERNEL_DS);
75946 - sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __user *)&val,
75947 + sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __force_user *)&val,
75948 sizeof(val));
75949 set_fs(oldfs);
75950 }
75951 diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
75952 index 74c064c..fdec26f 100644
75953 --- a/net/rxrpc/af_rxrpc.c
75954 +++ b/net/rxrpc/af_rxrpc.c
75955 @@ -39,7 +39,7 @@ static const struct proto_ops rxrpc_rpc_ops;
75956 __be32 rxrpc_epoch;
75957
75958 /* current debugging ID */
75959 -atomic_t rxrpc_debug_id;
75960 +atomic_unchecked_t rxrpc_debug_id;
75961
75962 /* count of skbs currently in use */
75963 atomic_t rxrpc_n_skbs;
75964 diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
75965 index c3126e8..21facc7 100644
75966 --- a/net/rxrpc/ar-ack.c
75967 +++ b/net/rxrpc/ar-ack.c
75968 @@ -175,7 +175,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
75969
75970 _enter("{%d,%d,%d,%d},",
75971 call->acks_hard, call->acks_unacked,
75972 - atomic_read(&call->sequence),
75973 + atomic_read_unchecked(&call->sequence),
75974 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
75975
75976 stop = 0;
75977 @@ -199,7 +199,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
75978
75979 /* each Tx packet has a new serial number */
75980 sp->hdr.serial =
75981 - htonl(atomic_inc_return(&call->conn->serial));
75982 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
75983
75984 hdr = (struct rxrpc_header *) txb->head;
75985 hdr->serial = sp->hdr.serial;
75986 @@ -403,7 +403,7 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
75987 */
75988 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
75989 {
75990 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
75991 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
75992 }
75993
75994 /*
75995 @@ -629,7 +629,7 @@ process_further:
75996
75997 latest = ntohl(sp->hdr.serial);
75998 hard = ntohl(ack.firstPacket);
75999 - tx = atomic_read(&call->sequence);
76000 + tx = atomic_read_unchecked(&call->sequence);
76001
76002 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
76003 latest,
76004 @@ -1161,7 +1161,7 @@ void rxrpc_process_call(struct work_struct *work)
76005 goto maybe_reschedule;
76006
76007 send_ACK_with_skew:
76008 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
76009 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
76010 ntohl(ack.serial));
76011 send_ACK:
76012 mtu = call->conn->trans->peer->if_mtu;
76013 @@ -1173,7 +1173,7 @@ send_ACK:
76014 ackinfo.rxMTU = htonl(5692);
76015 ackinfo.jumbo_max = htonl(4);
76016
76017 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
76018 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
76019 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
76020 ntohl(hdr.serial),
76021 ntohs(ack.maxSkew),
76022 @@ -1191,7 +1191,7 @@ send_ACK:
76023 send_message:
76024 _debug("send message");
76025
76026 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
76027 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
76028 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
76029 send_message_2:
76030
76031 diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
76032 index bf656c2..48f9d27 100644
76033 --- a/net/rxrpc/ar-call.c
76034 +++ b/net/rxrpc/ar-call.c
76035 @@ -83,7 +83,7 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
76036 spin_lock_init(&call->lock);
76037 rwlock_init(&call->state_lock);
76038 atomic_set(&call->usage, 1);
76039 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
76040 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
76041 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
76042
76043 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
76044 diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
76045 index 4106ca9..a338d7a 100644
76046 --- a/net/rxrpc/ar-connection.c
76047 +++ b/net/rxrpc/ar-connection.c
76048 @@ -206,7 +206,7 @@ static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
76049 rwlock_init(&conn->lock);
76050 spin_lock_init(&conn->state_lock);
76051 atomic_set(&conn->usage, 1);
76052 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
76053 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
76054 conn->avail_calls = RXRPC_MAXCALLS;
76055 conn->size_align = 4;
76056 conn->header_size = sizeof(struct rxrpc_header);
76057 diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c
76058 index e7ed43a..6afa140 100644
76059 --- a/net/rxrpc/ar-connevent.c
76060 +++ b/net/rxrpc/ar-connevent.c
76061 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
76062
76063 len = iov[0].iov_len + iov[1].iov_len;
76064
76065 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
76066 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
76067 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
76068
76069 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
76070 diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
76071 index 1a2b0633..e8d1382 100644
76072 --- a/net/rxrpc/ar-input.c
76073 +++ b/net/rxrpc/ar-input.c
76074 @@ -340,9 +340,9 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
76075 /* track the latest serial number on this connection for ACK packet
76076 * information */
76077 serial = ntohl(sp->hdr.serial);
76078 - hi_serial = atomic_read(&call->conn->hi_serial);
76079 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
76080 while (serial > hi_serial)
76081 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
76082 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
76083 serial);
76084
76085 /* request ACK generation for any ACK or DATA packet that requests
76086 diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
76087 index 8e22bd3..f66d1c0 100644
76088 --- a/net/rxrpc/ar-internal.h
76089 +++ b/net/rxrpc/ar-internal.h
76090 @@ -272,8 +272,8 @@ struct rxrpc_connection {
76091 int error; /* error code for local abort */
76092 int debug_id; /* debug ID for printks */
76093 unsigned call_counter; /* call ID counter */
76094 - atomic_t serial; /* packet serial number counter */
76095 - atomic_t hi_serial; /* highest serial number received */
76096 + atomic_unchecked_t serial; /* packet serial number counter */
76097 + atomic_unchecked_t hi_serial; /* highest serial number received */
76098 u8 avail_calls; /* number of calls available */
76099 u8 size_align; /* data size alignment (for security) */
76100 u8 header_size; /* rxrpc + security header size */
76101 @@ -346,7 +346,7 @@ struct rxrpc_call {
76102 spinlock_t lock;
76103 rwlock_t state_lock; /* lock for state transition */
76104 atomic_t usage;
76105 - atomic_t sequence; /* Tx data packet sequence counter */
76106 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
76107 u32 abort_code; /* local/remote abort code */
76108 enum { /* current state of call */
76109 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
76110 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code)
76111 */
76112 extern atomic_t rxrpc_n_skbs;
76113 extern __be32 rxrpc_epoch;
76114 -extern atomic_t rxrpc_debug_id;
76115 +extern atomic_unchecked_t rxrpc_debug_id;
76116 extern struct workqueue_struct *rxrpc_workqueue;
76117
76118 /*
76119 diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c
76120 index 87f7135..74d3703 100644
76121 --- a/net/rxrpc/ar-local.c
76122 +++ b/net/rxrpc/ar-local.c
76123 @@ -45,7 +45,7 @@ struct rxrpc_local *rxrpc_alloc_local(struct sockaddr_rxrpc *srx)
76124 spin_lock_init(&local->lock);
76125 rwlock_init(&local->services_lock);
76126 atomic_set(&local->usage, 1);
76127 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
76128 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
76129 memcpy(&local->srx, srx, sizeof(*srx));
76130 }
76131
76132 diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
76133 index 16ae887..d24f12b 100644
76134 --- a/net/rxrpc/ar-output.c
76135 +++ b/net/rxrpc/ar-output.c
76136 @@ -682,9 +682,9 @@ static int rxrpc_send_data(struct kiocb *iocb,
76137 sp->hdr.cid = call->cid;
76138 sp->hdr.callNumber = call->call_id;
76139 sp->hdr.seq =
76140 - htonl(atomic_inc_return(&call->sequence));
76141 + htonl(atomic_inc_return_unchecked(&call->sequence));
76142 sp->hdr.serial =
76143 - htonl(atomic_inc_return(&conn->serial));
76144 + htonl(atomic_inc_return_unchecked(&conn->serial));
76145 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
76146 sp->hdr.userStatus = 0;
76147 sp->hdr.securityIndex = conn->security_ix;
76148 diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
76149 index 2754f09..b20e38f 100644
76150 --- a/net/rxrpc/ar-peer.c
76151 +++ b/net/rxrpc/ar-peer.c
76152 @@ -72,7 +72,7 @@ static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx,
76153 INIT_LIST_HEAD(&peer->error_targets);
76154 spin_lock_init(&peer->lock);
76155 atomic_set(&peer->usage, 1);
76156 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
76157 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
76158 memcpy(&peer->srx, srx, sizeof(*srx));
76159
76160 rxrpc_assess_MTU_size(peer);
76161 diff --git a/net/rxrpc/ar-proc.c b/net/rxrpc/ar-proc.c
76162 index 38047f7..9f48511 100644
76163 --- a/net/rxrpc/ar-proc.c
76164 +++ b/net/rxrpc/ar-proc.c
76165 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
76166 atomic_read(&conn->usage),
76167 rxrpc_conn_states[conn->state],
76168 key_serial(conn->key),
76169 - atomic_read(&conn->serial),
76170 - atomic_read(&conn->hi_serial));
76171 + atomic_read_unchecked(&conn->serial),
76172 + atomic_read_unchecked(&conn->hi_serial));
76173
76174 return 0;
76175 }
76176 diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c
76177 index 92df566..87ec1bf 100644
76178 --- a/net/rxrpc/ar-transport.c
76179 +++ b/net/rxrpc/ar-transport.c
76180 @@ -47,7 +47,7 @@ static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local,
76181 spin_lock_init(&trans->client_lock);
76182 rwlock_init(&trans->conn_lock);
76183 atomic_set(&trans->usage, 1);
76184 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
76185 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
76186
76187 if (peer->srx.transport.family == AF_INET) {
76188 switch (peer->srx.transport_type) {
76189 diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
76190 index 7635107..4670276 100644
76191 --- a/net/rxrpc/rxkad.c
76192 +++ b/net/rxrpc/rxkad.c
76193 @@ -610,7 +610,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
76194
76195 len = iov[0].iov_len + iov[1].iov_len;
76196
76197 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
76198 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
76199 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
76200
76201 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
76202 @@ -660,7 +660,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
76203
76204 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
76205
76206 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
76207 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
76208 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
76209
76210 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
76211 diff --git a/net/sctp/proc.c b/net/sctp/proc.c
76212 index 1e2eee8..ce3967e 100644
76213 --- a/net/sctp/proc.c
76214 +++ b/net/sctp/proc.c
76215 @@ -319,7 +319,8 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
76216 seq_printf(seq,
76217 "%8pK %8pK %-3d %-3d %-2d %-4d "
76218 "%4d %8d %8d %7d %5lu %-5d %5d ",
76219 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
76220 + assoc, sk,
76221 + sctp_sk(sk)->type, sk->sk_state,
76222 assoc->state, hash,
76223 assoc->assoc_id,
76224 assoc->sndbuf_used,
76225 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
76226 index 408ebd0..202aa85 100644
76227 --- a/net/sctp/socket.c
76228 +++ b/net/sctp/socket.c
76229 @@ -4574,7 +4574,7 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
76230 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
76231 if (space_left < addrlen)
76232 return -ENOMEM;
76233 - if (copy_to_user(to, &temp, addrlen))
76234 + if (addrlen > sizeof(temp) || copy_to_user(to, &temp, addrlen))
76235 return -EFAULT;
76236 to += addrlen;
76237 cnt++;
76238 diff --git a/net/socket.c b/net/socket.c
76239 index 28a96af..61a7a06 100644
76240 --- a/net/socket.c
76241 +++ b/net/socket.c
76242 @@ -88,6 +88,7 @@
76243 #include <linux/nsproxy.h>
76244 #include <linux/magic.h>
76245 #include <linux/slab.h>
76246 +#include <linux/in.h>
76247
76248 #include <asm/uaccess.h>
76249 #include <asm/unistd.h>
76250 @@ -105,6 +106,8 @@
76251 #include <linux/sockios.h>
76252 #include <linux/atalk.h>
76253
76254 +#include <linux/grsock.h>
76255 +
76256 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
76257 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
76258 unsigned long nr_segs, loff_t pos);
76259 @@ -321,7 +324,7 @@ static struct dentry *sockfs_mount(struct file_system_type *fs_type,
76260 &sockfs_dentry_operations, SOCKFS_MAGIC);
76261 }
76262
76263 -static struct vfsmount *sock_mnt __read_mostly;
76264 +struct vfsmount *sock_mnt __read_mostly;
76265
76266 static struct file_system_type sock_fs_type = {
76267 .name = "sockfs",
76268 @@ -1207,6 +1210,8 @@ int __sock_create(struct net *net, int family, int type, int protocol,
76269 return -EAFNOSUPPORT;
76270 if (type < 0 || type >= SOCK_MAX)
76271 return -EINVAL;
76272 + if (protocol < 0)
76273 + return -EINVAL;
76274
76275 /* Compatibility.
76276
76277 @@ -1339,6 +1344,16 @@ SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
76278 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
76279 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
76280
76281 + if(!gr_search_socket(family, type, protocol)) {
76282 + retval = -EACCES;
76283 + goto out;
76284 + }
76285 +
76286 + if (gr_handle_sock_all(family, type, protocol)) {
76287 + retval = -EACCES;
76288 + goto out;
76289 + }
76290 +
76291 retval = sock_create(family, type, protocol, &sock);
76292 if (retval < 0)
76293 goto out;
76294 @@ -1451,6 +1466,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
76295 if (sock) {
76296 err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
76297 if (err >= 0) {
76298 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
76299 + err = -EACCES;
76300 + goto error;
76301 + }
76302 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
76303 + if (err)
76304 + goto error;
76305 +
76306 err = security_socket_bind(sock,
76307 (struct sockaddr *)&address,
76308 addrlen);
76309 @@ -1459,6 +1482,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
76310 (struct sockaddr *)
76311 &address, addrlen);
76312 }
76313 +error:
76314 fput_light(sock->file, fput_needed);
76315 }
76316 return err;
76317 @@ -1482,10 +1506,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
76318 if ((unsigned)backlog > somaxconn)
76319 backlog = somaxconn;
76320
76321 + if (gr_handle_sock_server_other(sock->sk)) {
76322 + err = -EPERM;
76323 + goto error;
76324 + }
76325 +
76326 + err = gr_search_listen(sock);
76327 + if (err)
76328 + goto error;
76329 +
76330 err = security_socket_listen(sock, backlog);
76331 if (!err)
76332 err = sock->ops->listen(sock, backlog);
76333
76334 +error:
76335 fput_light(sock->file, fput_needed);
76336 }
76337 return err;
76338 @@ -1529,6 +1563,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
76339 newsock->type = sock->type;
76340 newsock->ops = sock->ops;
76341
76342 + if (gr_handle_sock_server_other(sock->sk)) {
76343 + err = -EPERM;
76344 + sock_release(newsock);
76345 + goto out_put;
76346 + }
76347 +
76348 + err = gr_search_accept(sock);
76349 + if (err) {
76350 + sock_release(newsock);
76351 + goto out_put;
76352 + }
76353 +
76354 /*
76355 * We don't need try_module_get here, as the listening socket (sock)
76356 * has the protocol module (sock->ops->owner) held.
76357 @@ -1567,6 +1613,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
76358 fd_install(newfd, newfile);
76359 err = newfd;
76360
76361 + gr_attach_curr_ip(newsock->sk);
76362 +
76363 out_put:
76364 fput_light(sock->file, fput_needed);
76365 out:
76366 @@ -1599,6 +1647,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
76367 int, addrlen)
76368 {
76369 struct socket *sock;
76370 + struct sockaddr *sck;
76371 struct sockaddr_storage address;
76372 int err, fput_needed;
76373
76374 @@ -1609,6 +1658,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
76375 if (err < 0)
76376 goto out_put;
76377
76378 + sck = (struct sockaddr *)&address;
76379 +
76380 + if (gr_handle_sock_client(sck)) {
76381 + err = -EACCES;
76382 + goto out_put;
76383 + }
76384 +
76385 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
76386 + if (err)
76387 + goto out_put;
76388 +
76389 err =
76390 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
76391 if (err)
76392 @@ -1970,7 +2030,7 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
76393 * checking falls down on this.
76394 */
76395 if (copy_from_user(ctl_buf,
76396 - (void __user __force *)msg_sys->msg_control,
76397 + (void __force_user *)msg_sys->msg_control,
76398 ctl_len))
76399 goto out_freectl;
76400 msg_sys->msg_control = ctl_buf;
76401 @@ -2140,7 +2200,7 @@ static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
76402 * kernel msghdr to use the kernel address space)
76403 */
76404
76405 - uaddr = (__force void __user *)msg_sys->msg_name;
76406 + uaddr = (void __force_user *)msg_sys->msg_name;
76407 uaddr_len = COMPAT_NAMELEN(msg);
76408 if (MSG_CMSG_COMPAT & flags) {
76409 err = verify_compat_iovec(msg_sys, iov,
76410 @@ -2768,7 +2828,7 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
76411 }
76412
76413 ifr = compat_alloc_user_space(buf_size);
76414 - rxnfc = (void *)ifr + ALIGN(sizeof(struct ifreq), 8);
76415 + rxnfc = (void __user *)ifr + ALIGN(sizeof(struct ifreq), 8);
76416
76417 if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
76418 return -EFAULT;
76419 @@ -2792,12 +2852,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
76420 offsetof(struct ethtool_rxnfc, fs.ring_cookie));
76421
76422 if (copy_in_user(rxnfc, compat_rxnfc,
76423 - (void *)(&rxnfc->fs.m_ext + 1) -
76424 - (void *)rxnfc) ||
76425 + (void __user *)(&rxnfc->fs.m_ext + 1) -
76426 + (void __user *)rxnfc) ||
76427 copy_in_user(&rxnfc->fs.ring_cookie,
76428 &compat_rxnfc->fs.ring_cookie,
76429 - (void *)(&rxnfc->fs.location + 1) -
76430 - (void *)&rxnfc->fs.ring_cookie) ||
76431 + (void __user *)(&rxnfc->fs.location + 1) -
76432 + (void __user *)&rxnfc->fs.ring_cookie) ||
76433 copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt,
76434 sizeof(rxnfc->rule_cnt)))
76435 return -EFAULT;
76436 @@ -2809,12 +2869,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
76437
76438 if (convert_out) {
76439 if (copy_in_user(compat_rxnfc, rxnfc,
76440 - (const void *)(&rxnfc->fs.m_ext + 1) -
76441 - (const void *)rxnfc) ||
76442 + (const void __user *)(&rxnfc->fs.m_ext + 1) -
76443 + (const void __user *)rxnfc) ||
76444 copy_in_user(&compat_rxnfc->fs.ring_cookie,
76445 &rxnfc->fs.ring_cookie,
76446 - (const void *)(&rxnfc->fs.location + 1) -
76447 - (const void *)&rxnfc->fs.ring_cookie) ||
76448 + (const void __user *)(&rxnfc->fs.location + 1) -
76449 + (const void __user *)&rxnfc->fs.ring_cookie) ||
76450 copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt,
76451 sizeof(rxnfc->rule_cnt)))
76452 return -EFAULT;
76453 @@ -2884,7 +2944,7 @@ static int bond_ioctl(struct net *net, unsigned int cmd,
76454 old_fs = get_fs();
76455 set_fs(KERNEL_DS);
76456 err = dev_ioctl(net, cmd,
76457 - (struct ifreq __user __force *) &kifr);
76458 + (struct ifreq __force_user *) &kifr);
76459 set_fs(old_fs);
76460
76461 return err;
76462 @@ -2993,7 +3053,7 @@ static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
76463
76464 old_fs = get_fs();
76465 set_fs(KERNEL_DS);
76466 - err = dev_ioctl(net, cmd, (void __user __force *)&ifr);
76467 + err = dev_ioctl(net, cmd, (void __force_user *)&ifr);
76468 set_fs(old_fs);
76469
76470 if (cmd == SIOCGIFMAP && !err) {
76471 @@ -3098,7 +3158,7 @@ static int routing_ioctl(struct net *net, struct socket *sock,
76472 ret |= __get_user(rtdev, &(ur4->rt_dev));
76473 if (rtdev) {
76474 ret |= copy_from_user(devname, compat_ptr(rtdev), 15);
76475 - r4.rt_dev = (char __user __force *)devname;
76476 + r4.rt_dev = (char __force_user *)devname;
76477 devname[15] = 0;
76478 } else
76479 r4.rt_dev = NULL;
76480 @@ -3324,8 +3384,8 @@ int kernel_getsockopt(struct socket *sock, int level, int optname,
76481 int __user *uoptlen;
76482 int err;
76483
76484 - uoptval = (char __user __force *) optval;
76485 - uoptlen = (int __user __force *) optlen;
76486 + uoptval = (char __force_user *) optval;
76487 + uoptlen = (int __force_user *) optlen;
76488
76489 set_fs(KERNEL_DS);
76490 if (level == SOL_SOCKET)
76491 @@ -3345,7 +3405,7 @@ int kernel_setsockopt(struct socket *sock, int level, int optname,
76492 char __user *uoptval;
76493 int err;
76494
76495 - uoptval = (char __user __force *) optval;
76496 + uoptval = (char __force_user *) optval;
76497
76498 set_fs(KERNEL_DS);
76499 if (level == SOL_SOCKET)
76500 diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
76501 index 3341d89..c662621 100644
76502 --- a/net/sunrpc/sched.c
76503 +++ b/net/sunrpc/sched.c
76504 @@ -239,9 +239,9 @@ static int rpc_wait_bit_killable(void *word)
76505 #ifdef RPC_DEBUG
76506 static void rpc_task_set_debuginfo(struct rpc_task *task)
76507 {
76508 - static atomic_t rpc_pid;
76509 + static atomic_unchecked_t rpc_pid;
76510
76511 - task->tk_pid = atomic_inc_return(&rpc_pid);
76512 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
76513 }
76514 #else
76515 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
76516 diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
76517 index 4645709..d41d668 100644
76518 --- a/net/sunrpc/svcsock.c
76519 +++ b/net/sunrpc/svcsock.c
76520 @@ -396,7 +396,7 @@ static int svc_partial_recvfrom(struct svc_rqst *rqstp,
76521 int buflen, unsigned int base)
76522 {
76523 size_t save_iovlen;
76524 - void __user *save_iovbase;
76525 + void *save_iovbase;
76526 unsigned int i;
76527 int ret;
76528
76529 diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
76530 index 09af4fa..77110a9 100644
76531 --- a/net/sunrpc/xprtrdma/svc_rdma.c
76532 +++ b/net/sunrpc/xprtrdma/svc_rdma.c
76533 @@ -61,15 +61,15 @@ unsigned int svcrdma_max_req_size = RPCRDMA_MAX_REQ_SIZE;
76534 static unsigned int min_max_inline = 4096;
76535 static unsigned int max_max_inline = 65536;
76536
76537 -atomic_t rdma_stat_recv;
76538 -atomic_t rdma_stat_read;
76539 -atomic_t rdma_stat_write;
76540 -atomic_t rdma_stat_sq_starve;
76541 -atomic_t rdma_stat_rq_starve;
76542 -atomic_t rdma_stat_rq_poll;
76543 -atomic_t rdma_stat_rq_prod;
76544 -atomic_t rdma_stat_sq_poll;
76545 -atomic_t rdma_stat_sq_prod;
76546 +atomic_unchecked_t rdma_stat_recv;
76547 +atomic_unchecked_t rdma_stat_read;
76548 +atomic_unchecked_t rdma_stat_write;
76549 +atomic_unchecked_t rdma_stat_sq_starve;
76550 +atomic_unchecked_t rdma_stat_rq_starve;
76551 +atomic_unchecked_t rdma_stat_rq_poll;
76552 +atomic_unchecked_t rdma_stat_rq_prod;
76553 +atomic_unchecked_t rdma_stat_sq_poll;
76554 +atomic_unchecked_t rdma_stat_sq_prod;
76555
76556 /* Temporary NFS request map and context caches */
76557 struct kmem_cache *svc_rdma_map_cachep;
76558 @@ -109,7 +109,7 @@ static int read_reset_stat(ctl_table *table, int write,
76559 len -= *ppos;
76560 if (len > *lenp)
76561 len = *lenp;
76562 - if (len && copy_to_user(buffer, str_buf, len))
76563 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
76564 return -EFAULT;
76565 *lenp = len;
76566 *ppos += len;
76567 @@ -150,63 +150,63 @@ static ctl_table svcrdma_parm_table[] = {
76568 {
76569 .procname = "rdma_stat_read",
76570 .data = &rdma_stat_read,
76571 - .maxlen = sizeof(atomic_t),
76572 + .maxlen = sizeof(atomic_unchecked_t),
76573 .mode = 0644,
76574 .proc_handler = read_reset_stat,
76575 },
76576 {
76577 .procname = "rdma_stat_recv",
76578 .data = &rdma_stat_recv,
76579 - .maxlen = sizeof(atomic_t),
76580 + .maxlen = sizeof(atomic_unchecked_t),
76581 .mode = 0644,
76582 .proc_handler = read_reset_stat,
76583 },
76584 {
76585 .procname = "rdma_stat_write",
76586 .data = &rdma_stat_write,
76587 - .maxlen = sizeof(atomic_t),
76588 + .maxlen = sizeof(atomic_unchecked_t),
76589 .mode = 0644,
76590 .proc_handler = read_reset_stat,
76591 },
76592 {
76593 .procname = "rdma_stat_sq_starve",
76594 .data = &rdma_stat_sq_starve,
76595 - .maxlen = sizeof(atomic_t),
76596 + .maxlen = sizeof(atomic_unchecked_t),
76597 .mode = 0644,
76598 .proc_handler = read_reset_stat,
76599 },
76600 {
76601 .procname = "rdma_stat_rq_starve",
76602 .data = &rdma_stat_rq_starve,
76603 - .maxlen = sizeof(atomic_t),
76604 + .maxlen = sizeof(atomic_unchecked_t),
76605 .mode = 0644,
76606 .proc_handler = read_reset_stat,
76607 },
76608 {
76609 .procname = "rdma_stat_rq_poll",
76610 .data = &rdma_stat_rq_poll,
76611 - .maxlen = sizeof(atomic_t),
76612 + .maxlen = sizeof(atomic_unchecked_t),
76613 .mode = 0644,
76614 .proc_handler = read_reset_stat,
76615 },
76616 {
76617 .procname = "rdma_stat_rq_prod",
76618 .data = &rdma_stat_rq_prod,
76619 - .maxlen = sizeof(atomic_t),
76620 + .maxlen = sizeof(atomic_unchecked_t),
76621 .mode = 0644,
76622 .proc_handler = read_reset_stat,
76623 },
76624 {
76625 .procname = "rdma_stat_sq_poll",
76626 .data = &rdma_stat_sq_poll,
76627 - .maxlen = sizeof(atomic_t),
76628 + .maxlen = sizeof(atomic_unchecked_t),
76629 .mode = 0644,
76630 .proc_handler = read_reset_stat,
76631 },
76632 {
76633 .procname = "rdma_stat_sq_prod",
76634 .data = &rdma_stat_sq_prod,
76635 - .maxlen = sizeof(atomic_t),
76636 + .maxlen = sizeof(atomic_unchecked_t),
76637 .mode = 0644,
76638 .proc_handler = read_reset_stat,
76639 },
76640 diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
76641 index df67211..c354b13 100644
76642 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
76643 +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
76644 @@ -499,7 +499,7 @@ next_sge:
76645 svc_rdma_put_context(ctxt, 0);
76646 goto out;
76647 }
76648 - atomic_inc(&rdma_stat_read);
76649 + atomic_inc_unchecked(&rdma_stat_read);
76650
76651 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
76652 chl_map->ch[ch_no].count -= read_wr.num_sge;
76653 @@ -609,7 +609,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
76654 dto_q);
76655 list_del_init(&ctxt->dto_q);
76656 } else {
76657 - atomic_inc(&rdma_stat_rq_starve);
76658 + atomic_inc_unchecked(&rdma_stat_rq_starve);
76659 clear_bit(XPT_DATA, &xprt->xpt_flags);
76660 ctxt = NULL;
76661 }
76662 @@ -629,7 +629,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
76663 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
76664 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
76665 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
76666 - atomic_inc(&rdma_stat_recv);
76667 + atomic_inc_unchecked(&rdma_stat_recv);
76668
76669 /* Build up the XDR from the receive buffers. */
76670 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
76671 diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
76672 index 249a835..fb2794b 100644
76673 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
76674 +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
76675 @@ -362,7 +362,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
76676 write_wr.wr.rdma.remote_addr = to;
76677
76678 /* Post It */
76679 - atomic_inc(&rdma_stat_write);
76680 + atomic_inc_unchecked(&rdma_stat_write);
76681 if (svc_rdma_send(xprt, &write_wr))
76682 goto err;
76683 return 0;
76684 diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
76685 index 894cb42..cf5bafb 100644
76686 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
76687 +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
76688 @@ -300,7 +300,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
76689 return;
76690
76691 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
76692 - atomic_inc(&rdma_stat_rq_poll);
76693 + atomic_inc_unchecked(&rdma_stat_rq_poll);
76694
76695 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
76696 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
76697 @@ -322,7 +322,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
76698 }
76699
76700 if (ctxt)
76701 - atomic_inc(&rdma_stat_rq_prod);
76702 + atomic_inc_unchecked(&rdma_stat_rq_prod);
76703
76704 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
76705 /*
76706 @@ -394,7 +394,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
76707 return;
76708
76709 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
76710 - atomic_inc(&rdma_stat_sq_poll);
76711 + atomic_inc_unchecked(&rdma_stat_sq_poll);
76712 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
76713 if (wc.status != IB_WC_SUCCESS)
76714 /* Close the transport */
76715 @@ -412,7 +412,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
76716 }
76717
76718 if (ctxt)
76719 - atomic_inc(&rdma_stat_sq_prod);
76720 + atomic_inc_unchecked(&rdma_stat_sq_prod);
76721 }
76722
76723 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
76724 @@ -1274,7 +1274,7 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
76725 spin_lock_bh(&xprt->sc_lock);
76726 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
76727 spin_unlock_bh(&xprt->sc_lock);
76728 - atomic_inc(&rdma_stat_sq_starve);
76729 + atomic_inc_unchecked(&rdma_stat_sq_starve);
76730
76731 /* See if we can opportunistically reap SQ WR to make room */
76732 sq_cq_reap(xprt);
76733 diff --git a/net/sysctl_net.c b/net/sysctl_net.c
76734 index e758139..d29ea47 100644
76735 --- a/net/sysctl_net.c
76736 +++ b/net/sysctl_net.c
76737 @@ -47,7 +47,7 @@ static int net_ctl_permissions(struct ctl_table_root *root,
76738 struct ctl_table *table)
76739 {
76740 /* Allow network administrator to have same access as root. */
76741 - if (capable(CAP_NET_ADMIN)) {
76742 + if (capable_nolog(CAP_NET_ADMIN)) {
76743 int mode = (table->mode >> 6) & 7;
76744 return (mode << 6) | (mode << 3) | mode;
76745 }
76746 diff --git a/net/tipc/link.c b/net/tipc/link.c
76747 index ac1832a..533ed97 100644
76748 --- a/net/tipc/link.c
76749 +++ b/net/tipc/link.c
76750 @@ -1205,7 +1205,7 @@ static int link_send_sections_long(struct tipc_port *sender,
76751 struct tipc_msg fragm_hdr;
76752 struct sk_buff *buf, *buf_chain, *prev;
76753 u32 fragm_crs, fragm_rest, hsz, sect_rest;
76754 - const unchar *sect_crs;
76755 + const unchar __user *sect_crs;
76756 int curr_sect;
76757 u32 fragm_no;
76758
76759 @@ -1249,7 +1249,7 @@ again:
76760
76761 if (!sect_rest) {
76762 sect_rest = msg_sect[++curr_sect].iov_len;
76763 - sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
76764 + sect_crs = (const unchar __user *)msg_sect[curr_sect].iov_base;
76765 }
76766
76767 if (sect_rest < fragm_rest)
76768 @@ -1268,7 +1268,7 @@ error:
76769 }
76770 } else
76771 skb_copy_to_linear_data_offset(buf, fragm_crs,
76772 - sect_crs, sz);
76773 + (const void __force_kernel *)sect_crs, sz);
76774 sect_crs += sz;
76775 sect_rest -= sz;
76776 fragm_crs += sz;
76777 diff --git a/net/tipc/msg.c b/net/tipc/msg.c
76778 index 3e4d3e2..27b55dc 100644
76779 --- a/net/tipc/msg.c
76780 +++ b/net/tipc/msg.c
76781 @@ -99,7 +99,7 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
76782 msg_sect[cnt].iov_len);
76783 else
76784 skb_copy_to_linear_data_offset(*buf, pos,
76785 - msg_sect[cnt].iov_base,
76786 + (const void __force_kernel *)msg_sect[cnt].iov_base,
76787 msg_sect[cnt].iov_len);
76788 pos += msg_sect[cnt].iov_len;
76789 }
76790 diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
76791 index 8c49566..14510cb 100644
76792 --- a/net/tipc/subscr.c
76793 +++ b/net/tipc/subscr.c
76794 @@ -101,7 +101,7 @@ static void subscr_send_event(struct tipc_subscription *sub,
76795 {
76796 struct iovec msg_sect;
76797
76798 - msg_sect.iov_base = (void *)&sub->evt;
76799 + msg_sect.iov_base = (void __force_user *)&sub->evt;
76800 msg_sect.iov_len = sizeof(struct tipc_event);
76801
76802 sub->evt.event = htohl(event, sub->swap);
76803 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
76804 index 85d3bb7..79f4487 100644
76805 --- a/net/unix/af_unix.c
76806 +++ b/net/unix/af_unix.c
76807 @@ -770,6 +770,12 @@ static struct sock *unix_find_other(struct net *net,
76808 err = -ECONNREFUSED;
76809 if (!S_ISSOCK(inode->i_mode))
76810 goto put_fail;
76811 +
76812 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
76813 + err = -EACCES;
76814 + goto put_fail;
76815 + }
76816 +
76817 u = unix_find_socket_byinode(inode);
76818 if (!u)
76819 goto put_fail;
76820 @@ -790,6 +796,13 @@ static struct sock *unix_find_other(struct net *net,
76821 if (u) {
76822 struct dentry *dentry;
76823 dentry = unix_sk(u)->dentry;
76824 +
76825 + if (!gr_handle_chroot_unix(pid_vnr(u->sk_peer_pid))) {
76826 + err = -EPERM;
76827 + sock_put(u);
76828 + goto fail;
76829 + }
76830 +
76831 if (dentry)
76832 touch_atime(unix_sk(u)->mnt, dentry);
76833 } else
76834 @@ -872,11 +885,18 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
76835 err = security_path_mknod(&path, dentry, mode, 0);
76836 if (err)
76837 goto out_mknod_drop_write;
76838 + if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
76839 + err = -EACCES;
76840 + goto out_mknod_drop_write;
76841 + }
76842 err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
76843 out_mknod_drop_write:
76844 mnt_drop_write(path.mnt);
76845 if (err)
76846 goto out_mknod_dput;
76847 +
76848 + gr_handle_create(dentry, path.mnt);
76849 +
76850 mutex_unlock(&path.dentry->d_inode->i_mutex);
76851 dput(path.dentry);
76852 path.dentry = dentry;
76853 diff --git a/net/wireless/core.h b/net/wireless/core.h
76854 index 43ad9c8..ab5127c 100644
76855 --- a/net/wireless/core.h
76856 +++ b/net/wireless/core.h
76857 @@ -27,7 +27,7 @@ struct cfg80211_registered_device {
76858 struct mutex mtx;
76859
76860 /* rfkill support */
76861 - struct rfkill_ops rfkill_ops;
76862 + rfkill_ops_no_const rfkill_ops;
76863 struct rfkill *rfkill;
76864 struct work_struct rfkill_sync;
76865
76866 diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
76867 index 0af7f54..c916d2f 100644
76868 --- a/net/wireless/wext-core.c
76869 +++ b/net/wireless/wext-core.c
76870 @@ -747,8 +747,7 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
76871 */
76872
76873 /* Support for very large requests */
76874 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
76875 - (user_length > descr->max_tokens)) {
76876 + if (user_length > descr->max_tokens) {
76877 /* Allow userspace to GET more than max so
76878 * we can support any size GET requests.
76879 * There is still a limit : -ENOMEM.
76880 @@ -785,22 +784,6 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
76881 }
76882 }
76883
76884 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
76885 - /*
76886 - * If this is a GET, but not NOMAX, it means that the extra
76887 - * data is not bounded by userspace, but by max_tokens. Thus
76888 - * set the length to max_tokens. This matches the extra data
76889 - * allocation.
76890 - * The driver should fill it with the number of tokens it
76891 - * provided, and it may check iwp->length rather than having
76892 - * knowledge of max_tokens. If the driver doesn't change the
76893 - * iwp->length, this ioctl just copies back max_token tokens
76894 - * filled with zeroes. Hopefully the driver isn't claiming
76895 - * them to be valid data.
76896 - */
76897 - iwp->length = descr->max_tokens;
76898 - }
76899 -
76900 err = handler(dev, info, (union iwreq_data *) iwp, extra);
76901
76902 iwp->length += essid_compat;
76903 diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
76904 index 7661576..80f7627 100644
76905 --- a/net/xfrm/xfrm_policy.c
76906 +++ b/net/xfrm/xfrm_policy.c
76907 @@ -299,7 +299,7 @@ static void xfrm_policy_kill(struct xfrm_policy *policy)
76908 {
76909 policy->walk.dead = 1;
76910
76911 - atomic_inc(&policy->genid);
76912 + atomic_inc_unchecked(&policy->genid);
76913
76914 if (del_timer(&policy->timer))
76915 xfrm_pol_put(policy);
76916 @@ -583,7 +583,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
76917 hlist_add_head(&policy->bydst, chain);
76918 xfrm_pol_hold(policy);
76919 net->xfrm.policy_count[dir]++;
76920 - atomic_inc(&flow_cache_genid);
76921 + atomic_inc_unchecked(&flow_cache_genid);
76922 if (delpol)
76923 __xfrm_policy_unlink(delpol, dir);
76924 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
76925 @@ -1530,7 +1530,7 @@ free_dst:
76926 goto out;
76927 }
76928
76929 -static int inline
76930 +static inline int
76931 xfrm_dst_alloc_copy(void **target, const void *src, int size)
76932 {
76933 if (!*target) {
76934 @@ -1542,7 +1542,7 @@ xfrm_dst_alloc_copy(void **target, const void *src, int size)
76935 return 0;
76936 }
76937
76938 -static int inline
76939 +static inline int
76940 xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
76941 {
76942 #ifdef CONFIG_XFRM_SUB_POLICY
76943 @@ -1554,7 +1554,7 @@ xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
76944 #endif
76945 }
76946
76947 -static int inline
76948 +static inline int
76949 xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
76950 {
76951 #ifdef CONFIG_XFRM_SUB_POLICY
76952 @@ -1648,7 +1648,7 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
76953
76954 xdst->num_pols = num_pols;
76955 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
76956 - xdst->policy_genid = atomic_read(&pols[0]->genid);
76957 + xdst->policy_genid = atomic_read_unchecked(&pols[0]->genid);
76958
76959 return xdst;
76960 }
76961 @@ -2345,7 +2345,7 @@ static int xfrm_bundle_ok(struct xfrm_dst *first)
76962 if (xdst->xfrm_genid != dst->xfrm->genid)
76963 return 0;
76964 if (xdst->num_pols > 0 &&
76965 - xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
76966 + xdst->policy_genid != atomic_read_unchecked(&xdst->pols[0]->genid))
76967 return 0;
76968
76969 mtu = dst_mtu(dst->child);
76970 @@ -2882,7 +2882,7 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol,
76971 sizeof(pol->xfrm_vec[i].saddr));
76972 pol->xfrm_vec[i].encap_family = mp->new_family;
76973 /* flush bundles */
76974 - atomic_inc(&pol->genid);
76975 + atomic_inc_unchecked(&pol->genid);
76976 }
76977 }
76978
76979 diff --git a/scripts/Makefile.build b/scripts/Makefile.build
76980 index d2b366c..51ff91ebc 100644
76981 --- a/scripts/Makefile.build
76982 +++ b/scripts/Makefile.build
76983 @@ -109,7 +109,7 @@ endif
76984 endif
76985
76986 # Do not include host rules unless needed
76987 -ifneq ($(hostprogs-y)$(hostprogs-m),)
76988 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),)
76989 include scripts/Makefile.host
76990 endif
76991
76992 diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean
76993 index 686cb0d..9d653bf 100644
76994 --- a/scripts/Makefile.clean
76995 +++ b/scripts/Makefile.clean
76996 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subdir-ymn))
76997 __clean-files := $(extra-y) $(always) \
76998 $(targets) $(clean-files) \
76999 $(host-progs) \
77000 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
77001 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
77002 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
77003
77004 __clean-files := $(filter-out $(no-clean-files), $(__clean-files))
77005
77006 diff --git a/scripts/Makefile.host b/scripts/Makefile.host
77007 index 1ac414f..a1c1451 100644
77008 --- a/scripts/Makefile.host
77009 +++ b/scripts/Makefile.host
77010 @@ -31,6 +31,7 @@
77011 # Note: Shared libraries consisting of C++ files are not supported
77012
77013 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
77014 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
77015
77016 # C code
77017 # Executables compiled from a single .c file
77018 @@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(host-cxxmulti),$($(m)-cxxobjs)))
77019 # Shared libaries (only .c supported)
77020 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
77021 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
77022 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
77023 # Remove .so files from "xxx-objs"
77024 host-cobjs := $(filter-out %.so,$(host-cobjs))
77025
77026 diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
77027 index cb1f50c..cef2a7c 100644
77028 --- a/scripts/basic/fixdep.c
77029 +++ b/scripts/basic/fixdep.c
77030 @@ -161,7 +161,7 @@ static unsigned int strhash(const char *str, unsigned int sz)
77031 /*
77032 * Lookup a value in the configuration string.
77033 */
77034 -static int is_defined_config(const char *name, int len, unsigned int hash)
77035 +static int is_defined_config(const char *name, unsigned int len, unsigned int hash)
77036 {
77037 struct item *aux;
77038
77039 @@ -211,10 +211,10 @@ static void clear_config(void)
77040 /*
77041 * Record the use of a CONFIG_* word.
77042 */
77043 -static void use_config(const char *m, int slen)
77044 +static void use_config(const char *m, unsigned int slen)
77045 {
77046 unsigned int hash = strhash(m, slen);
77047 - int c, i;
77048 + unsigned int c, i;
77049
77050 if (is_defined_config(m, slen, hash))
77051 return;
77052 @@ -235,9 +235,9 @@ static void use_config(const char *m, int slen)
77053
77054 static void parse_config_file(const char *map, size_t len)
77055 {
77056 - const int *end = (const int *) (map + len);
77057 + const unsigned int *end = (const unsigned int *) (map + len);
77058 /* start at +1, so that p can never be < map */
77059 - const int *m = (const int *) map + 1;
77060 + const unsigned int *m = (const unsigned int *) map + 1;
77061 const char *p, *q;
77062
77063 for (; m < end; m++) {
77064 @@ -406,7 +406,7 @@ static void print_deps(void)
77065 static void traps(void)
77066 {
77067 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
77068 - int *p = (int *)test;
77069 + unsigned int *p = (unsigned int *)test;
77070
77071 if (*p != INT_CONF) {
77072 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
77073 diff --git a/scripts/gcc-plugin.sh b/scripts/gcc-plugin.sh
77074 new file mode 100644
77075 index 0000000..8729101
77076 --- /dev/null
77077 +++ b/scripts/gcc-plugin.sh
77078 @@ -0,0 +1,2 @@
77079 +#!/bin/sh
77080 +echo -e "#include \"gcc-plugin.h\"\n#include \"tree.h\"\n#include \"tm.h\"\n#include \"rtl.h\"" | $1 -x c -shared - -o /dev/null -I`$2 -print-file-name=plugin`/include >/dev/null 2>&1 && echo "y"
77081 diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
77082 index b89efe6..2c30808 100644
77083 --- a/scripts/mod/file2alias.c
77084 +++ b/scripts/mod/file2alias.c
77085 @@ -128,7 +128,7 @@ static void device_id_check(const char *modname, const char *device_id,
77086 unsigned long size, unsigned long id_size,
77087 void *symval)
77088 {
77089 - int i;
77090 + unsigned int i;
77091
77092 if (size % id_size || size < id_size) {
77093 if (cross_build != 0)
77094 @@ -158,7 +158,7 @@ static void device_id_check(const char *modname, const char *device_id,
77095 /* USB is special because the bcdDevice can be matched against a numeric range */
77096 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
77097 static void do_usb_entry(struct usb_device_id *id,
77098 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
77099 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
77100 unsigned char range_lo, unsigned char range_hi,
77101 unsigned char max, struct module *mod)
77102 {
77103 @@ -259,7 +259,7 @@ static void do_usb_entry_multi(struct usb_device_id *id, struct module *mod)
77104 {
77105 unsigned int devlo, devhi;
77106 unsigned char chi, clo, max;
77107 - int ndigits;
77108 + unsigned int ndigits;
77109
77110 id->match_flags = TO_NATIVE(id->match_flags);
77111 id->idVendor = TO_NATIVE(id->idVendor);
77112 @@ -501,7 +501,7 @@ static void do_pnp_device_entry(void *symval, unsigned long size,
77113 for (i = 0; i < count; i++) {
77114 const char *id = (char *)devs[i].id;
77115 char acpi_id[sizeof(devs[0].id)];
77116 - int j;
77117 + unsigned int j;
77118
77119 buf_printf(&mod->dev_table_buf,
77120 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
77121 @@ -531,7 +531,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
77122
77123 for (j = 0; j < PNP_MAX_DEVICES; j++) {
77124 const char *id = (char *)card->devs[j].id;
77125 - int i2, j2;
77126 + unsigned int i2, j2;
77127 int dup = 0;
77128
77129 if (!id[0])
77130 @@ -557,7 +557,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
77131 /* add an individual alias for every device entry */
77132 if (!dup) {
77133 char acpi_id[sizeof(card->devs[0].id)];
77134 - int k;
77135 + unsigned int k;
77136
77137 buf_printf(&mod->dev_table_buf,
77138 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
77139 @@ -882,7 +882,7 @@ static void dmi_ascii_filter(char *d, const char *s)
77140 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
77141 char *alias)
77142 {
77143 - int i, j;
77144 + unsigned int i, j;
77145
77146 sprintf(alias, "dmi*");
77147
77148 diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
77149 index 9adb667..c6ac044 100644
77150 --- a/scripts/mod/modpost.c
77151 +++ b/scripts/mod/modpost.c
77152 @@ -919,6 +919,7 @@ enum mismatch {
77153 ANY_INIT_TO_ANY_EXIT,
77154 ANY_EXIT_TO_ANY_INIT,
77155 EXPORT_TO_INIT_EXIT,
77156 + DATA_TO_TEXT
77157 };
77158
77159 struct sectioncheck {
77160 @@ -1027,6 +1028,12 @@ const struct sectioncheck sectioncheck[] = {
77161 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
77162 .mismatch = EXPORT_TO_INIT_EXIT,
77163 .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
77164 +},
77165 +/* Do not reference code from writable data */
77166 +{
77167 + .fromsec = { DATA_SECTIONS, NULL },
77168 + .tosec = { TEXT_SECTIONS, NULL },
77169 + .mismatch = DATA_TO_TEXT
77170 }
77171 };
77172
77173 @@ -1149,10 +1156,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
77174 continue;
77175 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
77176 continue;
77177 - if (sym->st_value == addr)
77178 - return sym;
77179 /* Find a symbol nearby - addr are maybe negative */
77180 d = sym->st_value - addr;
77181 + if (d == 0)
77182 + return sym;
77183 if (d < 0)
77184 d = addr - sym->st_value;
77185 if (d < distance) {
77186 @@ -1431,6 +1438,14 @@ static void report_sec_mismatch(const char *modname,
77187 tosym, prl_to, prl_to, tosym);
77188 free(prl_to);
77189 break;
77190 + case DATA_TO_TEXT:
77191 +/*
77192 + fprintf(stderr,
77193 + "The variable %s references\n"
77194 + "the %s %s%s%s\n",
77195 + fromsym, to, sec2annotation(tosec), tosym, to_p);
77196 +*/
77197 + break;
77198 }
77199 fprintf(stderr, "\n");
77200 }
77201 @@ -1665,7 +1680,7 @@ static void section_rel(const char *modname, struct elf_info *elf,
77202 static void check_sec_ref(struct module *mod, const char *modname,
77203 struct elf_info *elf)
77204 {
77205 - int i;
77206 + unsigned int i;
77207 Elf_Shdr *sechdrs = elf->sechdrs;
77208
77209 /* Walk through all sections */
77210 @@ -1763,7 +1778,7 @@ void __attribute__((format(printf, 2, 3))) buf_printf(struct buffer *buf,
77211 va_end(ap);
77212 }
77213
77214 -void buf_write(struct buffer *buf, const char *s, int len)
77215 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
77216 {
77217 if (buf->size - buf->pos < len) {
77218 buf->size += len + SZ;
77219 @@ -1981,7 +1996,7 @@ static void write_if_changed(struct buffer *b, const char *fname)
77220 if (fstat(fileno(file), &st) < 0)
77221 goto close_write;
77222
77223 - if (st.st_size != b->pos)
77224 + if (st.st_size != (off_t)b->pos)
77225 goto close_write;
77226
77227 tmp = NOFAIL(malloc(b->pos));
77228 diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
77229 index 2031119..b5433af 100644
77230 --- a/scripts/mod/modpost.h
77231 +++ b/scripts/mod/modpost.h
77232 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *expr);
77233
77234 struct buffer {
77235 char *p;
77236 - int pos;
77237 - int size;
77238 + unsigned int pos;
77239 + unsigned int size;
77240 };
77241
77242 void __attribute__((format(printf, 2, 3)))
77243 buf_printf(struct buffer *buf, const char *fmt, ...);
77244
77245 void
77246 -buf_write(struct buffer *buf, const char *s, int len);
77247 +buf_write(struct buffer *buf, const char *s, unsigned int len);
77248
77249 struct module {
77250 struct module *next;
77251 diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c
77252 index 9dfcd6d..099068e 100644
77253 --- a/scripts/mod/sumversion.c
77254 +++ b/scripts/mod/sumversion.c
77255 @@ -470,7 +470,7 @@ static void write_version(const char *filename, const char *sum,
77256 goto out;
77257 }
77258
77259 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
77260 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
77261 warn("writing sum in %s failed: %s\n",
77262 filename, strerror(errno));
77263 goto out;
77264 diff --git a/scripts/pnmtologo.c b/scripts/pnmtologo.c
77265 index 5c11312..72742b5 100644
77266 --- a/scripts/pnmtologo.c
77267 +++ b/scripts/pnmtologo.c
77268 @@ -237,14 +237,14 @@ static void write_header(void)
77269 fprintf(out, " * Linux logo %s\n", logoname);
77270 fputs(" */\n\n", out);
77271 fputs("#include <linux/linux_logo.h>\n\n", out);
77272 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
77273 + fprintf(out, "static unsigned char %s_data[] = {\n",
77274 logoname);
77275 }
77276
77277 static void write_footer(void)
77278 {
77279 fputs("\n};\n\n", out);
77280 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
77281 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
77282 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
77283 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
77284 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
77285 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
77286 fputs("\n};\n\n", out);
77287
77288 /* write logo clut */
77289 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
77290 + fprintf(out, "static unsigned char %s_clut[] = {\n",
77291 logoname);
77292 write_hex_cnt = 0;
77293 for (i = 0; i < logo_clutsize; i++) {
77294 diff --git a/security/Kconfig b/security/Kconfig
77295 index 51bd5a0..3a4ebd0 100644
77296 --- a/security/Kconfig
77297 +++ b/security/Kconfig
77298 @@ -4,6 +4,627 @@
77299
77300 menu "Security options"
77301
77302 +source grsecurity/Kconfig
77303 +
77304 +menu "PaX"
77305 +
77306 + config ARCH_TRACK_EXEC_LIMIT
77307 + bool
77308 +
77309 + config PAX_KERNEXEC_PLUGIN
77310 + bool
77311 +
77312 + config PAX_PER_CPU_PGD
77313 + bool
77314 +
77315 + config TASK_SIZE_MAX_SHIFT
77316 + int
77317 + depends on X86_64
77318 + default 47 if !PAX_PER_CPU_PGD
77319 + default 42 if PAX_PER_CPU_PGD
77320 +
77321 + config PAX_ENABLE_PAE
77322 + bool
77323 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
77324 +
77325 +config PAX
77326 + bool "Enable various PaX features"
77327 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
77328 + help
77329 + This allows you to enable various PaX features. PaX adds
77330 + intrusion prevention mechanisms to the kernel that reduce
77331 + the risks posed by exploitable memory corruption bugs.
77332 +
77333 +menu "PaX Control"
77334 + depends on PAX
77335 +
77336 +config PAX_SOFTMODE
77337 + bool 'Support soft mode'
77338 + help
77339 + Enabling this option will allow you to run PaX in soft mode, that
77340 + is, PaX features will not be enforced by default, only on executables
77341 + marked explicitly. You must also enable PT_PAX_FLAGS or XATTR_PAX_FLAGS
77342 + support as they are the only way to mark executables for soft mode use.
77343 +
77344 + Soft mode can be activated by using the "pax_softmode=1" kernel command
77345 + line option on boot. Furthermore you can control various PaX features
77346 + at runtime via the entries in /proc/sys/kernel/pax.
77347 +
77348 +config PAX_EI_PAX
77349 + bool 'Use legacy ELF header marking'
77350 + help
77351 + Enabling this option will allow you to control PaX features on
77352 + a per executable basis via the 'chpax' utility available at
77353 + http://pax.grsecurity.net/. The control flags will be read from
77354 + an otherwise reserved part of the ELF header. This marking has
77355 + numerous drawbacks (no support for soft-mode, toolchain does not
77356 + know about the non-standard use of the ELF header) therefore it
77357 + has been deprecated in favour of PT_PAX_FLAGS and XATTR_PAX_FLAGS
77358 + support.
77359 +
77360 + If you have applications not marked by the PT_PAX_FLAGS ELF program
77361 + header and you cannot use XATTR_PAX_FLAGS then you MUST enable this
77362 + option otherwise they will not get any protection.
77363 +
77364 + Note that if you enable PT_PAX_FLAGS or XATTR_PAX_FLAGS marking
77365 + support as well, they will override the legacy EI_PAX marks.
77366 +
77367 +config PAX_PT_PAX_FLAGS
77368 + bool 'Use ELF program header marking'
77369 + help
77370 + Enabling this option will allow you to control PaX features on
77371 + a per executable basis via the 'paxctl' utility available at
77372 + http://pax.grsecurity.net/. The control flags will be read from
77373 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
77374 + has the benefits of supporting both soft mode and being fully
77375 + integrated into the toolchain (the binutils patch is available
77376 + from http://pax.grsecurity.net).
77377 +
77378 + If you have applications not marked by the PT_PAX_FLAGS ELF program
77379 + header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
77380 + support otherwise they will not get any protection.
77381 +
77382 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
77383 + must make sure that the marks are the same if a binary has both marks.
77384 +
77385 + Note that if you enable the legacy EI_PAX marking support as well,
77386 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
77387 +
77388 +config PAX_XATTR_PAX_FLAGS
77389 + bool 'Use filesystem extended attributes marking'
77390 + depends on EXPERT
77391 + select CIFS_XATTR if CIFS
77392 + select EXT2_FS_XATTR if EXT2_FS
77393 + select EXT3_FS_XATTR if EXT3_FS
77394 + select EXT4_FS_XATTR if EXT4_FS
77395 + select JFFS2_FS_XATTR if JFFS2_FS
77396 + select REISERFS_FS_XATTR if REISERFS_FS
77397 + select SQUASHFS_XATTR if SQUASHFS
77398 + select TMPFS_XATTR if TMPFS
77399 + select UBIFS_FS_XATTR if UBIFS_FS
77400 + help
77401 + Enabling this option will allow you to control PaX features on
77402 + a per executable basis via the 'setfattr' utility. The control
77403 + flags will be read from the user.pax.flags extended attribute of
77404 + the file. This marking has the benefit of supporting binary-only
77405 + applications that self-check themselves (e.g., skype) and would
77406 + not tolerate chpax/paxctl changes. The main drawback is that
77407 + extended attributes are not supported by some filesystems (e.g.,
77408 + isofs, udf, vfat) so copying files through such filesystems will
77409 + lose the extended attributes and these PaX markings.
77410 +
77411 + If you have applications not marked by the PT_PAX_FLAGS ELF program
77412 + header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
77413 + support otherwise they will not get any protection.
77414 +
77415 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
77416 + must make sure that the marks are the same if a binary has both marks.
77417 +
77418 + Note that if you enable the legacy EI_PAX marking support as well,
77419 + the EI_PAX marks will be overridden by the XATTR_PAX_FLAGS marks.
77420 +
77421 +choice
77422 + prompt 'MAC system integration'
77423 + default PAX_HAVE_ACL_FLAGS
77424 + help
77425 + Mandatory Access Control systems have the option of controlling
77426 + PaX flags on a per executable basis, choose the method supported
77427 + by your particular system.
77428 +
77429 + - "none": if your MAC system does not interact with PaX,
77430 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
77431 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
77432 +
77433 + NOTE: this option is for developers/integrators only.
77434 +
77435 + config PAX_NO_ACL_FLAGS
77436 + bool 'none'
77437 +
77438 + config PAX_HAVE_ACL_FLAGS
77439 + bool 'direct'
77440 +
77441 + config PAX_HOOK_ACL_FLAGS
77442 + bool 'hook'
77443 +endchoice
77444 +
77445 +endmenu
77446 +
77447 +menu "Non-executable pages"
77448 + depends on PAX
77449 +
77450 +config PAX_NOEXEC
77451 + bool "Enforce non-executable pages"
77452 + depends on ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86
77453 + help
77454 + By design some architectures do not allow for protecting memory
77455 + pages against execution or even if they do, Linux does not make
77456 + use of this feature. In practice this means that if a page is
77457 + readable (such as the stack or heap) it is also executable.
77458 +
77459 + There is a well known exploit technique that makes use of this
77460 + fact and a common programming mistake where an attacker can
77461 + introduce code of his choice somewhere in the attacked program's
77462 + memory (typically the stack or the heap) and then execute it.
77463 +
77464 + If the attacked program was running with different (typically
77465 + higher) privileges than that of the attacker, then he can elevate
77466 + his own privilege level (e.g. get a root shell, write to files for
77467 + to which he does not have write access, etc).
77468 +
77469 + Enabling this option will let you choose from various features
77470 + that prevent the injection and execution of 'foreign' code in
77471 + a program.
77472 +
77473 + This will also break programs that rely on the old behaviour and
77474 + expect that dynamically allocated memory via the malloc() family
77475 + of functions is executable (which it is not). Notable examples
77476 + are the XFree86 4.x server, the java runtime and wine.
77477 +
77478 +config PAX_PAGEEXEC
77479 + bool "Paging based non-executable pages"
77480 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
77481 + select S390_SWITCH_AMODE if S390
77482 + select S390_EXEC_PROTECT if S390
77483 + select ARCH_TRACK_EXEC_LIMIT if X86_32
77484 + help
77485 + This implementation is based on the paging feature of the CPU.
77486 + On i386 without hardware non-executable bit support there is a
77487 + variable but usually low performance impact, however on Intel's
77488 + P4 core based CPUs it is very high so you should not enable this
77489 + for kernels meant to be used on such CPUs.
77490 +
77491 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
77492 + with hardware non-executable bit support there is no performance
77493 + impact, on ppc the impact is negligible.
77494 +
77495 + Note that several architectures require various emulations due to
77496 + badly designed userland ABIs, this will cause a performance impact
77497 + but will disappear as soon as userland is fixed. For example, ppc
77498 + userland MUST have been built with secure-plt by a recent toolchain.
77499 +
77500 +config PAX_SEGMEXEC
77501 + bool "Segmentation based non-executable pages"
77502 + depends on PAX_NOEXEC && X86_32
77503 + help
77504 + This implementation is based on the segmentation feature of the
77505 + CPU and has a very small performance impact, however applications
77506 + will be limited to a 1.5 GB address space instead of the normal
77507 + 3 GB.
77508 +
77509 +config PAX_EMUTRAMP
77510 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
77511 + default y if PARISC
77512 + help
77513 + There are some programs and libraries that for one reason or
77514 + another attempt to execute special small code snippets from
77515 + non-executable memory pages. Most notable examples are the
77516 + signal handler return code generated by the kernel itself and
77517 + the GCC trampolines.
77518 +
77519 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
77520 + such programs will no longer work under your kernel.
77521 +
77522 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
77523 + utilities to enable trampoline emulation for the affected programs
77524 + yet still have the protection provided by the non-executable pages.
77525 +
77526 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
77527 + your system will not even boot.
77528 +
77529 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
77530 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
77531 + for the affected files.
77532 +
77533 + NOTE: enabling this feature *may* open up a loophole in the
77534 + protection provided by non-executable pages that an attacker
77535 + could abuse. Therefore the best solution is to not have any
77536 + files on your system that would require this option. This can
77537 + be achieved by not using libc5 (which relies on the kernel
77538 + signal handler return code) and not using or rewriting programs
77539 + that make use of the nested function implementation of GCC.
77540 + Skilled users can just fix GCC itself so that it implements
77541 + nested function calls in a way that does not interfere with PaX.
77542 +
77543 +config PAX_EMUSIGRT
77544 + bool "Automatically emulate sigreturn trampolines"
77545 + depends on PAX_EMUTRAMP && PARISC
77546 + default y
77547 + help
77548 + Enabling this option will have the kernel automatically detect
77549 + and emulate signal return trampolines executing on the stack
77550 + that would otherwise lead to task termination.
77551 +
77552 + This solution is intended as a temporary one for users with
77553 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
77554 + Modula-3 runtime, etc) or executables linked to such, basically
77555 + everything that does not specify its own SA_RESTORER function in
77556 + normal executable memory like glibc 2.1+ does.
77557 +
77558 + On parisc you MUST enable this option, otherwise your system will
77559 + not even boot.
77560 +
77561 + NOTE: this feature cannot be disabled on a per executable basis
77562 + and since it *does* open up a loophole in the protection provided
77563 + by non-executable pages, the best solution is to not have any
77564 + files on your system that would require this option.
77565 +
77566 +config PAX_MPROTECT
77567 + bool "Restrict mprotect()"
77568 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
77569 + help
77570 + Enabling this option will prevent programs from
77571 + - changing the executable status of memory pages that were
77572 + not originally created as executable,
77573 + - making read-only executable pages writable again,
77574 + - creating executable pages from anonymous memory,
77575 + - making read-only-after-relocations (RELRO) data pages writable again.
77576 +
77577 + You should say Y here to complete the protection provided by
77578 + the enforcement of non-executable pages.
77579 +
77580 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
77581 + this feature on a per file basis.
77582 +
77583 +config PAX_MPROTECT_COMPAT
77584 + bool "Use legacy/compat protection demoting (read help)"
77585 + depends on PAX_MPROTECT
77586 + default n
77587 + help
77588 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
77589 + by sending the proper error code to the application. For some broken
77590 + userland, this can cause problems with Python or other applications. The
77591 + current implementation however allows for applications like clamav to
77592 + detect if JIT compilation/execution is allowed and to fall back gracefully
77593 + to an interpreter-based mode if it does not. While we encourage everyone
77594 + to use the current implementation as-is and push upstream to fix broken
77595 + userland (note that the RWX logging option can assist with this), in some
77596 + environments this may not be possible. Having to disable MPROTECT
77597 + completely on certain binaries reduces the security benefit of PaX,
77598 + so this option is provided for those environments to revert to the old
77599 + behavior.
77600 +
77601 +config PAX_ELFRELOCS
77602 + bool "Allow ELF text relocations (read help)"
77603 + depends on PAX_MPROTECT
77604 + default n
77605 + help
77606 + Non-executable pages and mprotect() restrictions are effective
77607 + in preventing the introduction of new executable code into an
77608 + attacked task's address space. There remain only two venues
77609 + for this kind of attack: if the attacker can execute already
77610 + existing code in the attacked task then he can either have it
77611 + create and mmap() a file containing his code or have it mmap()
77612 + an already existing ELF library that does not have position
77613 + independent code in it and use mprotect() on it to make it
77614 + writable and copy his code there. While protecting against
77615 + the former approach is beyond PaX, the latter can be prevented
77616 + by having only PIC ELF libraries on one's system (which do not
77617 + need to relocate their code). If you are sure this is your case,
77618 + as is the case with all modern Linux distributions, then leave
77619 + this option disabled. You should say 'n' here.
77620 +
77621 +config PAX_ETEXECRELOCS
77622 + bool "Allow ELF ET_EXEC text relocations"
77623 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
77624 + select PAX_ELFRELOCS
77625 + default y
77626 + help
77627 + On some architectures there are incorrectly created applications
77628 + that require text relocations and would not work without enabling
77629 + this option. If you are an alpha, ia64 or parisc user, you should
77630 + enable this option and disable it once you have made sure that
77631 + none of your applications need it.
77632 +
77633 +config PAX_EMUPLT
77634 + bool "Automatically emulate ELF PLT"
77635 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
77636 + default y
77637 + help
77638 + Enabling this option will have the kernel automatically detect
77639 + and emulate the Procedure Linkage Table entries in ELF files.
77640 + On some architectures such entries are in writable memory, and
77641 + become non-executable leading to task termination. Therefore
77642 + it is mandatory that you enable this option on alpha, parisc,
77643 + sparc and sparc64, otherwise your system would not even boot.
77644 +
77645 + NOTE: this feature *does* open up a loophole in the protection
77646 + provided by the non-executable pages, therefore the proper
77647 + solution is to modify the toolchain to produce a PLT that does
77648 + not need to be writable.
77649 +
77650 +config PAX_DLRESOLVE
77651 + bool 'Emulate old glibc resolver stub'
77652 + depends on PAX_EMUPLT && SPARC
77653 + default n
77654 + help
77655 + This option is needed if userland has an old glibc (before 2.4)
77656 + that puts a 'save' instruction into the runtime generated resolver
77657 + stub that needs special emulation.
77658 +
77659 +config PAX_KERNEXEC
77660 + bool "Enforce non-executable kernel pages"
77661 + depends on (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
77662 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
77663 + select PAX_KERNEXEC_PLUGIN if X86_64
77664 + help
77665 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
77666 + that is, enabling this option will make it harder to inject
77667 + and execute 'foreign' code in kernel memory itself.
77668 +
77669 + Note that on x86_64 kernels there is a known regression when
77670 + this feature and KVM/VMX are both enabled in the host kernel.
77671 +
77672 +choice
77673 + prompt "Return Address Instrumentation Method"
77674 + default PAX_KERNEXEC_PLUGIN_METHOD_BTS
77675 + depends on PAX_KERNEXEC_PLUGIN
77676 + help
77677 + Select the method used to instrument function return addresses.
77678 + Note that binary modules cannot be instrumented by this approach.
77679 +
77680 + config PAX_KERNEXEC_PLUGIN_METHOD_BTS
77681 + bool "bts"
77682 + help
77683 + This method is compatible with binary only modules but has
77684 + a higher runtime overhead.
77685 +
77686 + config PAX_KERNEXEC_PLUGIN_METHOD_OR
77687 + bool "or"
77688 + depends on !PARAVIRT
77689 + help
77690 + This method is incompatible with binary only modules but has
77691 + a lower runtime overhead.
77692 +endchoice
77693 +
77694 +config PAX_KERNEXEC_PLUGIN_METHOD
77695 + string
77696 + default "bts" if PAX_KERNEXEC_PLUGIN_METHOD_BTS
77697 + default "or" if PAX_KERNEXEC_PLUGIN_METHOD_OR
77698 + default ""
77699 +
77700 +config PAX_KERNEXEC_MODULE_TEXT
77701 + int "Minimum amount of memory reserved for module code"
77702 + default "4"
77703 + depends on PAX_KERNEXEC && X86_32 && MODULES
77704 + help
77705 + Due to implementation details the kernel must reserve a fixed
77706 + amount of memory for module code at compile time that cannot be
77707 + changed at runtime. Here you can specify the minimum amount
77708 + in MB that will be reserved. Due to the same implementation
77709 + details this size will always be rounded up to the next 2/4 MB
77710 + boundary (depends on PAE) so the actually available memory for
77711 + module code will usually be more than this minimum.
77712 +
77713 + The default 4 MB should be enough for most users but if you have
77714 + an excessive number of modules (e.g., most distribution configs
77715 + compile many drivers as modules) or use huge modules such as
77716 + nvidia's kernel driver, you will need to adjust this amount.
77717 + A good rule of thumb is to look at your currently loaded kernel
77718 + modules and add up their sizes.
77719 +
77720 +endmenu
77721 +
77722 +menu "Address Space Layout Randomization"
77723 + depends on PAX
77724 +
77725 +config PAX_ASLR
77726 + bool "Address Space Layout Randomization"
77727 + help
77728 + Many if not most exploit techniques rely on the knowledge of
77729 + certain addresses in the attacked program. The following options
77730 + will allow the kernel to apply a certain amount of randomization
77731 + to specific parts of the program thereby forcing an attacker to
77732 + guess them in most cases. Any failed guess will most likely crash
77733 + the attacked program which allows the kernel to detect such attempts
77734 + and react on them. PaX itself provides no reaction mechanisms,
77735 + instead it is strongly encouraged that you make use of Nergal's
77736 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
77737 + (http://www.grsecurity.net/) built-in crash detection features or
77738 + develop one yourself.
77739 +
77740 + By saying Y here you can choose to randomize the following areas:
77741 + - top of the task's kernel stack
77742 + - top of the task's userland stack
77743 + - base address for mmap() requests that do not specify one
77744 + (this includes all libraries)
77745 + - base address of the main executable
77746 +
77747 + It is strongly recommended to say Y here as address space layout
77748 + randomization has negligible impact on performance yet it provides
77749 + a very effective protection.
77750 +
77751 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
77752 + this feature on a per file basis.
77753 +
77754 +config PAX_RANDKSTACK
77755 + bool "Randomize kernel stack base"
77756 + depends on X86_TSC && X86
77757 + help
77758 + By saying Y here the kernel will randomize every task's kernel
77759 + stack on every system call. This will not only force an attacker
77760 + to guess it but also prevent him from making use of possible
77761 + leaked information about it.
77762 +
77763 + Since the kernel stack is a rather scarce resource, randomization
77764 + may cause unexpected stack overflows, therefore you should very
77765 + carefully test your system. Note that once enabled in the kernel
77766 + configuration, this feature cannot be disabled on a per file basis.
77767 +
77768 +config PAX_RANDUSTACK
77769 + bool "Randomize user stack base"
77770 + depends on PAX_ASLR
77771 + help
77772 + By saying Y here the kernel will randomize every task's userland
77773 + stack. The randomization is done in two steps where the second
77774 + one may apply a big amount of shift to the top of the stack and
77775 + cause problems for programs that want to use lots of memory (more
77776 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
77777 + For this reason the second step can be controlled by 'chpax' or
77778 + 'paxctl' on a per file basis.
77779 +
77780 +config PAX_RANDMMAP
77781 + bool "Randomize mmap() base"
77782 + depends on PAX_ASLR
77783 + help
77784 + By saying Y here the kernel will use a randomized base address for
77785 + mmap() requests that do not specify one themselves. As a result
77786 + all dynamically loaded libraries will appear at random addresses
77787 + and therefore be harder to exploit by a technique where an attacker
77788 + attempts to execute library code for his purposes (e.g. spawn a
77789 + shell from an exploited program that is running at an elevated
77790 + privilege level).
77791 +
77792 + Furthermore, if a program is relinked as a dynamic ELF file, its
77793 + base address will be randomized as well, completing the full
77794 + randomization of the address space layout. Attacking such programs
77795 + becomes a guess game. You can find an example of doing this at
77796 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
77797 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
77798 +
77799 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
77800 + feature on a per file basis.
77801 +
77802 +endmenu
77803 +
77804 +menu "Miscellaneous hardening features"
77805 +
77806 +config PAX_MEMORY_SANITIZE
77807 + bool "Sanitize all freed memory"
77808 + depends on !HIBERNATION
77809 + help
77810 + By saying Y here the kernel will erase memory pages as soon as they
77811 + are freed. This in turn reduces the lifetime of data stored in the
77812 + pages, making it less likely that sensitive information such as
77813 + passwords, cryptographic secrets, etc stay in memory for too long.
77814 +
77815 + This is especially useful for programs whose runtime is short, long
77816 + lived processes and the kernel itself benefit from this as long as
77817 + they operate on whole memory pages and ensure timely freeing of pages
77818 + that may hold sensitive information.
77819 +
77820 + The tradeoff is performance impact: on a single CPU system kernel
77821 + compilation sees a 3% slowdown, other systems and workloads may vary
77822 + and you are advised to test this feature on your expected workload
77823 + before deploying it.
77824 +
77825 + Note that this feature does not protect data stored in live pages,
77826 + e.g., process memory swapped to disk may stay there for a long time.
77827 +
77828 +config PAX_MEMORY_STACKLEAK
77829 + bool "Sanitize kernel stack"
77830 + depends on X86
77831 + help
77832 + By saying Y here the kernel will erase the kernel stack before it
77833 + returns from a system call. This in turn reduces the information
77834 + that a kernel stack leak bug can reveal.
77835 +
77836 + Note that such a bug can still leak information that was put on
77837 + the stack by the current system call (the one eventually triggering
77838 + the bug) but traces of earlier system calls on the kernel stack
77839 + cannot leak anymore.
77840 +
77841 + The tradeoff is performance impact: on a single CPU system kernel
77842 + compilation sees a 1% slowdown, other systems and workloads may vary
77843 + and you are advised to test this feature on your expected workload
77844 + before deploying it.
77845 +
77846 + Note: full support for this feature requires gcc with plugin support
77847 + so make sure your compiler is at least gcc 4.5.0. Using older gcc
77848 + versions means that functions with large enough stack frames may
77849 + leave uninitialized memory behind that may be exposed to a later
77850 + syscall leaking the stack.
77851 +
77852 +config PAX_MEMORY_UDEREF
77853 + bool "Prevent invalid userland pointer dereference"
77854 + depends on X86 && !UML_X86 && !XEN
77855 + select PAX_PER_CPU_PGD if X86_64
77856 + help
77857 + By saying Y here the kernel will be prevented from dereferencing
77858 + userland pointers in contexts where the kernel expects only kernel
77859 + pointers. This is both a useful runtime debugging feature and a
77860 + security measure that prevents exploiting a class of kernel bugs.
77861 +
77862 + The tradeoff is that some virtualization solutions may experience
77863 + a huge slowdown and therefore you should not enable this feature
77864 + for kernels meant to run in such environments. Whether a given VM
77865 + solution is affected or not is best determined by simply trying it
77866 + out, the performance impact will be obvious right on boot as this
77867 + mechanism engages from very early on. A good rule of thumb is that
77868 + VMs running on CPUs without hardware virtualization support (i.e.,
77869 + the majority of IA-32 CPUs) will likely experience the slowdown.
77870 +
77871 +config PAX_REFCOUNT
77872 + bool "Prevent various kernel object reference counter overflows"
77873 + depends on GRKERNSEC && ((ARM && (CPU_32v6 || CPU_32v6K || CPU_32v7)) || SPARC64 || X86)
77874 + help
77875 + By saying Y here the kernel will detect and prevent overflowing
77876 + various (but not all) kinds of object reference counters. Such
77877 + overflows can normally occur due to bugs only and are often, if
77878 + not always, exploitable.
77879 +
77880 + The tradeoff is that data structures protected by an overflowed
77881 + refcount will never be freed and therefore will leak memory. Note
77882 + that this leak also happens even without this protection but in
77883 + that case the overflow can eventually trigger the freeing of the
77884 + data structure while it is still being used elsewhere, resulting
77885 + in the exploitable situation that this feature prevents.
77886 +
77887 + Since this has a negligible performance impact, you should enable
77888 + this feature.
77889 +
77890 +config PAX_USERCOPY
77891 + bool "Harden heap object copies between kernel and userland"
77892 + depends on X86 || PPC || SPARC || ARM
77893 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
77894 + help
77895 + By saying Y here the kernel will enforce the size of heap objects
77896 + when they are copied in either direction between the kernel and
77897 + userland, even if only a part of the heap object is copied.
77898 +
77899 + Specifically, this checking prevents information leaking from the
77900 + kernel heap during kernel to userland copies (if the kernel heap
77901 + object is otherwise fully initialized) and prevents kernel heap
77902 + overflows during userland to kernel copies.
77903 +
77904 + Note that the current implementation provides the strictest bounds
77905 + checks for the SLUB allocator.
77906 +
77907 + Enabling this option also enables per-slab cache protection against
77908 + data in a given cache being copied into/out of via userland
77909 + accessors. Though the whitelist of regions will be reduced over
77910 + time, it notably protects important data structures like task structs.
77911 +
77912 + If frame pointers are enabled on x86, this option will also restrict
77913 + copies into and out of the kernel stack to local variables within a
77914 + single frame.
77915 +
77916 + Since this has a negligible performance impact, you should enable
77917 + this feature.
77918 +
77919 +endmenu
77920 +
77921 +endmenu
77922 +
77923 config KEYS
77924 bool "Enable access key retention support"
77925 help
77926 @@ -169,7 +790,7 @@ config INTEL_TXT
77927 config LSM_MMAP_MIN_ADDR
77928 int "Low address space for LSM to protect from user allocation"
77929 depends on SECURITY && SECURITY_SELINUX
77930 - default 32768 if ARM
77931 + default 32768 if ALPHA || ARM || PARISC || SPARC32
77932 default 65536
77933 help
77934 This is the portion of low virtual memory which should be protected
77935 diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
77936 index 97ce8fa..23dad96 100644
77937 --- a/security/apparmor/lsm.c
77938 +++ b/security/apparmor/lsm.c
77939 @@ -620,7 +620,7 @@ static int apparmor_task_setrlimit(struct task_struct *task,
77940 return error;
77941 }
77942
77943 -static struct security_operations apparmor_ops = {
77944 +static struct security_operations apparmor_ops __read_only = {
77945 .name = "apparmor",
77946
77947 .ptrace_access_check = apparmor_ptrace_access_check,
77948 diff --git a/security/commoncap.c b/security/commoncap.c
77949 index 7ce191e..6c29c34 100644
77950 --- a/security/commoncap.c
77951 +++ b/security/commoncap.c
77952 @@ -28,6 +28,7 @@
77953 #include <linux/prctl.h>
77954 #include <linux/securebits.h>
77955 #include <linux/user_namespace.h>
77956 +#include <net/sock.h>
77957
77958 /*
77959 * If a non-root user executes a setuid-root binary in
77960 @@ -569,6 +570,9 @@ int cap_bprm_secureexec(struct linux_binprm *bprm)
77961 {
77962 const struct cred *cred = current_cred();
77963
77964 + if (gr_acl_enable_at_secure())
77965 + return 1;
77966 +
77967 if (cred->uid != 0) {
77968 if (bprm->cap_effective)
77969 return 1;
77970 diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
77971 index 3ccf7ac..d73ad64 100644
77972 --- a/security/integrity/ima/ima.h
77973 +++ b/security/integrity/ima/ima.h
77974 @@ -86,8 +86,8 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
77975 extern spinlock_t ima_queue_lock;
77976
77977 struct ima_h_table {
77978 - atomic_long_t len; /* number of stored measurements in the list */
77979 - atomic_long_t violations;
77980 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
77981 + atomic_long_unchecked_t violations;
77982 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
77983 };
77984 extern struct ima_h_table ima_htable;
77985 diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
77986 index 88a2788..581ab92 100644
77987 --- a/security/integrity/ima/ima_api.c
77988 +++ b/security/integrity/ima/ima_api.c
77989 @@ -75,7 +75,7 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
77990 int result;
77991
77992 /* can overflow, only indicator */
77993 - atomic_long_inc(&ima_htable.violations);
77994 + atomic_long_inc_unchecked(&ima_htable.violations);
77995
77996 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
77997 if (!entry) {
77998 diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
77999 index e1aa2b4..52027bf 100644
78000 --- a/security/integrity/ima/ima_fs.c
78001 +++ b/security/integrity/ima/ima_fs.c
78002 @@ -28,12 +28,12 @@
78003 static int valid_policy = 1;
78004 #define TMPBUFLEN 12
78005 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
78006 - loff_t *ppos, atomic_long_t *val)
78007 + loff_t *ppos, atomic_long_unchecked_t *val)
78008 {
78009 char tmpbuf[TMPBUFLEN];
78010 ssize_t len;
78011
78012 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
78013 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
78014 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
78015 }
78016
78017 diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
78018 index 55a6271..ad829c3 100644
78019 --- a/security/integrity/ima/ima_queue.c
78020 +++ b/security/integrity/ima/ima_queue.c
78021 @@ -81,7 +81,7 @@ static int ima_add_digest_entry(struct ima_template_entry *entry)
78022 INIT_LIST_HEAD(&qe->later);
78023 list_add_tail_rcu(&qe->later, &ima_measurements);
78024
78025 - atomic_long_inc(&ima_htable.len);
78026 + atomic_long_inc_unchecked(&ima_htable.len);
78027 key = ima_hash_key(entry->digest);
78028 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
78029 return 0;
78030 diff --git a/security/keys/compat.c b/security/keys/compat.c
78031 index 4c48e13..7abdac9 100644
78032 --- a/security/keys/compat.c
78033 +++ b/security/keys/compat.c
78034 @@ -44,7 +44,7 @@ long compat_keyctl_instantiate_key_iov(
78035 if (ret == 0)
78036 goto no_payload_free;
78037
78038 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
78039 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
78040
78041 if (iov != iovstack)
78042 kfree(iov);
78043 diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
78044 index 0b3f5d7..892c8a6 100644
78045 --- a/security/keys/keyctl.c
78046 +++ b/security/keys/keyctl.c
78047 @@ -921,7 +921,7 @@ static int keyctl_change_reqkey_auth(struct key *key)
78048 /*
78049 * Copy the iovec data from userspace
78050 */
78051 -static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
78052 +static long copy_from_user_iovec(void *buffer, const struct iovec __user *iov,
78053 unsigned ioc)
78054 {
78055 for (; ioc > 0; ioc--) {
78056 @@ -943,7 +943,7 @@ static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
78057 * If successful, 0 will be returned.
78058 */
78059 long keyctl_instantiate_key_common(key_serial_t id,
78060 - const struct iovec *payload_iov,
78061 + const struct iovec __user *payload_iov,
78062 unsigned ioc,
78063 size_t plen,
78064 key_serial_t ringid)
78065 @@ -1038,7 +1038,7 @@ long keyctl_instantiate_key(key_serial_t id,
78066 [0].iov_len = plen
78067 };
78068
78069 - return keyctl_instantiate_key_common(id, iov, 1, plen, ringid);
78070 + return keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, 1, plen, ringid);
78071 }
78072
78073 return keyctl_instantiate_key_common(id, NULL, 0, 0, ringid);
78074 @@ -1071,7 +1071,7 @@ long keyctl_instantiate_key_iov(key_serial_t id,
78075 if (ret == 0)
78076 goto no_payload_free;
78077
78078 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
78079 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
78080
78081 if (iov != iovstack)
78082 kfree(iov);
78083 diff --git a/security/keys/keyring.c b/security/keys/keyring.c
78084 index d605f75..2bc6be9 100644
78085 --- a/security/keys/keyring.c
78086 +++ b/security/keys/keyring.c
78087 @@ -214,15 +214,15 @@ static long keyring_read(const struct key *keyring,
78088 ret = -EFAULT;
78089
78090 for (loop = 0; loop < klist->nkeys; loop++) {
78091 + key_serial_t serial;
78092 key = klist->keys[loop];
78093 + serial = key->serial;
78094
78095 tmp = sizeof(key_serial_t);
78096 if (tmp > buflen)
78097 tmp = buflen;
78098
78099 - if (copy_to_user(buffer,
78100 - &key->serial,
78101 - tmp) != 0)
78102 + if (copy_to_user(buffer, &serial, tmp))
78103 goto error;
78104
78105 buflen -= tmp;
78106 diff --git a/security/min_addr.c b/security/min_addr.c
78107 index f728728..6457a0c 100644
78108 --- a/security/min_addr.c
78109 +++ b/security/min_addr.c
78110 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
78111 */
78112 static void update_mmap_min_addr(void)
78113 {
78114 +#ifndef SPARC
78115 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
78116 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
78117 mmap_min_addr = dac_mmap_min_addr;
78118 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
78119 #else
78120 mmap_min_addr = dac_mmap_min_addr;
78121 #endif
78122 +#endif
78123 }
78124
78125 /*
78126 diff --git a/security/security.c b/security/security.c
78127 index d754249..8bf426e 100644
78128 --- a/security/security.c
78129 +++ b/security/security.c
78130 @@ -26,8 +26,8 @@
78131 static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1] =
78132 CONFIG_DEFAULT_SECURITY;
78133
78134 -static struct security_operations *security_ops;
78135 -static struct security_operations default_security_ops = {
78136 +static struct security_operations *security_ops __read_only;
78137 +static struct security_operations default_security_ops __read_only = {
78138 .name = "default",
78139 };
78140
78141 @@ -68,7 +68,9 @@ int __init security_init(void)
78142
78143 void reset_security_ops(void)
78144 {
78145 + pax_open_kernel();
78146 security_ops = &default_security_ops;
78147 + pax_close_kernel();
78148 }
78149
78150 /* Save user chosen LSM */
78151 diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
78152 index 6a3683e..f52f4c0 100644
78153 --- a/security/selinux/hooks.c
78154 +++ b/security/selinux/hooks.c
78155 @@ -94,8 +94,6 @@
78156
78157 #define NUM_SEL_MNT_OPTS 5
78158
78159 -extern struct security_operations *security_ops;
78160 -
78161 /* SECMARK reference count */
78162 static atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
78163
78164 @@ -5429,7 +5427,7 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer)
78165
78166 #endif
78167
78168 -static struct security_operations selinux_ops = {
78169 +static struct security_operations selinux_ops __read_only = {
78170 .name = "selinux",
78171
78172 .ptrace_access_check = selinux_ptrace_access_check,
78173 diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
78174 index b43813c..74be837 100644
78175 --- a/security/selinux/include/xfrm.h
78176 +++ b/security/selinux/include/xfrm.h
78177 @@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall);
78178
78179 static inline void selinux_xfrm_notify_policyload(void)
78180 {
78181 - atomic_inc(&flow_cache_genid);
78182 + atomic_inc_unchecked(&flow_cache_genid);
78183 }
78184 #else
78185 static inline int selinux_xfrm_enabled(void)
78186 diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
78187 index e8af5b0b..78527ef 100644
78188 --- a/security/smack/smack_lsm.c
78189 +++ b/security/smack/smack_lsm.c
78190 @@ -3481,7 +3481,7 @@ static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
78191 return 0;
78192 }
78193
78194 -struct security_operations smack_ops = {
78195 +struct security_operations smack_ops __read_only = {
78196 .name = "smack",
78197
78198 .ptrace_access_check = smack_ptrace_access_check,
78199 diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
78200 index 620d37c..e2ad89b 100644
78201 --- a/security/tomoyo/tomoyo.c
78202 +++ b/security/tomoyo/tomoyo.c
78203 @@ -501,7 +501,7 @@ static int tomoyo_socket_sendmsg(struct socket *sock, struct msghdr *msg,
78204 * tomoyo_security_ops is a "struct security_operations" which is used for
78205 * registering TOMOYO.
78206 */
78207 -static struct security_operations tomoyo_security_ops = {
78208 +static struct security_operations tomoyo_security_ops __read_only = {
78209 .name = "tomoyo",
78210 .cred_alloc_blank = tomoyo_cred_alloc_blank,
78211 .cred_prepare = tomoyo_cred_prepare,
78212 diff --git a/sound/aoa/codecs/onyx.c b/sound/aoa/codecs/onyx.c
78213 index 762af68..7103453 100644
78214 --- a/sound/aoa/codecs/onyx.c
78215 +++ b/sound/aoa/codecs/onyx.c
78216 @@ -54,7 +54,7 @@ struct onyx {
78217 spdif_locked:1,
78218 analog_locked:1,
78219 original_mute:2;
78220 - int open_count;
78221 + local_t open_count;
78222 struct codec_info *codec_info;
78223
78224 /* mutex serializes concurrent access to the device
78225 @@ -753,7 +753,7 @@ static int onyx_open(struct codec_info_item *cii,
78226 struct onyx *onyx = cii->codec_data;
78227
78228 mutex_lock(&onyx->mutex);
78229 - onyx->open_count++;
78230 + local_inc(&onyx->open_count);
78231 mutex_unlock(&onyx->mutex);
78232
78233 return 0;
78234 @@ -765,8 +765,7 @@ static int onyx_close(struct codec_info_item *cii,
78235 struct onyx *onyx = cii->codec_data;
78236
78237 mutex_lock(&onyx->mutex);
78238 - onyx->open_count--;
78239 - if (!onyx->open_count)
78240 + if (local_dec_and_test(&onyx->open_count))
78241 onyx->spdif_locked = onyx->analog_locked = 0;
78242 mutex_unlock(&onyx->mutex);
78243
78244 diff --git a/sound/aoa/codecs/onyx.h b/sound/aoa/codecs/onyx.h
78245 index ffd2025..df062c9 100644
78246 --- a/sound/aoa/codecs/onyx.h
78247 +++ b/sound/aoa/codecs/onyx.h
78248 @@ -11,6 +11,7 @@
78249 #include <linux/i2c.h>
78250 #include <asm/pmac_low_i2c.h>
78251 #include <asm/prom.h>
78252 +#include <asm/local.h>
78253
78254 /* PCM3052 register definitions */
78255
78256 diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
78257 index 08fde00..0bf641a 100644
78258 --- a/sound/core/oss/pcm_oss.c
78259 +++ b/sound/core/oss/pcm_oss.c
78260 @@ -1189,10 +1189,10 @@ snd_pcm_sframes_t snd_pcm_oss_write3(struct snd_pcm_substream *substream, const
78261 if (in_kernel) {
78262 mm_segment_t fs;
78263 fs = snd_enter_user();
78264 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
78265 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
78266 snd_leave_user(fs);
78267 } else {
78268 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
78269 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
78270 }
78271 if (ret != -EPIPE && ret != -ESTRPIPE)
78272 break;
78273 @@ -1234,10 +1234,10 @@ snd_pcm_sframes_t snd_pcm_oss_read3(struct snd_pcm_substream *substream, char *p
78274 if (in_kernel) {
78275 mm_segment_t fs;
78276 fs = snd_enter_user();
78277 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
78278 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
78279 snd_leave_user(fs);
78280 } else {
78281 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
78282 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
78283 }
78284 if (ret == -EPIPE) {
78285 if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
78286 @@ -1337,7 +1337,7 @@ static ssize_t snd_pcm_oss_write2(struct snd_pcm_substream *substream, const cha
78287 struct snd_pcm_plugin_channel *channels;
78288 size_t oss_frame_bytes = (runtime->oss.plugin_first->src_width * runtime->oss.plugin_first->src_format.channels) / 8;
78289 if (!in_kernel) {
78290 - if (copy_from_user(runtime->oss.buffer, (const char __force __user *)buf, bytes))
78291 + if (copy_from_user(runtime->oss.buffer, (const char __force_user *)buf, bytes))
78292 return -EFAULT;
78293 buf = runtime->oss.buffer;
78294 }
78295 @@ -1407,7 +1407,7 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
78296 }
78297 } else {
78298 tmp = snd_pcm_oss_write2(substream,
78299 - (const char __force *)buf,
78300 + (const char __force_kernel *)buf,
78301 runtime->oss.period_bytes, 0);
78302 if (tmp <= 0)
78303 goto err;
78304 @@ -1433,7 +1433,7 @@ static ssize_t snd_pcm_oss_read2(struct snd_pcm_substream *substream, char *buf,
78305 struct snd_pcm_runtime *runtime = substream->runtime;
78306 snd_pcm_sframes_t frames, frames1;
78307 #ifdef CONFIG_SND_PCM_OSS_PLUGINS
78308 - char __user *final_dst = (char __force __user *)buf;
78309 + char __user *final_dst = (char __force_user *)buf;
78310 if (runtime->oss.plugin_first) {
78311 struct snd_pcm_plugin_channel *channels;
78312 size_t oss_frame_bytes = (runtime->oss.plugin_last->dst_width * runtime->oss.plugin_last->dst_format.channels) / 8;
78313 @@ -1495,7 +1495,7 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
78314 xfer += tmp;
78315 runtime->oss.buffer_used -= tmp;
78316 } else {
78317 - tmp = snd_pcm_oss_read2(substream, (char __force *)buf,
78318 + tmp = snd_pcm_oss_read2(substream, (char __force_kernel *)buf,
78319 runtime->oss.period_bytes, 0);
78320 if (tmp <= 0)
78321 goto err;
78322 @@ -1663,7 +1663,7 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
78323 size1);
78324 size1 /= runtime->channels; /* frames */
78325 fs = snd_enter_user();
78326 - snd_pcm_lib_write(substream, (void __force __user *)runtime->oss.buffer, size1);
78327 + snd_pcm_lib_write(substream, (void __force_user *)runtime->oss.buffer, size1);
78328 snd_leave_user(fs);
78329 }
78330 } else if (runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) {
78331 diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
78332 index 91cdf94..4085161 100644
78333 --- a/sound/core/pcm_compat.c
78334 +++ b/sound/core/pcm_compat.c
78335 @@ -31,7 +31,7 @@ static int snd_pcm_ioctl_delay_compat(struct snd_pcm_substream *substream,
78336 int err;
78337
78338 fs = snd_enter_user();
78339 - err = snd_pcm_delay(substream, &delay);
78340 + err = snd_pcm_delay(substream, (snd_pcm_sframes_t __force_user *)&delay);
78341 snd_leave_user(fs);
78342 if (err < 0)
78343 return err;
78344 diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
78345 index 25ed9fe..24c46e9 100644
78346 --- a/sound/core/pcm_native.c
78347 +++ b/sound/core/pcm_native.c
78348 @@ -2765,11 +2765,11 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
78349 switch (substream->stream) {
78350 case SNDRV_PCM_STREAM_PLAYBACK:
78351 result = snd_pcm_playback_ioctl1(NULL, substream, cmd,
78352 - (void __user *)arg);
78353 + (void __force_user *)arg);
78354 break;
78355 case SNDRV_PCM_STREAM_CAPTURE:
78356 result = snd_pcm_capture_ioctl1(NULL, substream, cmd,
78357 - (void __user *)arg);
78358 + (void __force_user *)arg);
78359 break;
78360 default:
78361 result = -EINVAL;
78362 diff --git a/sound/core/seq/seq_device.c b/sound/core/seq/seq_device.c
78363 index 5cf8d65..912a79c 100644
78364 --- a/sound/core/seq/seq_device.c
78365 +++ b/sound/core/seq/seq_device.c
78366 @@ -64,7 +64,7 @@ struct ops_list {
78367 int argsize; /* argument size */
78368
78369 /* operators */
78370 - struct snd_seq_dev_ops ops;
78371 + struct snd_seq_dev_ops *ops;
78372
78373 /* registred devices */
78374 struct list_head dev_list; /* list of devices */
78375 @@ -333,7 +333,7 @@ int snd_seq_device_register_driver(char *id, struct snd_seq_dev_ops *entry,
78376
78377 mutex_lock(&ops->reg_mutex);
78378 /* copy driver operators */
78379 - ops->ops = *entry;
78380 + ops->ops = entry;
78381 ops->driver |= DRIVER_LOADED;
78382 ops->argsize = argsize;
78383
78384 @@ -463,7 +463,7 @@ static int init_device(struct snd_seq_device *dev, struct ops_list *ops)
78385 dev->name, ops->id, ops->argsize, dev->argsize);
78386 return -EINVAL;
78387 }
78388 - if (ops->ops.init_device(dev) >= 0) {
78389 + if (ops->ops->init_device(dev) >= 0) {
78390 dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
78391 ops->num_init_devices++;
78392 } else {
78393 @@ -490,7 +490,7 @@ static int free_device(struct snd_seq_device *dev, struct ops_list *ops)
78394 dev->name, ops->id, ops->argsize, dev->argsize);
78395 return -EINVAL;
78396 }
78397 - if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
78398 + if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
78399 dev->status = SNDRV_SEQ_DEVICE_FREE;
78400 dev->driver_data = NULL;
78401 ops->num_init_devices--;
78402 diff --git a/sound/drivers/mts64.c b/sound/drivers/mts64.c
78403 index 621e60e..f4543f5 100644
78404 --- a/sound/drivers/mts64.c
78405 +++ b/sound/drivers/mts64.c
78406 @@ -29,6 +29,7 @@
78407 #include <sound/initval.h>
78408 #include <sound/rawmidi.h>
78409 #include <sound/control.h>
78410 +#include <asm/local.h>
78411
78412 #define CARD_NAME "Miditerminal 4140"
78413 #define DRIVER_NAME "MTS64"
78414 @@ -67,7 +68,7 @@ struct mts64 {
78415 struct pardevice *pardev;
78416 int pardev_claimed;
78417
78418 - int open_count;
78419 + local_t open_count;
78420 int current_midi_output_port;
78421 int current_midi_input_port;
78422 u8 mode[MTS64_NUM_INPUT_PORTS];
78423 @@ -697,7 +698,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
78424 {
78425 struct mts64 *mts = substream->rmidi->private_data;
78426
78427 - if (mts->open_count == 0) {
78428 + if (local_read(&mts->open_count) == 0) {
78429 /* We don't need a spinlock here, because this is just called
78430 if the device has not been opened before.
78431 So there aren't any IRQs from the device */
78432 @@ -705,7 +706,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
78433
78434 msleep(50);
78435 }
78436 - ++(mts->open_count);
78437 + local_inc(&mts->open_count);
78438
78439 return 0;
78440 }
78441 @@ -715,8 +716,7 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
78442 struct mts64 *mts = substream->rmidi->private_data;
78443 unsigned long flags;
78444
78445 - --(mts->open_count);
78446 - if (mts->open_count == 0) {
78447 + if (local_dec_return(&mts->open_count) == 0) {
78448 /* We need the spinlock_irqsave here because we can still
78449 have IRQs at this point */
78450 spin_lock_irqsave(&mts->lock, flags);
78451 @@ -725,8 +725,8 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
78452
78453 msleep(500);
78454
78455 - } else if (mts->open_count < 0)
78456 - mts->open_count = 0;
78457 + } else if (local_read(&mts->open_count) < 0)
78458 + local_set(&mts->open_count, 0);
78459
78460 return 0;
78461 }
78462 diff --git a/sound/drivers/opl4/opl4_lib.c b/sound/drivers/opl4/opl4_lib.c
78463 index b953fb4..1999c01 100644
78464 --- a/sound/drivers/opl4/opl4_lib.c
78465 +++ b/sound/drivers/opl4/opl4_lib.c
78466 @@ -29,7 +29,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
78467 MODULE_DESCRIPTION("OPL4 driver");
78468 MODULE_LICENSE("GPL");
78469
78470 -static void inline snd_opl4_wait(struct snd_opl4 *opl4)
78471 +static inline void snd_opl4_wait(struct snd_opl4 *opl4)
78472 {
78473 int timeout = 10;
78474 while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
78475 diff --git a/sound/drivers/portman2x4.c b/sound/drivers/portman2x4.c
78476 index 3e32bd3..46fc152 100644
78477 --- a/sound/drivers/portman2x4.c
78478 +++ b/sound/drivers/portman2x4.c
78479 @@ -48,6 +48,7 @@
78480 #include <sound/initval.h>
78481 #include <sound/rawmidi.h>
78482 #include <sound/control.h>
78483 +#include <asm/local.h>
78484
78485 #define CARD_NAME "Portman 2x4"
78486 #define DRIVER_NAME "portman"
78487 @@ -85,7 +86,7 @@ struct portman {
78488 struct pardevice *pardev;
78489 int pardev_claimed;
78490
78491 - int open_count;
78492 + local_t open_count;
78493 int mode[PORTMAN_NUM_INPUT_PORTS];
78494 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
78495 };
78496 diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c
78497 index 87657dd..a8268d4 100644
78498 --- a/sound/firewire/amdtp.c
78499 +++ b/sound/firewire/amdtp.c
78500 @@ -371,7 +371,7 @@ static void queue_out_packet(struct amdtp_out_stream *s, unsigned int cycle)
78501 ptr = s->pcm_buffer_pointer + data_blocks;
78502 if (ptr >= pcm->runtime->buffer_size)
78503 ptr -= pcm->runtime->buffer_size;
78504 - ACCESS_ONCE(s->pcm_buffer_pointer) = ptr;
78505 + ACCESS_ONCE_RW(s->pcm_buffer_pointer) = ptr;
78506
78507 s->pcm_period_pointer += data_blocks;
78508 if (s->pcm_period_pointer >= pcm->runtime->period_size) {
78509 @@ -511,7 +511,7 @@ EXPORT_SYMBOL(amdtp_out_stream_start);
78510 */
78511 void amdtp_out_stream_update(struct amdtp_out_stream *s)
78512 {
78513 - ACCESS_ONCE(s->source_node_id_field) =
78514 + ACCESS_ONCE_RW(s->source_node_id_field) =
78515 (fw_parent_device(s->unit)->card->node_id & 0x3f) << 24;
78516 }
78517 EXPORT_SYMBOL(amdtp_out_stream_update);
78518 diff --git a/sound/firewire/amdtp.h b/sound/firewire/amdtp.h
78519 index 537a9cb..8e8c8e9 100644
78520 --- a/sound/firewire/amdtp.h
78521 +++ b/sound/firewire/amdtp.h
78522 @@ -146,7 +146,7 @@ static inline void amdtp_out_stream_pcm_prepare(struct amdtp_out_stream *s)
78523 static inline void amdtp_out_stream_pcm_trigger(struct amdtp_out_stream *s,
78524 struct snd_pcm_substream *pcm)
78525 {
78526 - ACCESS_ONCE(s->pcm) = pcm;
78527 + ACCESS_ONCE_RW(s->pcm) = pcm;
78528 }
78529
78530 /**
78531 diff --git a/sound/firewire/isight.c b/sound/firewire/isight.c
78532 index cd094ec..eca1277 100644
78533 --- a/sound/firewire/isight.c
78534 +++ b/sound/firewire/isight.c
78535 @@ -96,7 +96,7 @@ static void isight_update_pointers(struct isight *isight, unsigned int count)
78536 ptr += count;
78537 if (ptr >= runtime->buffer_size)
78538 ptr -= runtime->buffer_size;
78539 - ACCESS_ONCE(isight->buffer_pointer) = ptr;
78540 + ACCESS_ONCE_RW(isight->buffer_pointer) = ptr;
78541
78542 isight->period_counter += count;
78543 if (isight->period_counter >= runtime->period_size) {
78544 @@ -307,7 +307,7 @@ static int isight_hw_params(struct snd_pcm_substream *substream,
78545 if (err < 0)
78546 return err;
78547
78548 - ACCESS_ONCE(isight->pcm_active) = true;
78549 + ACCESS_ONCE_RW(isight->pcm_active) = true;
78550
78551 return 0;
78552 }
78553 @@ -340,7 +340,7 @@ static int isight_hw_free(struct snd_pcm_substream *substream)
78554 {
78555 struct isight *isight = substream->private_data;
78556
78557 - ACCESS_ONCE(isight->pcm_active) = false;
78558 + ACCESS_ONCE_RW(isight->pcm_active) = false;
78559
78560 mutex_lock(&isight->mutex);
78561 isight_stop_streaming(isight);
78562 @@ -433,10 +433,10 @@ static int isight_trigger(struct snd_pcm_substream *substream, int cmd)
78563
78564 switch (cmd) {
78565 case SNDRV_PCM_TRIGGER_START:
78566 - ACCESS_ONCE(isight->pcm_running) = true;
78567 + ACCESS_ONCE_RW(isight->pcm_running) = true;
78568 break;
78569 case SNDRV_PCM_TRIGGER_STOP:
78570 - ACCESS_ONCE(isight->pcm_running) = false;
78571 + ACCESS_ONCE_RW(isight->pcm_running) = false;
78572 break;
78573 default:
78574 return -EINVAL;
78575 diff --git a/sound/isa/cmi8330.c b/sound/isa/cmi8330.c
78576 index 7bd5e33..1fcab12 100644
78577 --- a/sound/isa/cmi8330.c
78578 +++ b/sound/isa/cmi8330.c
78579 @@ -172,7 +172,7 @@ struct snd_cmi8330 {
78580
78581 struct snd_pcm *pcm;
78582 struct snd_cmi8330_stream {
78583 - struct snd_pcm_ops ops;
78584 + snd_pcm_ops_no_const ops;
78585 snd_pcm_open_callback_t open;
78586 void *private_data; /* sb or wss */
78587 } streams[2];
78588 diff --git a/sound/oss/sb_audio.c b/sound/oss/sb_audio.c
78589 index 733b014..56ce96f 100644
78590 --- a/sound/oss/sb_audio.c
78591 +++ b/sound/oss/sb_audio.c
78592 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
78593 buf16 = (signed short *)(localbuf + localoffs);
78594 while (c)
78595 {
78596 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
78597 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
78598 if (copy_from_user(lbuf8,
78599 userbuf+useroffs + p,
78600 locallen))
78601 diff --git a/sound/oss/swarm_cs4297a.c b/sound/oss/swarm_cs4297a.c
78602 index 09d4648..cf234c7 100644
78603 --- a/sound/oss/swarm_cs4297a.c
78604 +++ b/sound/oss/swarm_cs4297a.c
78605 @@ -2606,7 +2606,6 @@ static int __init cs4297a_init(void)
78606 {
78607 struct cs4297a_state *s;
78608 u32 pwr, id;
78609 - mm_segment_t fs;
78610 int rval;
78611 #ifndef CONFIG_BCM_CS4297A_CSWARM
78612 u64 cfg;
78613 @@ -2696,22 +2695,23 @@ static int __init cs4297a_init(void)
78614 if (!rval) {
78615 char *sb1250_duart_present;
78616
78617 +#if 0
78618 + mm_segment_t fs;
78619 fs = get_fs();
78620 set_fs(KERNEL_DS);
78621 -#if 0
78622 val = SOUND_MASK_LINE;
78623 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
78624 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
78625 val = initvol[i].vol;
78626 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
78627 }
78628 + set_fs(fs);
78629 // cs4297a_write_ac97(s, 0x18, 0x0808);
78630 #else
78631 // cs4297a_write_ac97(s, 0x5e, 0x180);
78632 cs4297a_write_ac97(s, 0x02, 0x0808);
78633 cs4297a_write_ac97(s, 0x18, 0x0808);
78634 #endif
78635 - set_fs(fs);
78636
78637 list_add(&s->list, &cs4297a_devs);
78638
78639 diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
78640 index f0f1943..8e1f96c 100644
78641 --- a/sound/pci/hda/hda_codec.h
78642 +++ b/sound/pci/hda/hda_codec.h
78643 @@ -611,7 +611,7 @@ struct hda_bus_ops {
78644 /* notify power-up/down from codec to controller */
78645 void (*pm_notify)(struct hda_bus *bus);
78646 #endif
78647 -};
78648 +} __no_const;
78649
78650 /* template to pass to the bus constructor */
78651 struct hda_bus_template {
78652 @@ -713,6 +713,7 @@ struct hda_codec_ops {
78653 #endif
78654 void (*reboot_notify)(struct hda_codec *codec);
78655 };
78656 +typedef struct hda_codec_ops __no_const hda_codec_ops_no_const;
78657
78658 /* record for amp information cache */
78659 struct hda_cache_head {
78660 @@ -743,7 +744,7 @@ struct hda_pcm_ops {
78661 struct snd_pcm_substream *substream);
78662 int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec,
78663 struct snd_pcm_substream *substream);
78664 -};
78665 +} __no_const;
78666
78667 /* PCM information for each substream */
78668 struct hda_pcm_stream {
78669 @@ -801,7 +802,7 @@ struct hda_codec {
78670 const char *modelname; /* model name for preset */
78671
78672 /* set by patch */
78673 - struct hda_codec_ops patch_ops;
78674 + hda_codec_ops_no_const patch_ops;
78675
78676 /* PCM to create, set by patch_ops.build_pcms callback */
78677 unsigned int num_pcms;
78678 diff --git a/sound/pci/ice1712/ice1712.h b/sound/pci/ice1712/ice1712.h
78679 index 0da778a..bc38b84 100644
78680 --- a/sound/pci/ice1712/ice1712.h
78681 +++ b/sound/pci/ice1712/ice1712.h
78682 @@ -269,7 +269,7 @@ struct snd_ak4xxx_private {
78683 unsigned int mask_flags; /* total mask bits */
78684 struct snd_akm4xxx_ops {
78685 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
78686 - } ops;
78687 + } __no_const ops;
78688 };
78689
78690 struct snd_ice1712_spdif {
78691 @@ -285,7 +285,7 @@ struct snd_ice1712_spdif {
78692 int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
78693 void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
78694 int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
78695 - } ops;
78696 + } __no_const ops;
78697 };
78698
78699
78700 diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
78701 index 12a9a2b..2b6138f 100644
78702 --- a/sound/pci/ymfpci/ymfpci_main.c
78703 +++ b/sound/pci/ymfpci/ymfpci_main.c
78704 @@ -203,8 +203,8 @@ static void snd_ymfpci_hw_stop(struct snd_ymfpci *chip)
78705 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
78706 break;
78707 }
78708 - if (atomic_read(&chip->interrupt_sleep_count)) {
78709 - atomic_set(&chip->interrupt_sleep_count, 0);
78710 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
78711 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
78712 wake_up(&chip->interrupt_sleep);
78713 }
78714 __end:
78715 @@ -788,7 +788,7 @@ static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip)
78716 continue;
78717 init_waitqueue_entry(&wait, current);
78718 add_wait_queue(&chip->interrupt_sleep, &wait);
78719 - atomic_inc(&chip->interrupt_sleep_count);
78720 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
78721 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
78722 remove_wait_queue(&chip->interrupt_sleep, &wait);
78723 }
78724 @@ -826,8 +826,8 @@ static irqreturn_t snd_ymfpci_interrupt(int irq, void *dev_id)
78725 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
78726 spin_unlock(&chip->reg_lock);
78727
78728 - if (atomic_read(&chip->interrupt_sleep_count)) {
78729 - atomic_set(&chip->interrupt_sleep_count, 0);
78730 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
78731 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
78732 wake_up(&chip->interrupt_sleep);
78733 }
78734 }
78735 @@ -2389,7 +2389,7 @@ int __devinit snd_ymfpci_create(struct snd_card *card,
78736 spin_lock_init(&chip->reg_lock);
78737 spin_lock_init(&chip->voice_lock);
78738 init_waitqueue_head(&chip->interrupt_sleep);
78739 - atomic_set(&chip->interrupt_sleep_count, 0);
78740 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
78741 chip->card = card;
78742 chip->pci = pci;
78743 chip->irq = -1;
78744 diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
78745 index cdc860a..db34a93 100644
78746 --- a/sound/soc/soc-pcm.c
78747 +++ b/sound/soc/soc-pcm.c
78748 @@ -605,7 +605,7 @@ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num)
78749 struct snd_soc_platform *platform = rtd->platform;
78750 struct snd_soc_dai *codec_dai = rtd->codec_dai;
78751 struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
78752 - struct snd_pcm_ops *soc_pcm_ops = &rtd->ops;
78753 + snd_pcm_ops_no_const *soc_pcm_ops = &rtd->ops;
78754 struct snd_pcm *pcm;
78755 char new_name[64];
78756 int ret = 0, playback = 0, capture = 0;
78757 diff --git a/sound/usb/card.h b/sound/usb/card.h
78758 index da5fa1a..113cd02 100644
78759 --- a/sound/usb/card.h
78760 +++ b/sound/usb/card.h
78761 @@ -45,6 +45,7 @@ struct snd_urb_ops {
78762 int (*prepare_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
78763 int (*retire_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
78764 };
78765 +typedef struct snd_urb_ops __no_const snd_urb_ops_no_const;
78766
78767 struct snd_usb_substream {
78768 struct snd_usb_stream *stream;
78769 @@ -94,7 +95,7 @@ struct snd_usb_substream {
78770 struct snd_pcm_hw_constraint_list rate_list; /* limited rates */
78771 spinlock_t lock;
78772
78773 - struct snd_urb_ops ops; /* callbacks (must be filled at init) */
78774 + snd_urb_ops_no_const ops; /* callbacks (must be filled at init) */
78775 int last_frame_number; /* stored frame number */
78776 int last_delay; /* stored delay */
78777 };
78778 diff --git a/tools/gcc/Makefile b/tools/gcc/Makefile
78779 new file mode 100644
78780 index 0000000..894c8bf
78781 --- /dev/null
78782 +++ b/tools/gcc/Makefile
78783 @@ -0,0 +1,23 @@
78784 +#CC := gcc
78785 +#PLUGIN_SOURCE_FILES := pax_plugin.c
78786 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
78787 +GCCPLUGINS_DIR := $(shell $(CC) -print-file-name=plugin)
78788 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W -std=gnu99
78789 +
78790 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include -I$(GCCPLUGINS_DIR)/include/c-family -std=gnu99 -ggdb
78791 +
78792 +hostlibs-y := constify_plugin.so
78793 +hostlibs-$(CONFIG_PAX_MEMORY_STACKLEAK) += stackleak_plugin.so
78794 +hostlibs-$(CONFIG_KALLOCSTAT_PLUGIN) += kallocstat_plugin.so
78795 +hostlibs-$(CONFIG_PAX_KERNEXEC_PLUGIN) += kernexec_plugin.so
78796 +hostlibs-$(CONFIG_CHECKER_PLUGIN) += checker_plugin.so
78797 +hostlibs-y += colorize_plugin.so
78798 +
78799 +always := $(hostlibs-y)
78800 +
78801 +constify_plugin-objs := constify_plugin.o
78802 +stackleak_plugin-objs := stackleak_plugin.o
78803 +kallocstat_plugin-objs := kallocstat_plugin.o
78804 +kernexec_plugin-objs := kernexec_plugin.o
78805 +checker_plugin-objs := checker_plugin.o
78806 +colorize_plugin-objs := colorize_plugin.o
78807 diff --git a/tools/gcc/checker_plugin.c b/tools/gcc/checker_plugin.c
78808 new file mode 100644
78809 index 0000000..d41b5af
78810 --- /dev/null
78811 +++ b/tools/gcc/checker_plugin.c
78812 @@ -0,0 +1,171 @@
78813 +/*
78814 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
78815 + * Licensed under the GPL v2
78816 + *
78817 + * Note: the choice of the license means that the compilation process is
78818 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
78819 + * but for the kernel it doesn't matter since it doesn't link against
78820 + * any of the gcc libraries
78821 + *
78822 + * gcc plugin to implement various sparse (source code checker) features
78823 + *
78824 + * TODO:
78825 + * - define separate __iomem, __percpu and __rcu address spaces (lots of code to patch)
78826 + *
78827 + * BUGS:
78828 + * - none known
78829 + */
78830 +#include "gcc-plugin.h"
78831 +#include "config.h"
78832 +#include "system.h"
78833 +#include "coretypes.h"
78834 +#include "tree.h"
78835 +#include "tree-pass.h"
78836 +#include "flags.h"
78837 +#include "intl.h"
78838 +#include "toplev.h"
78839 +#include "plugin.h"
78840 +//#include "expr.h" where are you...
78841 +#include "diagnostic.h"
78842 +#include "plugin-version.h"
78843 +#include "tm.h"
78844 +#include "function.h"
78845 +#include "basic-block.h"
78846 +#include "gimple.h"
78847 +#include "rtl.h"
78848 +#include "emit-rtl.h"
78849 +#include "tree-flow.h"
78850 +#include "target.h"
78851 +
78852 +extern void c_register_addr_space (const char *str, addr_space_t as);
78853 +extern enum machine_mode default_addr_space_pointer_mode (addr_space_t);
78854 +extern enum machine_mode default_addr_space_address_mode (addr_space_t);
78855 +extern bool default_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as);
78856 +extern bool default_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as);
78857 +extern rtx default_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as);
78858 +
78859 +extern void print_gimple_stmt(FILE *, gimple, int, int);
78860 +extern rtx emit_move_insn(rtx x, rtx y);
78861 +
78862 +int plugin_is_GPL_compatible;
78863 +
78864 +static struct plugin_info checker_plugin_info = {
78865 + .version = "201111150100",
78866 +};
78867 +
78868 +#define ADDR_SPACE_KERNEL 0
78869 +#define ADDR_SPACE_FORCE_KERNEL 1
78870 +#define ADDR_SPACE_USER 2
78871 +#define ADDR_SPACE_FORCE_USER 3
78872 +#define ADDR_SPACE_IOMEM 0
78873 +#define ADDR_SPACE_FORCE_IOMEM 0
78874 +#define ADDR_SPACE_PERCPU 0
78875 +#define ADDR_SPACE_FORCE_PERCPU 0
78876 +#define ADDR_SPACE_RCU 0
78877 +#define ADDR_SPACE_FORCE_RCU 0
78878 +
78879 +static enum machine_mode checker_addr_space_pointer_mode(addr_space_t addrspace)
78880 +{
78881 + return default_addr_space_pointer_mode(ADDR_SPACE_GENERIC);
78882 +}
78883 +
78884 +static enum machine_mode checker_addr_space_address_mode(addr_space_t addrspace)
78885 +{
78886 + return default_addr_space_address_mode(ADDR_SPACE_GENERIC);
78887 +}
78888 +
78889 +static bool checker_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as)
78890 +{
78891 + return default_addr_space_valid_pointer_mode(mode, as);
78892 +}
78893 +
78894 +static bool checker_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as)
78895 +{
78896 + return default_addr_space_legitimate_address_p(mode, mem, strict, ADDR_SPACE_GENERIC);
78897 +}
78898 +
78899 +static rtx checker_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as)
78900 +{
78901 + return default_addr_space_legitimize_address(x, oldx, mode, as);
78902 +}
78903 +
78904 +static bool checker_addr_space_subset_p(addr_space_t subset, addr_space_t superset)
78905 +{
78906 + if (subset == ADDR_SPACE_FORCE_KERNEL && superset == ADDR_SPACE_KERNEL)
78907 + return true;
78908 +
78909 + if (subset == ADDR_SPACE_FORCE_USER && superset == ADDR_SPACE_USER)
78910 + return true;
78911 +
78912 + if (subset == ADDR_SPACE_FORCE_IOMEM && superset == ADDR_SPACE_IOMEM)
78913 + return true;
78914 +
78915 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_USER)
78916 + return true;
78917 +
78918 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_IOMEM)
78919 + return true;
78920 +
78921 + if (subset == ADDR_SPACE_USER && superset == ADDR_SPACE_FORCE_KERNEL)
78922 + return true;
78923 +
78924 + if (subset == ADDR_SPACE_IOMEM && superset == ADDR_SPACE_FORCE_KERNEL)
78925 + return true;
78926 +
78927 + return subset == superset;
78928 +}
78929 +
78930 +static rtx checker_addr_space_convert(rtx op, tree from_type, tree to_type)
78931 +{
78932 +// addr_space_t from_as = TYPE_ADDR_SPACE(TREE_TYPE(from_type));
78933 +// addr_space_t to_as = TYPE_ADDR_SPACE(TREE_TYPE(to_type));
78934 +
78935 + return op;
78936 +}
78937 +
78938 +static void register_checker_address_spaces(void *event_data, void *data)
78939 +{
78940 + c_register_addr_space("__kernel", ADDR_SPACE_KERNEL);
78941 + c_register_addr_space("__force_kernel", ADDR_SPACE_FORCE_KERNEL);
78942 + c_register_addr_space("__user", ADDR_SPACE_USER);
78943 + c_register_addr_space("__force_user", ADDR_SPACE_FORCE_USER);
78944 +// c_register_addr_space("__iomem", ADDR_SPACE_IOMEM);
78945 +// c_register_addr_space("__force_iomem", ADDR_SPACE_FORCE_IOMEM);
78946 +// c_register_addr_space("__percpu", ADDR_SPACE_PERCPU);
78947 +// c_register_addr_space("__force_percpu", ADDR_SPACE_FORCE_PERCPU);
78948 +// c_register_addr_space("__rcu", ADDR_SPACE_RCU);
78949 +// c_register_addr_space("__force_rcu", ADDR_SPACE_FORCE_RCU);
78950 +
78951 + targetm.addr_space.pointer_mode = checker_addr_space_pointer_mode;
78952 + targetm.addr_space.address_mode = checker_addr_space_address_mode;
78953 + targetm.addr_space.valid_pointer_mode = checker_addr_space_valid_pointer_mode;
78954 + targetm.addr_space.legitimate_address_p = checker_addr_space_legitimate_address_p;
78955 +// targetm.addr_space.legitimize_address = checker_addr_space_legitimize_address;
78956 + targetm.addr_space.subset_p = checker_addr_space_subset_p;
78957 + targetm.addr_space.convert = checker_addr_space_convert;
78958 +}
78959 +
78960 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
78961 +{
78962 + const char * const plugin_name = plugin_info->base_name;
78963 + const int argc = plugin_info->argc;
78964 + const struct plugin_argument * const argv = plugin_info->argv;
78965 + int i;
78966 +
78967 + if (!plugin_default_version_check(version, &gcc_version)) {
78968 + error(G_("incompatible gcc/plugin versions"));
78969 + return 1;
78970 + }
78971 +
78972 + register_callback(plugin_name, PLUGIN_INFO, NULL, &checker_plugin_info);
78973 +
78974 + for (i = 0; i < argc; ++i)
78975 + error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
78976 +
78977 + if (TARGET_64BIT == 0)
78978 + return 0;
78979 +
78980 + register_callback(plugin_name, PLUGIN_PRAGMAS, register_checker_address_spaces, NULL);
78981 +
78982 + return 0;
78983 +}
78984 diff --git a/tools/gcc/colorize_plugin.c b/tools/gcc/colorize_plugin.c
78985 new file mode 100644
78986 index 0000000..ee950d0
78987 --- /dev/null
78988 +++ b/tools/gcc/colorize_plugin.c
78989 @@ -0,0 +1,147 @@
78990 +/*
78991 + * Copyright 2012 by PaX Team <pageexec@freemail.hu>
78992 + * Licensed under the GPL v2
78993 + *
78994 + * Note: the choice of the license means that the compilation process is
78995 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
78996 + * but for the kernel it doesn't matter since it doesn't link against
78997 + * any of the gcc libraries
78998 + *
78999 + * gcc plugin to colorize diagnostic output
79000 + *
79001 + */
79002 +
79003 +#include "gcc-plugin.h"
79004 +#include "config.h"
79005 +#include "system.h"
79006 +#include "coretypes.h"
79007 +#include "tree.h"
79008 +#include "tree-pass.h"
79009 +#include "flags.h"
79010 +#include "intl.h"
79011 +#include "toplev.h"
79012 +#include "plugin.h"
79013 +#include "diagnostic.h"
79014 +#include "plugin-version.h"
79015 +#include "tm.h"
79016 +
79017 +int plugin_is_GPL_compatible;
79018 +
79019 +static struct plugin_info colorize_plugin_info = {
79020 + .version = "201203092200",
79021 +};
79022 +
79023 +#define GREEN "\033[32m\033[2m"
79024 +#define LIGHTGREEN "\033[32m\033[1m"
79025 +#define YELLOW "\033[33m\033[2m"
79026 +#define LIGHTYELLOW "\033[33m\033[1m"
79027 +#define RED "\033[31m\033[2m"
79028 +#define LIGHTRED "\033[31m\033[1m"
79029 +#define BLUE "\033[34m\033[2m"
79030 +#define LIGHTBLUE "\033[34m\033[1m"
79031 +#define BRIGHT "\033[m\033[1m"
79032 +#define NORMAL "\033[m"
79033 +
79034 +static diagnostic_starter_fn old_starter;
79035 +static diagnostic_finalizer_fn old_finalizer;
79036 +
79037 +static void start_colorize(diagnostic_context *context, diagnostic_info *diagnostic)
79038 +{
79039 + const char *color;
79040 + char *newprefix;
79041 +
79042 + switch (diagnostic->kind) {
79043 + case DK_NOTE:
79044 + color = LIGHTBLUE;
79045 + break;
79046 +
79047 + case DK_PEDWARN:
79048 + case DK_WARNING:
79049 + color = LIGHTYELLOW;
79050 + break;
79051 +
79052 + case DK_ERROR:
79053 + case DK_FATAL:
79054 + case DK_ICE:
79055 + case DK_PERMERROR:
79056 + case DK_SORRY:
79057 + color = LIGHTRED;
79058 + break;
79059 +
79060 + default:
79061 + color = NORMAL;
79062 + }
79063 +
79064 + old_starter(context, diagnostic);
79065 + if (-1 == asprintf(&newprefix, "%s%s" NORMAL, color, context->printer->prefix))
79066 + return;
79067 + pp_destroy_prefix(context->printer);
79068 + pp_set_prefix(context->printer, newprefix);
79069 +}
79070 +
79071 +static void finalize_colorize(diagnostic_context *context, diagnostic_info *diagnostic)
79072 +{
79073 + old_finalizer(context, diagnostic);
79074 +}
79075 +
79076 +static void colorize_arm(void)
79077 +{
79078 + old_starter = diagnostic_starter(global_dc);
79079 + old_finalizer = diagnostic_finalizer(global_dc);
79080 +
79081 + diagnostic_starter(global_dc) = start_colorize;
79082 + diagnostic_finalizer(global_dc) = finalize_colorize;
79083 +}
79084 +
79085 +static unsigned int execute_colorize_rearm(void)
79086 +{
79087 + if (diagnostic_starter(global_dc) == start_colorize)
79088 + return 0;
79089 +
79090 + colorize_arm();
79091 + return 0;
79092 +}
79093 +
79094 +struct simple_ipa_opt_pass pass_ipa_colorize_rearm = {
79095 + .pass = {
79096 + .type = SIMPLE_IPA_PASS,
79097 + .name = "colorize_rearm",
79098 + .gate = NULL,
79099 + .execute = execute_colorize_rearm,
79100 + .sub = NULL,
79101 + .next = NULL,
79102 + .static_pass_number = 0,
79103 + .tv_id = TV_NONE,
79104 + .properties_required = 0,
79105 + .properties_provided = 0,
79106 + .properties_destroyed = 0,
79107 + .todo_flags_start = 0,
79108 + .todo_flags_finish = 0
79109 + }
79110 +};
79111 +
79112 +static void colorize_start_unit(void *gcc_data, void *user_data)
79113 +{
79114 + colorize_arm();
79115 +}
79116 +
79117 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
79118 +{
79119 + const char * const plugin_name = plugin_info->base_name;
79120 + struct register_pass_info colorize_rearm_pass_info = {
79121 + .pass = &pass_ipa_colorize_rearm.pass,
79122 + .reference_pass_name = "*free_lang_data",
79123 + .ref_pass_instance_number = 0,
79124 + .pos_op = PASS_POS_INSERT_AFTER
79125 + };
79126 +
79127 + if (!plugin_default_version_check(version, &gcc_version)) {
79128 + error(G_("incompatible gcc/plugin versions"));
79129 + return 1;
79130 + }
79131 +
79132 + register_callback(plugin_name, PLUGIN_INFO, NULL, &colorize_plugin_info);
79133 + register_callback(plugin_name, PLUGIN_START_UNIT, &colorize_start_unit, NULL);
79134 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &colorize_rearm_pass_info);
79135 + return 0;
79136 +}
79137 diff --git a/tools/gcc/constify_plugin.c b/tools/gcc/constify_plugin.c
79138 new file mode 100644
79139 index 0000000..704a564
79140 --- /dev/null
79141 +++ b/tools/gcc/constify_plugin.c
79142 @@ -0,0 +1,303 @@
79143 +/*
79144 + * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
79145 + * Copyright 2011 by PaX Team <pageexec@freemail.hu>
79146 + * Licensed under the GPL v2, or (at your option) v3
79147 + *
79148 + * This gcc plugin constifies all structures which contain only function pointers or are explicitly marked for constification.
79149 + *
79150 + * Homepage:
79151 + * http://www.grsecurity.net/~ephox/const_plugin/
79152 + *
79153 + * Usage:
79154 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
79155 + * $ gcc -fplugin=constify_plugin.so test.c -O2
79156 + */
79157 +
79158 +#include "gcc-plugin.h"
79159 +#include "config.h"
79160 +#include "system.h"
79161 +#include "coretypes.h"
79162 +#include "tree.h"
79163 +#include "tree-pass.h"
79164 +#include "flags.h"
79165 +#include "intl.h"
79166 +#include "toplev.h"
79167 +#include "plugin.h"
79168 +#include "diagnostic.h"
79169 +#include "plugin-version.h"
79170 +#include "tm.h"
79171 +#include "function.h"
79172 +#include "basic-block.h"
79173 +#include "gimple.h"
79174 +#include "rtl.h"
79175 +#include "emit-rtl.h"
79176 +#include "tree-flow.h"
79177 +
79178 +#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE)
79179 +
79180 +int plugin_is_GPL_compatible;
79181 +
79182 +static struct plugin_info const_plugin_info = {
79183 + .version = "201111150100",
79184 + .help = "no-constify\tturn off constification\n",
79185 +};
79186 +
79187 +static void constify_type(tree type);
79188 +static bool walk_struct(tree node);
79189 +
79190 +static tree deconstify_type(tree old_type)
79191 +{
79192 + tree new_type, field;
79193 +
79194 + new_type = build_qualified_type(old_type, TYPE_QUALS(old_type) & ~TYPE_QUAL_CONST);
79195 + TYPE_FIELDS(new_type) = copy_list(TYPE_FIELDS(new_type));
79196 + for (field = TYPE_FIELDS(new_type); field; field = TREE_CHAIN(field))
79197 + DECL_FIELD_CONTEXT(field) = new_type;
79198 + TYPE_READONLY(new_type) = 0;
79199 + C_TYPE_FIELDS_READONLY(new_type) = 0;
79200 + return new_type;
79201 +}
79202 +
79203 +static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
79204 +{
79205 + tree type;
79206 +
79207 + *no_add_attrs = true;
79208 + if (TREE_CODE(*node) == FUNCTION_DECL) {
79209 + error("%qE attribute does not apply to functions", name);
79210 + return NULL_TREE;
79211 + }
79212 +
79213 + if (TREE_CODE(*node) == VAR_DECL) {
79214 + error("%qE attribute does not apply to variables", name);
79215 + return NULL_TREE;
79216 + }
79217 +
79218 + if (TYPE_P(*node)) {
79219 + if (TREE_CODE(*node) == RECORD_TYPE || TREE_CODE(*node) == UNION_TYPE)
79220 + *no_add_attrs = false;
79221 + else
79222 + error("%qE attribute applies to struct and union types only", name);
79223 + return NULL_TREE;
79224 + }
79225 +
79226 + type = TREE_TYPE(*node);
79227 +
79228 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) {
79229 + error("%qE attribute applies to struct and union types only", name);
79230 + return NULL_TREE;
79231 + }
79232 +
79233 + if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) {
79234 + error("%qE attribute is already applied to the type", name);
79235 + return NULL_TREE;
79236 + }
79237 +
79238 + if (TREE_CODE(*node) == TYPE_DECL && !TYPE_READONLY(type)) {
79239 + error("%qE attribute used on type that is not constified", name);
79240 + return NULL_TREE;
79241 + }
79242 +
79243 + if (TREE_CODE(*node) == TYPE_DECL) {
79244 + TREE_TYPE(*node) = deconstify_type(type);
79245 + TREE_READONLY(*node) = 0;
79246 + return NULL_TREE;
79247 + }
79248 +
79249 + return NULL_TREE;
79250 +}
79251 +
79252 +static tree handle_do_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
79253 +{
79254 + *no_add_attrs = true;
79255 + if (!TYPE_P(*node)) {
79256 + error("%qE attribute applies to types only", name);
79257 + return NULL_TREE;
79258 + }
79259 +
79260 + if (TREE_CODE(*node) != RECORD_TYPE && TREE_CODE(*node) != UNION_TYPE) {
79261 + error("%qE attribute applies to struct and union types only", name);
79262 + return NULL_TREE;
79263 + }
79264 +
79265 + *no_add_attrs = false;
79266 + constify_type(*node);
79267 + return NULL_TREE;
79268 +}
79269 +
79270 +static struct attribute_spec no_const_attr = {
79271 + .name = "no_const",
79272 + .min_length = 0,
79273 + .max_length = 0,
79274 + .decl_required = false,
79275 + .type_required = false,
79276 + .function_type_required = false,
79277 + .handler = handle_no_const_attribute,
79278 +#if BUILDING_GCC_VERSION >= 4007
79279 + .affects_type_identity = true
79280 +#endif
79281 +};
79282 +
79283 +static struct attribute_spec do_const_attr = {
79284 + .name = "do_const",
79285 + .min_length = 0,
79286 + .max_length = 0,
79287 + .decl_required = false,
79288 + .type_required = false,
79289 + .function_type_required = false,
79290 + .handler = handle_do_const_attribute,
79291 +#if BUILDING_GCC_VERSION >= 4007
79292 + .affects_type_identity = true
79293 +#endif
79294 +};
79295 +
79296 +static void register_attributes(void *event_data, void *data)
79297 +{
79298 + register_attribute(&no_const_attr);
79299 + register_attribute(&do_const_attr);
79300 +}
79301 +
79302 +static void constify_type(tree type)
79303 +{
79304 + TYPE_READONLY(type) = 1;
79305 + C_TYPE_FIELDS_READONLY(type) = 1;
79306 +}
79307 +
79308 +static bool is_fptr(tree field)
79309 +{
79310 + tree ptr = TREE_TYPE(field);
79311 +
79312 + if (TREE_CODE(ptr) != POINTER_TYPE)
79313 + return false;
79314 +
79315 + return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
79316 +}
79317 +
79318 +static bool walk_struct(tree node)
79319 +{
79320 + tree field;
79321 +
79322 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node)))
79323 + return false;
79324 +
79325 + if (TYPE_FIELDS(node) == NULL_TREE)
79326 + return false;
79327 +
79328 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
79329 + tree type = TREE_TYPE(field);
79330 + enum tree_code code = TREE_CODE(type);
79331 + if (code == RECORD_TYPE || code == UNION_TYPE) {
79332 + if (!(walk_struct(type)))
79333 + return false;
79334 + } else if (!is_fptr(field) && !TREE_READONLY(field))
79335 + return false;
79336 + }
79337 + return true;
79338 +}
79339 +
79340 +static void finish_type(void *event_data, void *data)
79341 +{
79342 + tree type = (tree)event_data;
79343 +
79344 + if (type == NULL_TREE)
79345 + return;
79346 +
79347 + if (TYPE_READONLY(type))
79348 + return;
79349 +
79350 + if (walk_struct(type))
79351 + constify_type(type);
79352 +}
79353 +
79354 +static unsigned int check_local_variables(void);
79355 +
79356 +struct gimple_opt_pass pass_local_variable = {
79357 + {
79358 + .type = GIMPLE_PASS,
79359 + .name = "check_local_variables",
79360 + .gate = NULL,
79361 + .execute = check_local_variables,
79362 + .sub = NULL,
79363 + .next = NULL,
79364 + .static_pass_number = 0,
79365 + .tv_id = TV_NONE,
79366 + .properties_required = 0,
79367 + .properties_provided = 0,
79368 + .properties_destroyed = 0,
79369 + .todo_flags_start = 0,
79370 + .todo_flags_finish = 0
79371 + }
79372 +};
79373 +
79374 +static unsigned int check_local_variables(void)
79375 +{
79376 + tree var;
79377 + referenced_var_iterator rvi;
79378 +
79379 +#if BUILDING_GCC_VERSION == 4005
79380 + FOR_EACH_REFERENCED_VAR(var, rvi) {
79381 +#else
79382 + FOR_EACH_REFERENCED_VAR(cfun, var, rvi) {
79383 +#endif
79384 + tree type = TREE_TYPE(var);
79385 +
79386 + if (!DECL_P(var) || TREE_STATIC(var) || DECL_EXTERNAL(var))
79387 + continue;
79388 +
79389 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
79390 + continue;
79391 +
79392 + if (!TYPE_READONLY(type))
79393 + continue;
79394 +
79395 +// if (lookup_attribute("no_const", DECL_ATTRIBUTES(var)))
79396 +// continue;
79397 +
79398 +// if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type)))
79399 +// continue;
79400 +
79401 + if (walk_struct(type)) {
79402 + error("constified variable %qE cannot be local", var);
79403 + return 1;
79404 + }
79405 + }
79406 + return 0;
79407 +}
79408 +
79409 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
79410 +{
79411 + const char * const plugin_name = plugin_info->base_name;
79412 + const int argc = plugin_info->argc;
79413 + const struct plugin_argument * const argv = plugin_info->argv;
79414 + int i;
79415 + bool constify = true;
79416 +
79417 + struct register_pass_info local_variable_pass_info = {
79418 + .pass = &pass_local_variable.pass,
79419 + .reference_pass_name = "*referenced_vars",
79420 + .ref_pass_instance_number = 0,
79421 + .pos_op = PASS_POS_INSERT_AFTER
79422 + };
79423 +
79424 + if (!plugin_default_version_check(version, &gcc_version)) {
79425 + error(G_("incompatible gcc/plugin versions"));
79426 + return 1;
79427 + }
79428 +
79429 + for (i = 0; i < argc; ++i) {
79430 + if (!(strcmp(argv[i].key, "no-constify"))) {
79431 + constify = false;
79432 + continue;
79433 + }
79434 + error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
79435 + }
79436 +
79437 + register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
79438 + if (constify) {
79439 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
79440 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &local_variable_pass_info);
79441 + }
79442 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
79443 +
79444 + return 0;
79445 +}
79446 diff --git a/tools/gcc/kallocstat_plugin.c b/tools/gcc/kallocstat_plugin.c
79447 new file mode 100644
79448 index 0000000..a5eabce
79449 --- /dev/null
79450 +++ b/tools/gcc/kallocstat_plugin.c
79451 @@ -0,0 +1,167 @@
79452 +/*
79453 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
79454 + * Licensed under the GPL v2
79455 + *
79456 + * Note: the choice of the license means that the compilation process is
79457 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
79458 + * but for the kernel it doesn't matter since it doesn't link against
79459 + * any of the gcc libraries
79460 + *
79461 + * gcc plugin to find the distribution of k*alloc sizes
79462 + *
79463 + * TODO:
79464 + *
79465 + * BUGS:
79466 + * - none known
79467 + */
79468 +#include "gcc-plugin.h"
79469 +#include "config.h"
79470 +#include "system.h"
79471 +#include "coretypes.h"
79472 +#include "tree.h"
79473 +#include "tree-pass.h"
79474 +#include "flags.h"
79475 +#include "intl.h"
79476 +#include "toplev.h"
79477 +#include "plugin.h"
79478 +//#include "expr.h" where are you...
79479 +#include "diagnostic.h"
79480 +#include "plugin-version.h"
79481 +#include "tm.h"
79482 +#include "function.h"
79483 +#include "basic-block.h"
79484 +#include "gimple.h"
79485 +#include "rtl.h"
79486 +#include "emit-rtl.h"
79487 +
79488 +extern void print_gimple_stmt(FILE *, gimple, int, int);
79489 +
79490 +int plugin_is_GPL_compatible;
79491 +
79492 +static const char * const kalloc_functions[] = {
79493 + "__kmalloc",
79494 + "kmalloc",
79495 + "kmalloc_large",
79496 + "kmalloc_node",
79497 + "kmalloc_order",
79498 + "kmalloc_order_trace",
79499 + "kmalloc_slab",
79500 + "kzalloc",
79501 + "kzalloc_node",
79502 +};
79503 +
79504 +static struct plugin_info kallocstat_plugin_info = {
79505 + .version = "201111150100",
79506 +};
79507 +
79508 +static unsigned int execute_kallocstat(void);
79509 +
79510 +static struct gimple_opt_pass kallocstat_pass = {
79511 + .pass = {
79512 + .type = GIMPLE_PASS,
79513 + .name = "kallocstat",
79514 + .gate = NULL,
79515 + .execute = execute_kallocstat,
79516 + .sub = NULL,
79517 + .next = NULL,
79518 + .static_pass_number = 0,
79519 + .tv_id = TV_NONE,
79520 + .properties_required = 0,
79521 + .properties_provided = 0,
79522 + .properties_destroyed = 0,
79523 + .todo_flags_start = 0,
79524 + .todo_flags_finish = 0
79525 + }
79526 +};
79527 +
79528 +static bool is_kalloc(const char *fnname)
79529 +{
79530 + size_t i;
79531 +
79532 + for (i = 0; i < ARRAY_SIZE(kalloc_functions); i++)
79533 + if (!strcmp(fnname, kalloc_functions[i]))
79534 + return true;
79535 + return false;
79536 +}
79537 +
79538 +static unsigned int execute_kallocstat(void)
79539 +{
79540 + basic_block bb;
79541 +
79542 + // 1. loop through BBs and GIMPLE statements
79543 + FOR_EACH_BB(bb) {
79544 + gimple_stmt_iterator gsi;
79545 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
79546 + // gimple match:
79547 + tree fndecl, size;
79548 + gimple call_stmt;
79549 + const char *fnname;
79550 +
79551 + // is it a call
79552 + call_stmt = gsi_stmt(gsi);
79553 + if (!is_gimple_call(call_stmt))
79554 + continue;
79555 + fndecl = gimple_call_fndecl(call_stmt);
79556 + if (fndecl == NULL_TREE)
79557 + continue;
79558 + if (TREE_CODE(fndecl) != FUNCTION_DECL)
79559 + continue;
79560 +
79561 + // is it a call to k*alloc
79562 + fnname = IDENTIFIER_POINTER(DECL_NAME(fndecl));
79563 + if (!is_kalloc(fnname))
79564 + continue;
79565 +
79566 + // is the size arg the result of a simple const assignment
79567 + size = gimple_call_arg(call_stmt, 0);
79568 + while (true) {
79569 + gimple def_stmt;
79570 + expanded_location xloc;
79571 + size_t size_val;
79572 +
79573 + if (TREE_CODE(size) != SSA_NAME)
79574 + break;
79575 + def_stmt = SSA_NAME_DEF_STMT(size);
79576 + if (!def_stmt || !is_gimple_assign(def_stmt))
79577 + break;
79578 + if (gimple_num_ops(def_stmt) != 2)
79579 + break;
79580 + size = gimple_assign_rhs1(def_stmt);
79581 + if (!TREE_CONSTANT(size))
79582 + continue;
79583 + xloc = expand_location(gimple_location(def_stmt));
79584 + if (!xloc.file)
79585 + xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
79586 + size_val = TREE_INT_CST_LOW(size);
79587 + fprintf(stderr, "kallocsize: %8zu %8zx %s %s:%u\n", size_val, size_val, fnname, xloc.file, xloc.line);
79588 + break;
79589 + }
79590 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
79591 +//debug_tree(gimple_call_fn(call_stmt));
79592 +//print_node(stderr, "pax", fndecl, 4);
79593 + }
79594 + }
79595 +
79596 + return 0;
79597 +}
79598 +
79599 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
79600 +{
79601 + const char * const plugin_name = plugin_info->base_name;
79602 + struct register_pass_info kallocstat_pass_info = {
79603 + .pass = &kallocstat_pass.pass,
79604 + .reference_pass_name = "ssa",
79605 + .ref_pass_instance_number = 0,
79606 + .pos_op = PASS_POS_INSERT_AFTER
79607 + };
79608 +
79609 + if (!plugin_default_version_check(version, &gcc_version)) {
79610 + error(G_("incompatible gcc/plugin versions"));
79611 + return 1;
79612 + }
79613 +
79614 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kallocstat_plugin_info);
79615 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kallocstat_pass_info);
79616 +
79617 + return 0;
79618 +}
79619 diff --git a/tools/gcc/kernexec_plugin.c b/tools/gcc/kernexec_plugin.c
79620 new file mode 100644
79621 index 0000000..008f159
79622 --- /dev/null
79623 +++ b/tools/gcc/kernexec_plugin.c
79624 @@ -0,0 +1,427 @@
79625 +/*
79626 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
79627 + * Licensed under the GPL v2
79628 + *
79629 + * Note: the choice of the license means that the compilation process is
79630 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
79631 + * but for the kernel it doesn't matter since it doesn't link against
79632 + * any of the gcc libraries
79633 + *
79634 + * gcc plugin to make KERNEXEC/amd64 almost as good as it is on i386
79635 + *
79636 + * TODO:
79637 + *
79638 + * BUGS:
79639 + * - none known
79640 + */
79641 +#include "gcc-plugin.h"
79642 +#include "config.h"
79643 +#include "system.h"
79644 +#include "coretypes.h"
79645 +#include "tree.h"
79646 +#include "tree-pass.h"
79647 +#include "flags.h"
79648 +#include "intl.h"
79649 +#include "toplev.h"
79650 +#include "plugin.h"
79651 +//#include "expr.h" where are you...
79652 +#include "diagnostic.h"
79653 +#include "plugin-version.h"
79654 +#include "tm.h"
79655 +#include "function.h"
79656 +#include "basic-block.h"
79657 +#include "gimple.h"
79658 +#include "rtl.h"
79659 +#include "emit-rtl.h"
79660 +#include "tree-flow.h"
79661 +
79662 +extern void print_gimple_stmt(FILE *, gimple, int, int);
79663 +extern rtx emit_move_insn(rtx x, rtx y);
79664 +
79665 +int plugin_is_GPL_compatible;
79666 +
79667 +static struct plugin_info kernexec_plugin_info = {
79668 + .version = "201111291120",
79669 + .help = "method=[bts|or]\tinstrumentation method\n"
79670 +};
79671 +
79672 +static unsigned int execute_kernexec_reload(void);
79673 +static unsigned int execute_kernexec_fptr(void);
79674 +static unsigned int execute_kernexec_retaddr(void);
79675 +static bool kernexec_cmodel_check(void);
79676 +
79677 +static void (*kernexec_instrument_fptr)(gimple_stmt_iterator *);
79678 +static void (*kernexec_instrument_retaddr)(rtx);
79679 +
79680 +static struct gimple_opt_pass kernexec_reload_pass = {
79681 + .pass = {
79682 + .type = GIMPLE_PASS,
79683 + .name = "kernexec_reload",
79684 + .gate = kernexec_cmodel_check,
79685 + .execute = execute_kernexec_reload,
79686 + .sub = NULL,
79687 + .next = NULL,
79688 + .static_pass_number = 0,
79689 + .tv_id = TV_NONE,
79690 + .properties_required = 0,
79691 + .properties_provided = 0,
79692 + .properties_destroyed = 0,
79693 + .todo_flags_start = 0,
79694 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
79695 + }
79696 +};
79697 +
79698 +static struct gimple_opt_pass kernexec_fptr_pass = {
79699 + .pass = {
79700 + .type = GIMPLE_PASS,
79701 + .name = "kernexec_fptr",
79702 + .gate = kernexec_cmodel_check,
79703 + .execute = execute_kernexec_fptr,
79704 + .sub = NULL,
79705 + .next = NULL,
79706 + .static_pass_number = 0,
79707 + .tv_id = TV_NONE,
79708 + .properties_required = 0,
79709 + .properties_provided = 0,
79710 + .properties_destroyed = 0,
79711 + .todo_flags_start = 0,
79712 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
79713 + }
79714 +};
79715 +
79716 +static struct rtl_opt_pass kernexec_retaddr_pass = {
79717 + .pass = {
79718 + .type = RTL_PASS,
79719 + .name = "kernexec_retaddr",
79720 + .gate = kernexec_cmodel_check,
79721 + .execute = execute_kernexec_retaddr,
79722 + .sub = NULL,
79723 + .next = NULL,
79724 + .static_pass_number = 0,
79725 + .tv_id = TV_NONE,
79726 + .properties_required = 0,
79727 + .properties_provided = 0,
79728 + .properties_destroyed = 0,
79729 + .todo_flags_start = 0,
79730 + .todo_flags_finish = TODO_dump_func | TODO_ggc_collect
79731 + }
79732 +};
79733 +
79734 +static bool kernexec_cmodel_check(void)
79735 +{
79736 + tree section;
79737 +
79738 + if (ix86_cmodel != CM_KERNEL)
79739 + return false;
79740 +
79741 + section = lookup_attribute("section", DECL_ATTRIBUTES(current_function_decl));
79742 + if (!section || !TREE_VALUE(section))
79743 + return true;
79744 +
79745 + section = TREE_VALUE(TREE_VALUE(section));
79746 + if (strncmp(TREE_STRING_POINTER(section), ".vsyscall_", 10))
79747 + return true;
79748 +
79749 + return false;
79750 +}
79751 +
79752 +/*
79753 + * add special KERNEXEC instrumentation: reload %r10 after it has been clobbered
79754 + */
79755 +static void kernexec_reload_fptr_mask(gimple_stmt_iterator *gsi)
79756 +{
79757 + gimple asm_movabs_stmt;
79758 +
79759 + // build asm volatile("movabs $0x8000000000000000, %%r10\n\t" : : : );
79760 + asm_movabs_stmt = gimple_build_asm_vec("movabs $0x8000000000000000, %%r10\n\t", NULL, NULL, NULL, NULL);
79761 + gimple_asm_set_volatile(asm_movabs_stmt, true);
79762 + gsi_insert_after(gsi, asm_movabs_stmt, GSI_CONTINUE_LINKING);
79763 + update_stmt(asm_movabs_stmt);
79764 +}
79765 +
79766 +/*
79767 + * find all asm() stmts that clobber r10 and add a reload of r10
79768 + */
79769 +static unsigned int execute_kernexec_reload(void)
79770 +{
79771 + basic_block bb;
79772 +
79773 + // 1. loop through BBs and GIMPLE statements
79774 + FOR_EACH_BB(bb) {
79775 + gimple_stmt_iterator gsi;
79776 +
79777 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
79778 + // gimple match: __asm__ ("" : : : "r10");
79779 + gimple asm_stmt;
79780 + size_t nclobbers;
79781 +
79782 + // is it an asm ...
79783 + asm_stmt = gsi_stmt(gsi);
79784 + if (gimple_code(asm_stmt) != GIMPLE_ASM)
79785 + continue;
79786 +
79787 + // ... clobbering r10
79788 + nclobbers = gimple_asm_nclobbers(asm_stmt);
79789 + while (nclobbers--) {
79790 + tree op = gimple_asm_clobber_op(asm_stmt, nclobbers);
79791 + if (strcmp(TREE_STRING_POINTER(TREE_VALUE(op)), "r10"))
79792 + continue;
79793 + kernexec_reload_fptr_mask(&gsi);
79794 +//print_gimple_stmt(stderr, asm_stmt, 0, TDF_LINENO);
79795 + break;
79796 + }
79797 + }
79798 + }
79799 +
79800 + return 0;
79801 +}
79802 +
79803 +/*
79804 + * add special KERNEXEC instrumentation: force MSB of fptr to 1, which will produce
79805 + * a non-canonical address from a userland ptr and will just trigger a GPF on dereference
79806 + */
79807 +static void kernexec_instrument_fptr_bts(gimple_stmt_iterator *gsi)
79808 +{
79809 + gimple assign_intptr, assign_new_fptr, call_stmt;
79810 + tree intptr, old_fptr, new_fptr, kernexec_mask;
79811 +
79812 + call_stmt = gsi_stmt(*gsi);
79813 + old_fptr = gimple_call_fn(call_stmt);
79814 +
79815 + // create temporary unsigned long variable used for bitops and cast fptr to it
79816 + intptr = create_tmp_var(long_unsigned_type_node, "kernexec_bts");
79817 + add_referenced_var(intptr);
79818 + mark_sym_for_renaming(intptr);
79819 + assign_intptr = gimple_build_assign(intptr, fold_convert(long_unsigned_type_node, old_fptr));
79820 + gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
79821 + update_stmt(assign_intptr);
79822 +
79823 + // apply logical or to temporary unsigned long and bitmask
79824 + kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0x8000000000000000LL);
79825 +// kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0xffffffff80000000LL);
79826 + assign_intptr = gimple_build_assign(intptr, fold_build2(BIT_IOR_EXPR, long_long_unsigned_type_node, intptr, kernexec_mask));
79827 + gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
79828 + update_stmt(assign_intptr);
79829 +
79830 + // cast temporary unsigned long back to a temporary fptr variable
79831 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec");
79832 + add_referenced_var(new_fptr);
79833 + mark_sym_for_renaming(new_fptr);
79834 + assign_new_fptr = gimple_build_assign(new_fptr, fold_convert(TREE_TYPE(old_fptr), intptr));
79835 + gsi_insert_before(gsi, assign_new_fptr, GSI_SAME_STMT);
79836 + update_stmt(assign_new_fptr);
79837 +
79838 + // replace call stmt fn with the new fptr
79839 + gimple_call_set_fn(call_stmt, new_fptr);
79840 + update_stmt(call_stmt);
79841 +}
79842 +
79843 +static void kernexec_instrument_fptr_or(gimple_stmt_iterator *gsi)
79844 +{
79845 + gimple asm_or_stmt, call_stmt;
79846 + tree old_fptr, new_fptr, input, output;
79847 + VEC(tree, gc) *inputs = NULL;
79848 + VEC(tree, gc) *outputs = NULL;
79849 +
79850 + call_stmt = gsi_stmt(*gsi);
79851 + old_fptr = gimple_call_fn(call_stmt);
79852 +
79853 + // create temporary fptr variable
79854 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_or");
79855 + add_referenced_var(new_fptr);
79856 + mark_sym_for_renaming(new_fptr);
79857 +
79858 + // build asm volatile("orq %%r10, %0\n\t" : "=r"(new_fptr) : "0"(old_fptr));
79859 + input = build_tree_list(NULL_TREE, build_string(2, "0"));
79860 + input = chainon(NULL_TREE, build_tree_list(input, old_fptr));
79861 + output = build_tree_list(NULL_TREE, build_string(3, "=r"));
79862 + output = chainon(NULL_TREE, build_tree_list(output, new_fptr));
79863 + VEC_safe_push(tree, gc, inputs, input);
79864 + VEC_safe_push(tree, gc, outputs, output);
79865 + asm_or_stmt = gimple_build_asm_vec("orq %%r10, %0\n\t", inputs, outputs, NULL, NULL);
79866 + gimple_asm_set_volatile(asm_or_stmt, true);
79867 + gsi_insert_before(gsi, asm_or_stmt, GSI_SAME_STMT);
79868 + update_stmt(asm_or_stmt);
79869 +
79870 + // replace call stmt fn with the new fptr
79871 + gimple_call_set_fn(call_stmt, new_fptr);
79872 + update_stmt(call_stmt);
79873 +}
79874 +
79875 +/*
79876 + * find all C level function pointer dereferences and forcibly set the highest bit of the pointer
79877 + */
79878 +static unsigned int execute_kernexec_fptr(void)
79879 +{
79880 + basic_block bb;
79881 +
79882 + // 1. loop through BBs and GIMPLE statements
79883 + FOR_EACH_BB(bb) {
79884 + gimple_stmt_iterator gsi;
79885 +
79886 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
79887 + // gimple match: h_1 = get_fptr (); D.2709_3 = h_1 (x_2(D));
79888 + tree fn;
79889 + gimple call_stmt;
79890 +
79891 + // is it a call ...
79892 + call_stmt = gsi_stmt(gsi);
79893 + if (!is_gimple_call(call_stmt))
79894 + continue;
79895 + fn = gimple_call_fn(call_stmt);
79896 + if (TREE_CODE(fn) == ADDR_EXPR)
79897 + continue;
79898 + if (TREE_CODE(fn) != SSA_NAME)
79899 + gcc_unreachable();
79900 +
79901 + // ... through a function pointer
79902 + fn = SSA_NAME_VAR(fn);
79903 + if (TREE_CODE(fn) != VAR_DECL && TREE_CODE(fn) != PARM_DECL)
79904 + continue;
79905 + fn = TREE_TYPE(fn);
79906 + if (TREE_CODE(fn) != POINTER_TYPE)
79907 + continue;
79908 + fn = TREE_TYPE(fn);
79909 + if (TREE_CODE(fn) != FUNCTION_TYPE)
79910 + continue;
79911 +
79912 + kernexec_instrument_fptr(&gsi);
79913 +
79914 +//debug_tree(gimple_call_fn(call_stmt));
79915 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
79916 + }
79917 + }
79918 +
79919 + return 0;
79920 +}
79921 +
79922 +// add special KERNEXEC instrumentation: btsq $63,(%rsp) just before retn
79923 +static void kernexec_instrument_retaddr_bts(rtx insn)
79924 +{
79925 + rtx btsq;
79926 + rtvec argvec, constraintvec, labelvec;
79927 + int line;
79928 +
79929 + // create asm volatile("btsq $63,(%%rsp)":::)
79930 + argvec = rtvec_alloc(0);
79931 + constraintvec = rtvec_alloc(0);
79932 + labelvec = rtvec_alloc(0);
79933 + line = expand_location(RTL_LOCATION(insn)).line;
79934 + btsq = gen_rtx_ASM_OPERANDS(VOIDmode, "btsq $63,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
79935 + MEM_VOLATILE_P(btsq) = 1;
79936 +// RTX_FRAME_RELATED_P(btsq) = 1; // not for ASM_OPERANDS
79937 + emit_insn_before(btsq, insn);
79938 +}
79939 +
79940 +// add special KERNEXEC instrumentation: orq %r10,(%rsp) just before retn
79941 +static void kernexec_instrument_retaddr_or(rtx insn)
79942 +{
79943 + rtx orq;
79944 + rtvec argvec, constraintvec, labelvec;
79945 + int line;
79946 +
79947 + // create asm volatile("orq %%r10,(%%rsp)":::)
79948 + argvec = rtvec_alloc(0);
79949 + constraintvec = rtvec_alloc(0);
79950 + labelvec = rtvec_alloc(0);
79951 + line = expand_location(RTL_LOCATION(insn)).line;
79952 + orq = gen_rtx_ASM_OPERANDS(VOIDmode, "orq %%r10,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
79953 + MEM_VOLATILE_P(orq) = 1;
79954 +// RTX_FRAME_RELATED_P(orq) = 1; // not for ASM_OPERANDS
79955 + emit_insn_before(orq, insn);
79956 +}
79957 +
79958 +/*
79959 + * find all asm level function returns and forcibly set the highest bit of the return address
79960 + */
79961 +static unsigned int execute_kernexec_retaddr(void)
79962 +{
79963 + rtx insn;
79964 +
79965 + // 1. find function returns
79966 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
79967 + // rtl match: (jump_insn 41 40 42 2 (return) fptr.c:42 634 {return_internal} (nil))
79968 + // (jump_insn 12 9 11 2 (parallel [ (return) (unspec [ (0) ] UNSPEC_REP) ]) fptr.c:46 635 {return_internal_long} (nil))
79969 + rtx body;
79970 +
79971 + // is it a retn
79972 + if (!JUMP_P(insn))
79973 + continue;
79974 + body = PATTERN(insn);
79975 + if (GET_CODE(body) == PARALLEL)
79976 + body = XVECEXP(body, 0, 0);
79977 + if (GET_CODE(body) != RETURN)
79978 + continue;
79979 + kernexec_instrument_retaddr(insn);
79980 + }
79981 +
79982 +// print_simple_rtl(stderr, get_insns());
79983 +// print_rtl(stderr, get_insns());
79984 +
79985 + return 0;
79986 +}
79987 +
79988 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
79989 +{
79990 + const char * const plugin_name = plugin_info->base_name;
79991 + const int argc = plugin_info->argc;
79992 + const struct plugin_argument * const argv = plugin_info->argv;
79993 + int i;
79994 + struct register_pass_info kernexec_reload_pass_info = {
79995 + .pass = &kernexec_reload_pass.pass,
79996 + .reference_pass_name = "ssa",
79997 + .ref_pass_instance_number = 0,
79998 + .pos_op = PASS_POS_INSERT_AFTER
79999 + };
80000 + struct register_pass_info kernexec_fptr_pass_info = {
80001 + .pass = &kernexec_fptr_pass.pass,
80002 + .reference_pass_name = "ssa",
80003 + .ref_pass_instance_number = 0,
80004 + .pos_op = PASS_POS_INSERT_AFTER
80005 + };
80006 + struct register_pass_info kernexec_retaddr_pass_info = {
80007 + .pass = &kernexec_retaddr_pass.pass,
80008 + .reference_pass_name = "pro_and_epilogue",
80009 + .ref_pass_instance_number = 0,
80010 + .pos_op = PASS_POS_INSERT_AFTER
80011 + };
80012 +
80013 + if (!plugin_default_version_check(version, &gcc_version)) {
80014 + error(G_("incompatible gcc/plugin versions"));
80015 + return 1;
80016 + }
80017 +
80018 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kernexec_plugin_info);
80019 +
80020 + if (TARGET_64BIT == 0)
80021 + return 0;
80022 +
80023 + for (i = 0; i < argc; ++i) {
80024 + if (!strcmp(argv[i].key, "method")) {
80025 + if (!argv[i].value) {
80026 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
80027 + continue;
80028 + }
80029 + if (!strcmp(argv[i].value, "bts")) {
80030 + kernexec_instrument_fptr = kernexec_instrument_fptr_bts;
80031 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_bts;
80032 + } else if (!strcmp(argv[i].value, "or")) {
80033 + kernexec_instrument_fptr = kernexec_instrument_fptr_or;
80034 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_or;
80035 + fix_register("r10", 1, 1);
80036 + } else
80037 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
80038 + continue;
80039 + }
80040 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
80041 + }
80042 + if (!kernexec_instrument_fptr || !kernexec_instrument_retaddr)
80043 + error(G_("no instrumentation method was selected via '-fplugin-arg-%s-method'"), plugin_name);
80044 +
80045 + if (kernexec_instrument_fptr == kernexec_instrument_fptr_or)
80046 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_reload_pass_info);
80047 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_fptr_pass_info);
80048 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_retaddr_pass_info);
80049 +
80050 + return 0;
80051 +}
80052 diff --git a/tools/gcc/stackleak_plugin.c b/tools/gcc/stackleak_plugin.c
80053 new file mode 100644
80054 index 0000000..b87ec9d
80055 --- /dev/null
80056 +++ b/tools/gcc/stackleak_plugin.c
80057 @@ -0,0 +1,313 @@
80058 +/*
80059 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
80060 + * Licensed under the GPL v2
80061 + *
80062 + * Note: the choice of the license means that the compilation process is
80063 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
80064 + * but for the kernel it doesn't matter since it doesn't link against
80065 + * any of the gcc libraries
80066 + *
80067 + * gcc plugin to help implement various PaX features
80068 + *
80069 + * - track lowest stack pointer
80070 + *
80071 + * TODO:
80072 + * - initialize all local variables
80073 + *
80074 + * BUGS:
80075 + * - none known
80076 + */
80077 +#include "gcc-plugin.h"
80078 +#include "config.h"
80079 +#include "system.h"
80080 +#include "coretypes.h"
80081 +#include "tree.h"
80082 +#include "tree-pass.h"
80083 +#include "flags.h"
80084 +#include "intl.h"
80085 +#include "toplev.h"
80086 +#include "plugin.h"
80087 +//#include "expr.h" where are you...
80088 +#include "diagnostic.h"
80089 +#include "plugin-version.h"
80090 +#include "tm.h"
80091 +#include "function.h"
80092 +#include "basic-block.h"
80093 +#include "gimple.h"
80094 +#include "rtl.h"
80095 +#include "emit-rtl.h"
80096 +
80097 +extern void print_gimple_stmt(FILE *, gimple, int, int);
80098 +
80099 +int plugin_is_GPL_compatible;
80100 +
80101 +static int track_frame_size = -1;
80102 +static const char track_function[] = "pax_track_stack";
80103 +static const char check_function[] = "pax_check_alloca";
80104 +static bool init_locals;
80105 +
80106 +static struct plugin_info stackleak_plugin_info = {
80107 + .version = "201203140940",
80108 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
80109 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
80110 +};
80111 +
80112 +static bool gate_stackleak_track_stack(void);
80113 +static unsigned int execute_stackleak_tree_instrument(void);
80114 +static unsigned int execute_stackleak_final(void);
80115 +
80116 +static struct gimple_opt_pass stackleak_tree_instrument_pass = {
80117 + .pass = {
80118 + .type = GIMPLE_PASS,
80119 + .name = "stackleak_tree_instrument",
80120 + .gate = gate_stackleak_track_stack,
80121 + .execute = execute_stackleak_tree_instrument,
80122 + .sub = NULL,
80123 + .next = NULL,
80124 + .static_pass_number = 0,
80125 + .tv_id = TV_NONE,
80126 + .properties_required = PROP_gimple_leh | PROP_cfg,
80127 + .properties_provided = 0,
80128 + .properties_destroyed = 0,
80129 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
80130 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
80131 + }
80132 +};
80133 +
80134 +static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
80135 + .pass = {
80136 + .type = RTL_PASS,
80137 + .name = "stackleak_final",
80138 + .gate = gate_stackleak_track_stack,
80139 + .execute = execute_stackleak_final,
80140 + .sub = NULL,
80141 + .next = NULL,
80142 + .static_pass_number = 0,
80143 + .tv_id = TV_NONE,
80144 + .properties_required = 0,
80145 + .properties_provided = 0,
80146 + .properties_destroyed = 0,
80147 + .todo_flags_start = 0,
80148 + .todo_flags_finish = TODO_dump_func
80149 + }
80150 +};
80151 +
80152 +static bool gate_stackleak_track_stack(void)
80153 +{
80154 + return track_frame_size >= 0;
80155 +}
80156 +
80157 +static void stackleak_check_alloca(gimple_stmt_iterator *gsi)
80158 +{
80159 + gimple check_alloca;
80160 + tree fntype, fndecl, alloca_size;
80161 +
80162 + fntype = build_function_type_list(void_type_node, long_unsigned_type_node, NULL_TREE);
80163 + fndecl = build_fn_decl(check_function, fntype);
80164 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
80165 +
80166 + // insert call to void pax_check_alloca(unsigned long size)
80167 + alloca_size = gimple_call_arg(gsi_stmt(*gsi), 0);
80168 + check_alloca = gimple_build_call(fndecl, 1, alloca_size);
80169 + gsi_insert_before(gsi, check_alloca, GSI_SAME_STMT);
80170 +}
80171 +
80172 +static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi)
80173 +{
80174 + gimple track_stack;
80175 + tree fntype, fndecl;
80176 +
80177 + fntype = build_function_type_list(void_type_node, NULL_TREE);
80178 + fndecl = build_fn_decl(track_function, fntype);
80179 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
80180 +
80181 + // insert call to void pax_track_stack(void)
80182 + track_stack = gimple_build_call(fndecl, 0);
80183 + gsi_insert_after(gsi, track_stack, GSI_CONTINUE_LINKING);
80184 +}
80185 +
80186 +#if BUILDING_GCC_VERSION == 4005
80187 +static bool gimple_call_builtin_p(gimple stmt, enum built_in_function code)
80188 +{
80189 + tree fndecl;
80190 +
80191 + if (!is_gimple_call(stmt))
80192 + return false;
80193 + fndecl = gimple_call_fndecl(stmt);
80194 + if (!fndecl)
80195 + return false;
80196 + if (DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_NORMAL)
80197 + return false;
80198 +// print_node(stderr, "pax", fndecl, 4);
80199 + return DECL_FUNCTION_CODE(fndecl) == code;
80200 +}
80201 +#endif
80202 +
80203 +static bool is_alloca(gimple stmt)
80204 +{
80205 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA))
80206 + return true;
80207 +
80208 +#if BUILDING_GCC_VERSION >= 4007
80209 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
80210 + return true;
80211 +#endif
80212 +
80213 + return false;
80214 +}
80215 +
80216 +static unsigned int execute_stackleak_tree_instrument(void)
80217 +{
80218 + basic_block bb, entry_bb;
80219 + bool prologue_instrumented = false, is_leaf = true;
80220 +
80221 + entry_bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
80222 +
80223 + // 1. loop through BBs and GIMPLE statements
80224 + FOR_EACH_BB(bb) {
80225 + gimple_stmt_iterator gsi;
80226 +
80227 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
80228 + gimple stmt;
80229 +
80230 + stmt = gsi_stmt(gsi);
80231 +
80232 + if (is_gimple_call(stmt))
80233 + is_leaf = false;
80234 +
80235 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
80236 + if (!is_alloca(stmt))
80237 + continue;
80238 +
80239 + // 2. insert stack overflow check before each __builtin_alloca call
80240 + stackleak_check_alloca(&gsi);
80241 +
80242 + // 3. insert track call after each __builtin_alloca call
80243 + stackleak_add_instrumentation(&gsi);
80244 + if (bb == entry_bb)
80245 + prologue_instrumented = true;
80246 + }
80247 + }
80248 +
80249 + // special cases for some bad linux code: taking the address of static inline functions will materialize them
80250 + // but we mustn't instrument some of them as the resulting stack alignment required by the function call ABI
80251 + // will break other assumptions regarding the expected (but not otherwise enforced) register clobbering ABI.
80252 + // case in point: native_save_fl on amd64 when optimized for size clobbers rdx if it were instrumented here.
80253 + if (is_leaf && !TREE_PUBLIC(current_function_decl) && DECL_DECLARED_INLINE_P(current_function_decl))
80254 + return 0;
80255 + if (is_leaf && !strncmp(IDENTIFIER_POINTER(DECL_NAME(current_function_decl)), "_paravirt_", 10))
80256 + return 0;
80257 +
80258 + // 4. insert track call at the beginning
80259 + if (!prologue_instrumented) {
80260 + gimple_stmt_iterator gsi;
80261 +
80262 + bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
80263 + if (dom_info_available_p(CDI_DOMINATORS))
80264 + set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
80265 + gsi = gsi_start_bb(bb);
80266 + stackleak_add_instrumentation(&gsi);
80267 + }
80268 +
80269 + return 0;
80270 +}
80271 +
80272 +static unsigned int execute_stackleak_final(void)
80273 +{
80274 + rtx insn;
80275 +
80276 + if (cfun->calls_alloca)
80277 + return 0;
80278 +
80279 + // keep calls only if function frame is big enough
80280 + if (get_frame_size() >= track_frame_size)
80281 + return 0;
80282 +
80283 + // 1. find pax_track_stack calls
80284 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
80285 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
80286 + rtx body;
80287 +
80288 + if (!CALL_P(insn))
80289 + continue;
80290 + body = PATTERN(insn);
80291 + if (GET_CODE(body) != CALL)
80292 + continue;
80293 + body = XEXP(body, 0);
80294 + if (GET_CODE(body) != MEM)
80295 + continue;
80296 + body = XEXP(body, 0);
80297 + if (GET_CODE(body) != SYMBOL_REF)
80298 + continue;
80299 + if (strcmp(XSTR(body, 0), track_function))
80300 + continue;
80301 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
80302 + // 2. delete call
80303 + insn = delete_insn_and_edges(insn);
80304 +#if BUILDING_GCC_VERSION >= 4007
80305 + if (GET_CODE(insn) == NOTE && NOTE_KIND(insn) == NOTE_INSN_CALL_ARG_LOCATION)
80306 + insn = delete_insn_and_edges(insn);
80307 +#endif
80308 + }
80309 +
80310 +// print_simple_rtl(stderr, get_insns());
80311 +// print_rtl(stderr, get_insns());
80312 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
80313 +
80314 + return 0;
80315 +}
80316 +
80317 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
80318 +{
80319 + const char * const plugin_name = plugin_info->base_name;
80320 + const int argc = plugin_info->argc;
80321 + const struct plugin_argument * const argv = plugin_info->argv;
80322 + int i;
80323 + struct register_pass_info stackleak_tree_instrument_pass_info = {
80324 + .pass = &stackleak_tree_instrument_pass.pass,
80325 +// .reference_pass_name = "tree_profile",
80326 + .reference_pass_name = "optimized",
80327 + .ref_pass_instance_number = 0,
80328 + .pos_op = PASS_POS_INSERT_BEFORE
80329 + };
80330 + struct register_pass_info stackleak_final_pass_info = {
80331 + .pass = &stackleak_final_rtl_opt_pass.pass,
80332 + .reference_pass_name = "final",
80333 + .ref_pass_instance_number = 0,
80334 + .pos_op = PASS_POS_INSERT_BEFORE
80335 + };
80336 +
80337 + if (!plugin_default_version_check(version, &gcc_version)) {
80338 + error(G_("incompatible gcc/plugin versions"));
80339 + return 1;
80340 + }
80341 +
80342 + register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
80343 +
80344 + for (i = 0; i < argc; ++i) {
80345 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
80346 + if (!argv[i].value) {
80347 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
80348 + continue;
80349 + }
80350 + track_frame_size = atoi(argv[i].value);
80351 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
80352 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
80353 + continue;
80354 + }
80355 + if (!strcmp(argv[i].key, "initialize-locals")) {
80356 + if (argv[i].value) {
80357 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
80358 + continue;
80359 + }
80360 + init_locals = true;
80361 + continue;
80362 + }
80363 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
80364 + }
80365 +
80366 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
80367 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
80368 +
80369 + return 0;
80370 +}
80371 diff --git a/tools/perf/util/include/asm/alternative-asm.h b/tools/perf/util/include/asm/alternative-asm.h
80372 index 6789d78..4afd019 100644
80373 --- a/tools/perf/util/include/asm/alternative-asm.h
80374 +++ b/tools/perf/util/include/asm/alternative-asm.h
80375 @@ -5,4 +5,7 @@
80376
80377 #define altinstruction_entry #
80378
80379 + .macro pax_force_retaddr rip=0, reload=0
80380 + .endm
80381 +
80382 #endif
80383 diff --git a/usr/gen_init_cpio.c b/usr/gen_init_cpio.c
80384 index af0f22f..9a7d479 100644
80385 --- a/usr/gen_init_cpio.c
80386 +++ b/usr/gen_init_cpio.c
80387 @@ -303,7 +303,7 @@ static int cpio_mkfile(const char *name, const char *location,
80388 int retval;
80389 int rc = -1;
80390 int namesize;
80391 - int i;
80392 + unsigned int i;
80393
80394 mode |= S_IFREG;
80395
80396 @@ -392,9 +392,10 @@ static char *cpio_replace_env(char *new_location)
80397 *env_var = *expanded = '\0';
80398 strncat(env_var, start + 2, end - start - 2);
80399 strncat(expanded, new_location, start - new_location);
80400 - strncat(expanded, getenv(env_var), PATH_MAX);
80401 - strncat(expanded, end + 1, PATH_MAX);
80402 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
80403 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
80404 strncpy(new_location, expanded, PATH_MAX);
80405 + new_location[PATH_MAX] = 0;
80406 } else
80407 break;
80408 }
80409 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
80410 index a91f980..a58d32c 100644
80411 --- a/virt/kvm/kvm_main.c
80412 +++ b/virt/kvm/kvm_main.c
80413 @@ -75,7 +75,7 @@ LIST_HEAD(vm_list);
80414
80415 static cpumask_var_t cpus_hardware_enabled;
80416 static int kvm_usage_count = 0;
80417 -static atomic_t hardware_enable_failed;
80418 +static atomic_unchecked_t hardware_enable_failed;
80419
80420 struct kmem_cache *kvm_vcpu_cache;
80421 EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
80422 @@ -2312,7 +2312,7 @@ static void hardware_enable_nolock(void *junk)
80423
80424 if (r) {
80425 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
80426 - atomic_inc(&hardware_enable_failed);
80427 + atomic_inc_unchecked(&hardware_enable_failed);
80428 printk(KERN_INFO "kvm: enabling virtualization on "
80429 "CPU%d failed\n", cpu);
80430 }
80431 @@ -2366,10 +2366,10 @@ static int hardware_enable_all(void)
80432
80433 kvm_usage_count++;
80434 if (kvm_usage_count == 1) {
80435 - atomic_set(&hardware_enable_failed, 0);
80436 + atomic_set_unchecked(&hardware_enable_failed, 0);
80437 on_each_cpu(hardware_enable_nolock, NULL, 1);
80438
80439 - if (atomic_read(&hardware_enable_failed)) {
80440 + if (atomic_read_unchecked(&hardware_enable_failed)) {
80441 hardware_disable_all_nolock();
80442 r = -EBUSY;
80443 }
80444 @@ -2732,7 +2732,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
80445 kvm_arch_vcpu_put(vcpu);
80446 }
80447
80448 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
80449 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
80450 struct module *module)
80451 {
80452 int r;
80453 @@ -2795,7 +2795,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
80454 if (!vcpu_align)
80455 vcpu_align = __alignof__(struct kvm_vcpu);
80456 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
80457 - 0, NULL);
80458 + SLAB_USERCOPY, NULL);
80459 if (!kvm_vcpu_cache) {
80460 r = -ENOMEM;
80461 goto out_free_3;
80462 @@ -2805,9 +2805,11 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
80463 if (r)
80464 goto out_free;
80465
80466 - kvm_chardev_ops.owner = module;
80467 - kvm_vm_fops.owner = module;
80468 - kvm_vcpu_fops.owner = module;
80469 + pax_open_kernel();
80470 + *(void **)&kvm_chardev_ops.owner = module;
80471 + *(void **)&kvm_vm_fops.owner = module;
80472 + *(void **)&kvm_vcpu_fops.owner = module;
80473 + pax_close_kernel();
80474
80475 r = misc_register(&kvm_dev);
80476 if (r) {